Merge lp:~corey.bryant/charms/trusty/ceph/amulet-basic into lp:~openstack-charmers-archive/charms/trusty/ceph/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 81
Proposed branch: lp:~corey.bryant/charms/trusty/ceph/amulet-basic
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph/next
Diff against target: 1897 lines (+1619/-38)
25 files modified
Makefile (+17/-3)
charm-helpers-hooks.yaml (+10/-0)
charm-helpers-sync.yaml (+0/-10)
charm-helpers-tests.yaml (+5/-0)
hooks/charmhelpers/contrib/network/ip.py (+19/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0)
hooks/charmhelpers/core/hookenv.py (+2/-1)
hooks/charmhelpers/core/host.py (+34/-1)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+310/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+41/-4)
templates/ceph.conf (+18/-18)
tests/00-setup (+8/-0)
tests/12-basic-precise-grizzly (+11/-0)
tests/13-basic-precise-havana (+11/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+47/-0)
tests/basic_deployment.py (+302/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+71/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+61/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+275/-0)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/ceph/amulet-basic
Reviewer Review Type Date Requested Status
Liam Young (community) Approve
OpenStack Charmers Pending
Review via email: mp+226509@code.launchpad.net
To post a comment you must log in.
Revision history for this message
Liam Young (gnuoy) wrote :

Needs lint fixes (see inline comment) and there's a syntax error in there causing the tests to break http://paste.ubuntu.com/7829730/

review: Needs Fixing
76. By Liam Young

[jamespage. r=gnuoy] Use charm-helper for pkg version comparisons

77. By James Page

[james-page,r=dosaboy] Add support for ceph public and cluster network configuration.

78. By Liam Young

[corey.bryant, r=gnuoy] This fix is for https://bugs.launchpad.net/charms/+source/ceph/+bug/1352547. This removes the default for the source config option, allowing it to get set to None, which allows the charm to use the default distro packages.

Revision history for this message
Corey Bryant (corey.bryant) wrote :

Hi Liam,

Sorry, I completely missed your comments on this until now. Thanks for reviewing. I updated the noqa comment and fixed resulting lint errors. I wasn't able to reproduce the syntax error. If you can still reproduce that on this latest version please let me know.

Thanks,
Corey

79. By Corey Bryant

Move charm-helpers-sync.yaml to charm-helpers-hooks.yaml and
add charm-helpers-tests.yaml.

80. By Corey Bryant

Automatically pull down charm_helpers_sync when 'make sync' is called.

81. By Corey Bryant

Sync with charm-helpers

82. By Corey Bryant

Add Amulet basic tests

83. By Corey Bryant

Remove leading whitespace from templates/ceph.conf (ConfigParser can't parse)

Revision history for this message
Liam Young (gnuoy) wrote :

Approve

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'Makefile'
2--- Makefile 2014-05-21 10:09:21 +0000
3+++ Makefile 2014-08-26 02:07:33 +0000
4@@ -1,11 +1,25 @@
5 #!/usr/bin/make
6+PYTHON := /usr/bin/env python
7
8 lint:
9- @flake8 --exclude hooks/charmhelpers hooks
10+ @flake8 --exclude hooks/charmhelpers hooks tests
11 @charm proof
12
13-sync:
14- @charm-helper-sync -c charm-helpers-sync.yaml
15+test:
16+ @echo Starting Amulet tests...
17+ # coreycb note: The -v should only be temporary until Amulet sends
18+ # raise_status() messages to stderr:
19+ # https://bugs.launchpad.net/amulet/+bug/1320357
20+ @juju test -v -p AMULET_HTTP_PROXY
21+
22+bin/charm_helpers_sync.py:
23+ @mkdir -p bin
24+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
25+ > bin/charm_helpers_sync.py
26+
27+sync: bin/charm_helpers_sync.py
28+ $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
29+ $(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
30
31 publish: lint
32 bzr push lp:charms/ceph
33
34=== added file 'charm-helpers-hooks.yaml'
35--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
36+++ charm-helpers-hooks.yaml 2014-08-26 02:07:33 +0000
37@@ -0,0 +1,10 @@
38+branch: lp:charm-helpers
39+destination: hooks/charmhelpers
40+include:
41+ - core
42+ - fetch
43+ - contrib.storage.linux:
44+ - utils
45+ - payload.execd
46+ - contrib.openstack.alternatives
47+ - contrib.network.ip
48
49=== removed file 'charm-helpers-sync.yaml'
50--- charm-helpers-sync.yaml 2014-07-25 08:08:17 +0000
51+++ charm-helpers-sync.yaml 1970-01-01 00:00:00 +0000
52@@ -1,10 +0,0 @@
53-branch: lp:charm-helpers
54-destination: hooks/charmhelpers
55-include:
56- - core
57- - fetch
58- - contrib.storage.linux:
59- - utils
60- - payload.execd
61- - contrib.openstack.alternatives
62- - contrib.network.ip
63
64=== added file 'charm-helpers-tests.yaml'
65--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
66+++ charm-helpers-tests.yaml 2014-08-26 02:07:33 +0000
67@@ -0,0 +1,5 @@
68+branch: lp:charm-helpers
69+destination: tests/charmhelpers
70+include:
71+ - contrib.amulet
72+ - contrib.openstack.amulet
73
74=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
75--- hooks/charmhelpers/contrib/network/ip.py 2014-07-25 08:01:29 +0000
76+++ hooks/charmhelpers/contrib/network/ip.py 2014-08-26 02:07:33 +0000
77@@ -4,7 +4,7 @@
78
79 from charmhelpers.fetch import apt_install
80 from charmhelpers.core.hookenv import (
81- ERROR, log,
82+ ERROR, log, config,
83 )
84
85 try:
86@@ -154,3 +154,21 @@
87 get_iface_for_address = partial(_get_for_address, key='iface')
88
89 get_netmask_for_address = partial(_get_for_address, key='netmask')
90+
91+
92+def get_ipv6_addr(iface="eth0"):
93+ try:
94+ iface_addrs = netifaces.ifaddresses(iface)
95+ if netifaces.AF_INET6 not in iface_addrs:
96+ raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
97+
98+ addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
99+ ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
100+ and config('vip') != a['addr']]
101+ if not ipv6_addr:
102+ raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
103+
104+ return ipv6_addr[0]
105+
106+ except ValueError:
107+ raise ValueError("Invalid interface '%s'" % iface)
108
109=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
110--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-25 08:01:29 +0000
111+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-26 02:07:33 +0000
112@@ -46,5 +46,8 @@
113 :returns: boolean: True if the path represents a mounted device, False if
114 it doesn't.
115 '''
116+ is_partition = bool(re.search(r".*[0-9]+\b", device))
117 out = check_output(['mount'])
118+ if is_partition:
119+ return bool(re.search(device + r"\b", out))
120 return bool(re.search(device + r"[0-9]+\b", out))
121
122=== modified file 'hooks/charmhelpers/core/hookenv.py'
123--- hooks/charmhelpers/core/hookenv.py 2014-07-25 08:01:29 +0000
124+++ hooks/charmhelpers/core/hookenv.py 2014-08-26 02:07:33 +0000
125@@ -285,8 +285,9 @@
126 raise
127
128
129-def relation_set(relation_id=None, relation_settings={}, **kwargs):
130+def relation_set(relation_id=None, relation_settings=None, **kwargs):
131 """Set relation information for the current unit"""
132+ relation_settings = relation_settings if relation_settings else {}
133 relation_cmd_line = ['relation-set']
134 if relation_id is not None:
135 relation_cmd_line.extend(('-r', relation_id))
136
137=== modified file 'hooks/charmhelpers/core/host.py'
138--- hooks/charmhelpers/core/host.py 2014-07-25 08:08:17 +0000
139+++ hooks/charmhelpers/core/host.py 2014-08-26 02:07:33 +0000
140@@ -12,6 +12,8 @@
141 import string
142 import subprocess
143 import hashlib
144+import shutil
145+from contextlib import contextmanager
146
147 from collections import OrderedDict
148
149@@ -52,7 +54,7 @@
150 def service_running(service):
151 """Determine whether a system service is running"""
152 try:
153- output = subprocess.check_output(['service', service, 'status'])
154+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
155 except subprocess.CalledProcessError:
156 return False
157 else:
158@@ -62,6 +64,16 @@
159 return False
160
161
162+def service_available(service_name):
163+ """Determine whether a system service is available"""
164+ try:
165+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
166+ except subprocess.CalledProcessError:
167+ return False
168+ else:
169+ return True
170+
171+
172 def adduser(username, password=None, shell='/bin/bash', system_user=False):
173 """Add a user to the system"""
174 try:
175@@ -329,3 +341,24 @@
176 pkgcache = apt_pkg.Cache()
177 pkg = pkgcache[package]
178 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
179+
180+
181+@contextmanager
182+def chdir(d):
183+ cur = os.getcwd()
184+ try:
185+ yield os.chdir(d)
186+ finally:
187+ os.chdir(cur)
188+
189+
190+def chownr(path, owner, group):
191+ uid = pwd.getpwnam(owner).pw_uid
192+ gid = grp.getgrnam(group).gr_gid
193+
194+ for root, dirs, files in os.walk(path):
195+ for name in dirs + files:
196+ full = os.path.join(root, name)
197+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
198+ if not broken_symlink:
199+ os.chown(full, uid, gid)
200
201=== added directory 'hooks/charmhelpers/core/services'
202=== added file 'hooks/charmhelpers/core/services/__init__.py'
203--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
204+++ hooks/charmhelpers/core/services/__init__.py 2014-08-26 02:07:33 +0000
205@@ -0,0 +1,2 @@
206+from .base import *
207+from .helpers import *
208
209=== added file 'hooks/charmhelpers/core/services/base.py'
210--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
211+++ hooks/charmhelpers/core/services/base.py 2014-08-26 02:07:33 +0000
212@@ -0,0 +1,310 @@
213+import os
214+import re
215+import json
216+from collections import Iterable
217+
218+from charmhelpers.core import host
219+from charmhelpers.core import hookenv
220+
221+
222+__all__ = ['ServiceManager', 'ManagerCallback',
223+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
224+ 'service_restart', 'service_stop']
225+
226+
227+class ServiceManager(object):
228+ def __init__(self, services=None):
229+ """
230+ Register a list of services, given their definitions.
231+
232+ Service definitions are dicts in the following formats (all keys except
233+ 'service' are optional)::
234+
235+ {
236+ "service": <service name>,
237+ "required_data": <list of required data contexts>,
238+ "provided_data": <list of provided data contexts>,
239+ "data_ready": <one or more callbacks>,
240+ "data_lost": <one or more callbacks>,
241+ "start": <one or more callbacks>,
242+ "stop": <one or more callbacks>,
243+ "ports": <list of ports to manage>,
244+ }
245+
246+ The 'required_data' list should contain dicts of required data (or
247+ dependency managers that act like dicts and know how to collect the data).
248+ Only when all items in the 'required_data' list are populated are the list
249+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
250+ information.
251+
252+ The 'provided_data' list should contain relation data providers, most likely
253+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
254+ that will indicate a set of data to set on a given relation.
255+
256+ The 'data_ready' value should be either a single callback, or a list of
257+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
258+ Each callback will be called with the service name as the only parameter.
259+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
260+ are fired.
261+
262+ The 'data_lost' value should be either a single callback, or a list of
263+ callbacks, to be called when a 'required_data' item no longer passes
264+ `is_ready()`. Each callback will be called with the service name as the
265+ only parameter. After all of the 'data_lost' callbacks are called,
266+ the 'stop' callbacks are fired.
267+
268+ The 'start' value should be either a single callback, or a list of
269+ callbacks, to be called when starting the service, after the 'data_ready'
270+ callbacks are complete. Each callback will be called with the service
271+ name as the only parameter. This defaults to
272+ `[host.service_start, services.open_ports]`.
273+
274+ The 'stop' value should be either a single callback, or a list of
275+ callbacks, to be called when stopping the service. If the service is
276+ being stopped because it no longer has all of its 'required_data', this
277+ will be called after all of the 'data_lost' callbacks are complete.
278+ Each callback will be called with the service name as the only parameter.
279+ This defaults to `[services.close_ports, host.service_stop]`.
280+
281+ The 'ports' value should be a list of ports to manage. The default
282+ 'start' handler will open the ports after the service is started,
283+ and the default 'stop' handler will close the ports prior to stopping
284+ the service.
285+
286+
287+ Examples:
288+
289+ The following registers an Upstart service called bingod that depends on
290+ a mongodb relation and which runs a custom `db_migrate` function prior to
291+ restarting the service, and a Runit service called spadesd::
292+
293+ manager = services.ServiceManager([
294+ {
295+ 'service': 'bingod',
296+ 'ports': [80, 443],
297+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
298+ 'data_ready': [
299+ services.template(source='bingod.conf'),
300+ services.template(source='bingod.ini',
301+ target='/etc/bingod.ini',
302+ owner='bingo', perms=0400),
303+ ],
304+ },
305+ {
306+ 'service': 'spadesd',
307+ 'data_ready': services.template(source='spadesd_run.j2',
308+ target='/etc/sv/spadesd/run',
309+ perms=0555),
310+ 'start': runit_start,
311+ 'stop': runit_stop,
312+ },
313+ ])
314+ manager.manage()
315+ """
316+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
317+ self._ready = None
318+ self.services = {}
319+ for service in services or []:
320+ service_name = service['service']
321+ self.services[service_name] = service
322+
323+ def manage(self):
324+ """
325+ Handle the current hook by doing The Right Thing with the registered services.
326+ """
327+ hook_name = hookenv.hook_name()
328+ if hook_name == 'stop':
329+ self.stop_services()
330+ else:
331+ self.provide_data()
332+ self.reconfigure_services()
333+
334+ def provide_data(self):
335+ """
336+ Set the relation data for each provider in the ``provided_data`` list.
337+
338+ A provider must have a `name` attribute, which indicates which relation
339+ to set data on, and a `provide_data()` method, which returns a dict of
340+ data to set.
341+ """
342+ hook_name = hookenv.hook_name()
343+ for service in self.services.values():
344+ for provider in service.get('provided_data', []):
345+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
346+ data = provider.provide_data()
347+ _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
348+ if _ready:
349+ hookenv.relation_set(None, data)
350+
351+ def reconfigure_services(self, *service_names):
352+ """
353+ Update all files for one or more registered services, and,
354+ if ready, optionally restart them.
355+
356+ If no service names are given, reconfigures all registered services.
357+ """
358+ for service_name in service_names or self.services.keys():
359+ if self.is_ready(service_name):
360+ self.fire_event('data_ready', service_name)
361+ self.fire_event('start', service_name, default=[
362+ service_restart,
363+ manage_ports])
364+ self.save_ready(service_name)
365+ else:
366+ if self.was_ready(service_name):
367+ self.fire_event('data_lost', service_name)
368+ self.fire_event('stop', service_name, default=[
369+ manage_ports,
370+ service_stop])
371+ self.save_lost(service_name)
372+
373+ def stop_services(self, *service_names):
374+ """
375+ Stop one or more registered services, by name.
376+
377+ If no service names are given, stops all registered services.
378+ """
379+ for service_name in service_names or self.services.keys():
380+ self.fire_event('stop', service_name, default=[
381+ manage_ports,
382+ service_stop])
383+
384+ def get_service(self, service_name):
385+ """
386+ Given the name of a registered service, return its service definition.
387+ """
388+ service = self.services.get(service_name)
389+ if not service:
390+ raise KeyError('Service not registered: %s' % service_name)
391+ return service
392+
393+ def fire_event(self, event_name, service_name, default=None):
394+ """
395+ Fire a data_ready, data_lost, start, or stop event on a given service.
396+ """
397+ service = self.get_service(service_name)
398+ callbacks = service.get(event_name, default)
399+ if not callbacks:
400+ return
401+ if not isinstance(callbacks, Iterable):
402+ callbacks = [callbacks]
403+ for callback in callbacks:
404+ if isinstance(callback, ManagerCallback):
405+ callback(self, service_name, event_name)
406+ else:
407+ callback(service_name)
408+
409+ def is_ready(self, service_name):
410+ """
411+ Determine if a registered service is ready, by checking its 'required_data'.
412+
413+ A 'required_data' item can be any mapping type, and is considered ready
414+ if `bool(item)` evaluates as True.
415+ """
416+ service = self.get_service(service_name)
417+ reqs = service.get('required_data', [])
418+ return all(bool(req) for req in reqs)
419+
420+ def _load_ready_file(self):
421+ if self._ready is not None:
422+ return
423+ if os.path.exists(self._ready_file):
424+ with open(self._ready_file) as fp:
425+ self._ready = set(json.load(fp))
426+ else:
427+ self._ready = set()
428+
429+ def _save_ready_file(self):
430+ if self._ready is None:
431+ return
432+ with open(self._ready_file, 'w') as fp:
433+ json.dump(list(self._ready), fp)
434+
435+ def save_ready(self, service_name):
436+ """
437+ Save an indicator that the given service is now data_ready.
438+ """
439+ self._load_ready_file()
440+ self._ready.add(service_name)
441+ self._save_ready_file()
442+
443+ def save_lost(self, service_name):
444+ """
445+ Save an indicator that the given service is no longer data_ready.
446+ """
447+ self._load_ready_file()
448+ self._ready.discard(service_name)
449+ self._save_ready_file()
450+
451+ def was_ready(self, service_name):
452+ """
453+ Determine if the given service was previously data_ready.
454+ """
455+ self._load_ready_file()
456+ return service_name in self._ready
457+
458+
459+class ManagerCallback(object):
460+ """
461+ Special case of a callback that takes the `ServiceManager` instance
462+ in addition to the service name.
463+
464+ Subclasses should implement `__call__` which should accept three parameters:
465+
466+ * `manager` The `ServiceManager` instance
467+ * `service_name` The name of the service it's being triggered for
468+ * `event_name` The name of the event that this callback is handling
469+ """
470+ def __call__(self, manager, service_name, event_name):
471+ raise NotImplementedError()
472+
473+
474+class PortManagerCallback(ManagerCallback):
475+ """
476+ Callback class that will open or close ports, for use as either
477+ a start or stop action.
478+ """
479+ def __call__(self, manager, service_name, event_name):
480+ service = manager.get_service(service_name)
481+ new_ports = service.get('ports', [])
482+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
483+ if os.path.exists(port_file):
484+ with open(port_file) as fp:
485+ old_ports = fp.read().split(',')
486+ for old_port in old_ports:
487+ if bool(old_port):
488+ old_port = int(old_port)
489+ if old_port not in new_ports:
490+ hookenv.close_port(old_port)
491+ with open(port_file, 'w') as fp:
492+ fp.write(','.join(str(port) for port in new_ports))
493+ for port in new_ports:
494+ if event_name == 'start':
495+ hookenv.open_port(port)
496+ elif event_name == 'stop':
497+ hookenv.close_port(port)
498+
499+
500+def service_stop(service_name):
501+ """
502+ Wrapper around host.service_stop to prevent spurious "unknown service"
503+ messages in the logs.
504+ """
505+ if host.service_running(service_name):
506+ host.service_stop(service_name)
507+
508+
509+def service_restart(service_name):
510+ """
511+ Wrapper around host.service_restart to prevent spurious "unknown service"
512+ messages in the logs.
513+ """
514+ if host.service_available(service_name):
515+ if host.service_running(service_name):
516+ host.service_restart(service_name)
517+ else:
518+ host.service_start(service_name)
519+
520+
521+# Convenience aliases
522+open_ports = close_ports = manage_ports = PortManagerCallback()
523
524=== added file 'hooks/charmhelpers/core/services/helpers.py'
525--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
526+++ hooks/charmhelpers/core/services/helpers.py 2014-08-26 02:07:33 +0000
527@@ -0,0 +1,125 @@
528+from charmhelpers.core import hookenv
529+from charmhelpers.core import templating
530+
531+from charmhelpers.core.services.base import ManagerCallback
532+
533+
534+__all__ = ['RelationContext', 'TemplateCallback',
535+ 'render_template', 'template']
536+
537+
538+class RelationContext(dict):
539+ """
540+ Base class for a context generator that gets relation data from juju.
541+
542+ Subclasses must provide the attributes `name`, which is the name of the
543+ interface of interest, `interface`, which is the type of the interface of
544+ interest, and `required_keys`, which is the set of keys required for the
545+ relation to be considered complete. The data for all interfaces matching
546+ the `name` attribute that are complete will used to populate the dictionary
547+ values (see `get_data`, below).
548+
549+ The generated context will be namespaced under the interface type, to prevent
550+ potential naming conflicts.
551+ """
552+ name = None
553+ interface = None
554+ required_keys = []
555+
556+ def __init__(self, *args, **kwargs):
557+ super(RelationContext, self).__init__(*args, **kwargs)
558+ self.get_data()
559+
560+ def __bool__(self):
561+ """
562+ Returns True if all of the required_keys are available.
563+ """
564+ return self.is_ready()
565+
566+ __nonzero__ = __bool__
567+
568+ def __repr__(self):
569+ return super(RelationContext, self).__repr__()
570+
571+ def is_ready(self):
572+ """
573+ Returns True if all of the `required_keys` are available from any units.
574+ """
575+ ready = len(self.get(self.name, [])) > 0
576+ if not ready:
577+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
578+ return ready
579+
580+ def _is_ready(self, unit_data):
581+ """
582+ Helper method that tests a set of relation data and returns True if
583+ all of the `required_keys` are present.
584+ """
585+ return set(unit_data.keys()).issuperset(set(self.required_keys))
586+
587+ def get_data(self):
588+ """
589+ Retrieve the relation data for each unit involved in a relation and,
590+ if complete, store it in a list under `self[self.name]`. This
591+ is automatically called when the RelationContext is instantiated.
592+
593+ The units are sorted lexicographically first by the service ID, then by
594+ the unit ID. Thus, if an interface has two other services, 'db:1'
595+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
596+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
597+ set of data, the relation data for the units will be stored in the
598+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
599+
600+ If you only care about a single unit on the relation, you can just
601+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
602+ support multiple units on a relation, you should iterate over the list,
603+ like::
604+
605+ {% for unit in interface -%}
606+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
607+ {%- endfor %}
608+
609+ Note that since all sets of relation data from all related services and
610+ units are in a single list, if you need to know which service or unit a
611+ set of data came from, you'll need to extend this class to preserve
612+ that information.
613+ """
614+ if not hookenv.relation_ids(self.name):
615+ return
616+
617+ ns = self.setdefault(self.name, [])
618+ for rid in sorted(hookenv.relation_ids(self.name)):
619+ for unit in sorted(hookenv.related_units(rid)):
620+ reldata = hookenv.relation_get(rid=rid, unit=unit)
621+ if self._is_ready(reldata):
622+ ns.append(reldata)
623+
624+ def provide_data(self):
625+ """
626+ Return data to be relation_set for this interface.
627+ """
628+ return {}
629+
630+
631+class TemplateCallback(ManagerCallback):
632+ """
633+ Callback class that will render a template, for use as a ready action.
634+ """
635+ def __init__(self, source, target, owner='root', group='root', perms=0444):
636+ self.source = source
637+ self.target = target
638+ self.owner = owner
639+ self.group = group
640+ self.perms = perms
641+
642+ def __call__(self, manager, service_name, event_name):
643+ service = manager.get_service(service_name)
644+ context = {}
645+ for ctx in service.get('required_data', []):
646+ context.update(ctx)
647+ templating.render(self.source, self.target, context,
648+ self.owner, self.group, self.perms)
649+
650+
651+# Convenience aliases for templates
652+render_template = template = TemplateCallback
653
654=== added file 'hooks/charmhelpers/core/templating.py'
655--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
656+++ hooks/charmhelpers/core/templating.py 2014-08-26 02:07:33 +0000
657@@ -0,0 +1,51 @@
658+import os
659+
660+from charmhelpers.core import host
661+from charmhelpers.core import hookenv
662+
663+
664+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
665+ """
666+ Render a template.
667+
668+ The `source` path, if not absolute, is relative to the `templates_dir`.
669+
670+ The `target` path should be absolute.
671+
672+ The context should be a dict containing the values to be replaced in the
673+ template.
674+
675+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
676+
677+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
678+
679+ Note: Using this requires python-jinja2; if it is not installed, calling
680+ this will attempt to use charmhelpers.fetch.apt_install to install it.
681+ """
682+ try:
683+ from jinja2 import FileSystemLoader, Environment, exceptions
684+ except ImportError:
685+ try:
686+ from charmhelpers.fetch import apt_install
687+ except ImportError:
688+ hookenv.log('Could not import jinja2, and could not import '
689+ 'charmhelpers.fetch to install it',
690+ level=hookenv.ERROR)
691+ raise
692+ apt_install('python-jinja2', fatal=True)
693+ from jinja2 import FileSystemLoader, Environment, exceptions
694+
695+ if templates_dir is None:
696+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
697+ loader = Environment(loader=FileSystemLoader(templates_dir))
698+ try:
699+ source = source
700+ template = loader.get_template(source)
701+ except exceptions.TemplateNotFound as e:
702+ hookenv.log('Could not load template %s from %s.' %
703+ (source, templates_dir),
704+ level=hookenv.ERROR)
705+ raise e
706+ content = template.render(context)
707+ host.mkdir(os.path.dirname(target))
708+ host.write_file(target, content, owner, group, perms)
709
710=== modified file 'hooks/charmhelpers/fetch/__init__.py'
711--- hooks/charmhelpers/fetch/__init__.py 2014-07-25 08:01:29 +0000
712+++ hooks/charmhelpers/fetch/__init__.py 2014-08-26 02:07:33 +0000
713@@ -1,4 +1,5 @@
714 import importlib
715+from tempfile import NamedTemporaryFile
716 import time
717 from yaml import safe_load
718 from charmhelpers.core.host import (
719@@ -122,6 +123,7 @@
720 # Tell apt to build an in-memory cache to prevent race conditions (if
721 # another process is already building the cache).
722 apt_pkg.config.set("Dir::Cache::pkgcache", "")
723+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
724
725 cache = apt_pkg.Cache()
726 _pkgs = []
727@@ -201,6 +203,27 @@
728
729
730 def add_source(source, key=None):
731+ """Add a package source to this system.
732+
733+ @param source: a URL or sources.list entry, as supported by
734+ add-apt-repository(1). Examples:
735+ ppa:charmers/example
736+ deb https://stub:key@private.example.com/ubuntu trusty main
737+
738+ In addition:
739+ 'proposed:' may be used to enable the standard 'proposed'
740+ pocket for the release.
741+ 'cloud:' may be used to activate official cloud archive pockets,
742+ such as 'cloud:icehouse'
743+
744+ @param key: A key to be added to the system's APT keyring and used
745+ to verify the signatures on packages. Ideally, this should be an
746+ ASCII format GPG public key including the block headers. A GPG key
747+ id may also be used, but be aware that only insecure protocols are
748+ available to retrieve the actual public key from a public keyserver
749+ placing your Juju environment at risk. ppa and cloud archive keys
749+ are securely added automatically, so should not be provided.
751+ """
752 if source is None:
753 log('Source is not present. Skipping')
754 return
755@@ -225,10 +248,23 @@
756 release = lsb_release()['DISTRIB_CODENAME']
757 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
758 apt.write(PROPOSED_POCKET.format(release))
759+ else:
760+ raise SourceConfigError("Unknown source: {!r}".format(source))
761+
762 if key:
763- subprocess.check_call(['apt-key', 'adv', '--keyserver',
764- 'hkp://keyserver.ubuntu.com:80', '--recv',
765- key])
766+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
767+ with NamedTemporaryFile() as key_file:
768+ key_file.write(key)
769+ key_file.flush()
770+ key_file.seek(0)
771+ subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
772+ else:
773+ # Note that hkp: is in no way a secure protocol. Using a
774+ # GPG key id is pointless from a security POV unless you
775+ # absolutely trust your network and DNS.
776+ subprocess.check_call(['apt-key', 'adv', '--keyserver',
777+ 'hkp://keyserver.ubuntu.com:80', '--recv',
778+ key])
779
780
781 def configure_sources(update=False,
782@@ -238,7 +274,8 @@
783 Configure multiple sources from charm configuration.
784
785 The lists are encoded as yaml fragments in the configuration.
786- The frament needs to be included as a string.
787+ The fragment needs to be included as a string. Sources and their
788+ corresponding keys are of the types supported by add_source().
789
790 Example config:
791 install_sources: |
792
793=== modified file 'templates/ceph.conf'
794--- templates/ceph.conf 2014-07-25 08:01:29 +0000
795+++ templates/ceph.conf 2014-08-26 02:07:33 +0000
796@@ -1,35 +1,35 @@
797 [global]
798 {% if old_auth %}
799- auth supported = {{ auth_supported }}
800+auth supported = {{ auth_supported }}
801 {% else %}
802- auth cluster required = {{ auth_supported }}
803- auth service required = {{ auth_supported }}
804- auth client required = {{ auth_supported }}
805+auth cluster required = {{ auth_supported }}
806+auth service required = {{ auth_supported }}
807+auth client required = {{ auth_supported }}
808 {% endif %}
809- keyring = /etc/ceph/$cluster.$name.keyring
810- mon host = {{ mon_hosts }}
811- fsid = {{ fsid }}
812+keyring = /etc/ceph/$cluster.$name.keyring
813+mon host = {{ mon_hosts }}
814+fsid = {{ fsid }}
815
816- log to syslog = {{ use_syslog }}
817- err to syslog = {{ use_syslog }}
818- clog to syslog = {{ use_syslog }}
819- mon cluster log to syslog = {{ use_syslog }}
820+log to syslog = {{ use_syslog }}
821+err to syslog = {{ use_syslog }}
822+clog to syslog = {{ use_syslog }}
823+mon cluster log to syslog = {{ use_syslog }}
824
825 {%- if ceph_public_network is string %}
826- public network = {{ ceph_public_network }}
827+public network = {{ ceph_public_network }}
828 {%- endif %}
829 {%- if ceph_cluster_network is string %}
830- cluster network = {{ ceph_cluster_network }}
831+cluster network = {{ ceph_cluster_network }}
832 {%- endif %}
833
834 [mon]
835- keyring = /var/lib/ceph/mon/$cluster-$id/keyring
836+keyring = /var/lib/ceph/mon/$cluster-$id/keyring
837
838 [mds]
839- keyring = /var/lib/ceph/mds/$cluster-$id/keyring
840+keyring = /var/lib/ceph/mds/$cluster-$id/keyring
841
842 [osd]
843- keyring = /var/lib/ceph/osd/$cluster-$id/keyring
844- osd journal size = {{ osd_journal_size }}
845- filestore xattr use omap = true
846+keyring = /var/lib/ceph/osd/$cluster-$id/keyring
847+osd journal size = {{ osd_journal_size }}
848+filestore xattr use omap = true
849
850
851=== added directory 'tests'
852=== added file 'tests/00-setup'
853--- tests/00-setup 1970-01-01 00:00:00 +0000
854+++ tests/00-setup 2014-08-26 02:07:33 +0000
855@@ -0,0 +1,8 @@
856+#!/bin/bash
857+
858+set -ex
859+
860+sudo add-apt-repository --yes ppa:juju/stable
861+sudo apt-get update --yes
862+sudo apt-get install --yes python-amulet
863+sudo apt-get install --yes python-keystoneclient
864
865=== added file 'tests/12-basic-precise-grizzly'
866--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
867+++ tests/12-basic-precise-grizzly 2014-08-26 02:07:33 +0000
868@@ -0,0 +1,11 @@
869+#!/usr/bin/python
870+
871+"""Amulet tests on a basic ceph deployment on precise-grizzly."""
872+
873+from basic_deployment import CephBasicDeployment
874+
875+if __name__ == '__main__':
876+ deployment = CephBasicDeployment(series='precise',
877+ openstack='cloud:precise-grizzly',
878+ source='cloud:precise-updates/grizzly')
879+ deployment.run_tests()
880
881=== added file 'tests/13-basic-precise-havana'
882--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
883+++ tests/13-basic-precise-havana 2014-08-26 02:07:33 +0000
884@@ -0,0 +1,11 @@
885+#!/usr/bin/python
886+
887+"""Amulet tests on a basic ceph deployment on precise-havana."""
888+
889+from basic_deployment import CephBasicDeployment
890+
891+if __name__ == '__main__':
892+ deployment = CephBasicDeployment(series='precise',
893+ openstack='cloud:precise-havana',
894+ source='cloud:precise-updates/havana')
895+ deployment.run_tests()
896
897=== added file 'tests/14-basic-precise-icehouse'
898--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
899+++ tests/14-basic-precise-icehouse 2014-08-26 02:07:33 +0000
900@@ -0,0 +1,11 @@
901+#!/usr/bin/python
902+
903+"""Amulet tests on a basic ceph deployment on precise-icehouse."""
904+
905+from basic_deployment import CephBasicDeployment
906+
907+if __name__ == '__main__':
908+ deployment = CephBasicDeployment(series='precise',
909+ openstack='cloud:precise-icehouse',
910+ source='cloud:precise-updates/icehouse')
911+ deployment.run_tests()
912
913=== added file 'tests/15-basic-trusty-icehouse'
914--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
915+++ tests/15-basic-trusty-icehouse 2014-08-26 02:07:33 +0000
916@@ -0,0 +1,9 @@
917+#!/usr/bin/python
918+
919+"""Amulet tests on a basic ceph deployment on trusty-icehouse."""
920+
921+from basic_deployment import CephBasicDeployment
922+
923+if __name__ == '__main__':
924+ deployment = CephBasicDeployment(series='trusty')
925+ deployment.run_tests()
926
927=== added file 'tests/README'
928--- tests/README 1970-01-01 00:00:00 +0000
929+++ tests/README 2014-08-26 02:07:33 +0000
930@@ -0,0 +1,47 @@
931+This directory provides Amulet tests that focus on verification of ceph
932+deployments.
933+
934+If you use a web proxy server to access the web, you'll need to set the
935+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
936+
937+The following examples demonstrate different ways that tests can be executed.
938+All examples are run from the charm's root directory.
939+
940+ * To run all tests (starting with 00-setup):
941+
942+ make test
943+
944+ * To run a specific test module (or modules):
945+
946+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
947+
948+ * To run a specific test module (or modules), and keep the environment
949+ deployed after a failure:
950+
951+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
952+
953+ * To re-run a test module against an already deployed environment (one
954+ that was deployed by a previous call to 'juju test --set-e'):
955+
956+ ./tests/15-basic-trusty-icehouse
957+
958+For debugging and test development purposes, all code should be idempotent.
959+In other words, the code should have the ability to be re-run without changing
960+the results beyond the initial run. This enables editing and re-running of a
961+test module against an already deployed environment, as described above.
962+
963+Manual debugging tips:
964+
965+ * Set the following env vars before using the OpenStack CLI as admin:
966+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
967+ export OS_TENANT_NAME=admin
968+ export OS_USERNAME=admin
969+ export OS_PASSWORD=openstack
970+ export OS_REGION_NAME=RegionOne
971+
972+ * Set the following env vars before using the OpenStack CLI as demoUser:
973+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
974+ export OS_TENANT_NAME=demoTenant
975+ export OS_USERNAME=demoUser
976+ export OS_PASSWORD=password
977+ export OS_REGION_NAME=RegionOne
978
979=== added file 'tests/basic_deployment.py'
980--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
981+++ tests/basic_deployment.py 2014-08-26 02:07:33 +0000
982@@ -0,0 +1,302 @@
983+#!/usr/bin/python
984+
985+import amulet
986+from charmhelpers.contrib.openstack.amulet.deployment import (
987+ OpenStackAmuletDeployment
988+)
989+from charmhelpers.contrib.openstack.amulet.utils import ( # noqa
990+ OpenStackAmuletUtils,
991+ DEBUG,
992+ ERROR
993+)
994+
995+# Use DEBUG to turn on debug logging
996+u = OpenStackAmuletUtils(ERROR)
997+
998+
999+class CephBasicDeployment(OpenStackAmuletDeployment):
1000+ """Amulet tests on a basic ceph deployment."""
1001+
1002+ def __init__(self, series=None, openstack=None, source=None):
1003+ """Deploy the entire test environment."""
1004+ super(CephBasicDeployment, self).__init__(series, openstack, source)
1005+ self._add_services()
1006+ self._add_relations()
1007+ self._configure_services()
1008+ self._deploy()
1009+ self._initialize_tests()
1010+
1011+ def _add_services(self):
1012+ """Add services
1013+
1014+ Add the services that we're testing, including the number of units,
1015+ where ceph is local, and mysql and cinder are from the charm
1016+ store.
1017+ """
1018+ this_service = ('ceph', 3)
1019+ other_services = [('mysql', 1), ('keystone', 1),
1020+ ('rabbitmq-server', 1), ('nova-compute', 1),
1021+ ('glance', 1), ('cinder', 1)]
1022+ super(CephBasicDeployment, self)._add_services(this_service,
1023+ other_services)
1024+
1025+ def _add_relations(self):
1026+ """Add all of the relations for the services."""
1027+ relations = {
1028+ 'nova-compute:shared-db': 'mysql:shared-db',
1029+ 'nova-compute:amqp': 'rabbitmq-server:amqp',
1030+ 'nova-compute:image-service': 'glance:image-service',
1031+ 'nova-compute:ceph': 'ceph:client',
1032+ 'keystone:shared-db': 'mysql:shared-db',
1033+ 'glance:shared-db': 'mysql:shared-db',
1034+ 'glance:identity-service': 'keystone:identity-service',
1035+ 'glance:amqp': 'rabbitmq-server:amqp',
1036+ 'glance:ceph': 'ceph:client',
1037+ 'cinder:shared-db': 'mysql:shared-db',
1038+ 'cinder:identity-service': 'keystone:identity-service',
1039+ 'cinder:amqp': 'rabbitmq-server:amqp',
1040+ 'cinder:image-service': 'glance:image-service',
1041+ 'cinder:ceph': 'ceph:client'
1042+ }
1043+ super(CephBasicDeployment, self)._add_relations(relations)
1044+
1045+ def _configure_services(self):
1046+ """Configure all of the services."""
1047+ keystone_config = {'admin-password': 'openstack',
1048+ 'admin-token': 'ubuntutesting'}
1049+ mysql_config = {'dataset-size': '50%'}
1050+ cinder_config = {'block-device': 'None', 'glance-api-version': '2'}
1051+ ceph_config = {
1052+ 'monitor-count': '3',
1053+ 'auth-supported': 'none',
1054+ 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
1055+ 'monitor-secret': 'AQCXrnZQwI7KGBAAiPofmKEXKxu5bUzoYLVkbQ==',
1056+ 'osd-reformat': 'yes',
1057+ 'ephemeral-unmount': '/mnt'
1058+ }
1059+ if self._get_openstack_release() >= self.precise_grizzly:
1060+ ceph_config['osd-devices'] = '/dev/vdb /srv/ceph'
1061+ else:
1062+ ceph_config['osd-devices'] = '/dev/vdb'
1063+
1064+ configs = {'keystone': keystone_config,
1065+ 'mysql': mysql_config,
1066+ 'cinder': cinder_config,
1067+ 'ceph': ceph_config}
1068+ super(CephBasicDeployment, self)._configure_services(configs)
1069+
1070+ def _initialize_tests(self):
1071+ """Perform final initialization before tests get run."""
1072+ # Access the sentries for inspecting service units
1073+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
1074+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
1075+ self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
1076+ self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
1077+ self.glance_sentry = self.d.sentry.unit['glance/0']
1078+ self.cinder_sentry = self.d.sentry.unit['cinder/0']
1079+ self.ceph0_sentry = self.d.sentry.unit['ceph/0']
1080+ self.ceph1_sentry = self.d.sentry.unit['ceph/1']
1081+ self.ceph2_sentry = self.d.sentry.unit['ceph/2']
1082+
1083+ # Authenticate admin with keystone
1084+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
1085+ user='admin',
1086+ password='openstack',
1087+ tenant='admin')
1088+
1089+ # Authenticate admin with glance endpoint
1090+ self.glance = u.authenticate_glance_admin(self.keystone)
1091+
1092+ # Create a demo tenant/role/user
1093+ self.demo_tenant = 'demoTenant'
1094+ self.demo_role = 'demoRole'
1095+ self.demo_user = 'demoUser'
1096+ if not u.tenant_exists(self.keystone, self.demo_tenant):
1097+ tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
1098+ description='demo tenant',
1099+ enabled=True)
1100+ self.keystone.roles.create(name=self.demo_role)
1101+ self.keystone.users.create(name=self.demo_user,
1102+ password='password',
1103+ tenant_id=tenant.id,
1104+ email='demo@demo.com')
1105+
1106+ # Authenticate demo user with keystone
1107+ self.keystone_demo = u.authenticate_keystone_user(self.keystone,
1108+ self.demo_user,
1109+ 'password',
1110+ self.demo_tenant)
1111+
1112+ # Authenticate demo user with nova-api
1113+ self.nova_demo = u.authenticate_nova_user(self.keystone,
1114+ self.demo_user,
1115+ 'password',
1116+ self.demo_tenant)
1117+
1118+ def _ceph_osd_id(self, index):
1119+ """Produce a shell command that will return a ceph-osd id."""
1120+ return "`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}' | grep -o '[0-9]*'`".format(index + 1) # noqa
1121+
1122+ def test_services(self):
1123+ """Verify the expected services are running on the service units."""
1124+ ceph_services = ['status ceph-mon-all',
1125+ 'status ceph-mon id=`hostname`']
1126+ commands = {
1127+ self.mysql_sentry: ['status mysql'],
1128+ self.rabbitmq_sentry: ['sudo service rabbitmq-server status'],
1129+ self.nova_compute_sentry: ['status nova-compute'],
1130+ self.keystone_sentry: ['status keystone'],
1131+ self.glance_sentry: ['status glance-registry',
1132+ 'status glance-api'],
1133+ self.cinder_sentry: ['status cinder-api',
1134+ 'status cinder-scheduler',
1135+ 'status cinder-volume']
1136+ }
1137+ if self._get_openstack_release() >= self.precise_grizzly:
1138+ ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
1139+ ceph_osd1 = 'status ceph-osd id={}'.format(self._ceph_osd_id(1))
1140+ ceph_services.extend([ceph_osd0, ceph_osd1, 'status ceph-osd-all'])
1141+ commands[self.ceph0_sentry] = ceph_services
1142+ commands[self.ceph1_sentry] = ceph_services
1143+ commands[self.ceph2_sentry] = ceph_services
1144+ else:
1145+ ceph_osd0 = 'status ceph-osd id={}'.format(self._ceph_osd_id(0))
1146+ ceph_services.append(ceph_osd0)
1147+ commands[self.ceph0_sentry] = ceph_services
1148+ commands[self.ceph1_sentry] = ceph_services
1149+ commands[self.ceph2_sentry] = ceph_services
1150+
1151+ ret = u.validate_services(commands)
1152+ if ret:
1153+ amulet.raise_status(amulet.FAIL, msg=ret)
1154+
1155+ def test_ceph_nova_client_relation(self):
1156+ """Verify the ceph to nova ceph-client relation data."""
1157+ unit = self.ceph0_sentry
1158+ relation = ['client', 'nova-compute:ceph']
1159+ expected = {
1160+ 'private-address': u.valid_ip,
1161+ 'auth': 'none',
1162+ 'key': u.not_null
1163+ }
1164+
1165+ ret = u.validate_relation_data(unit, relation, expected)
1166+ if ret:
1167+ message = u.relation_error('ceph to nova ceph-client', ret)
1168+ amulet.raise_status(amulet.FAIL, msg=message)
1169+
1170+ def test_nova_ceph_client_relation(self):
1171+ """Verify the nova to ceph ceph-client relation data."""
1172+ unit = self.nova_compute_sentry
1173+ relation = ['ceph', 'ceph:client']
1174+ expected = {
1175+ 'private-address': u.valid_ip
1176+ }
1177+
1178+ ret = u.validate_relation_data(unit, relation, expected)
1179+ if ret:
1180+ message = u.relation_error('nova to ceph ceph-client', ret)
1181+ amulet.raise_status(amulet.FAIL, msg=message)
1182+
1183+ def test_ceph_glance_client_relation(self):
1184+ """Verify the ceph to glance ceph-client relation data."""
1185+ unit = self.ceph1_sentry
1186+ relation = ['client', 'glance:ceph']
1187+ expected = {
1188+ 'private-address': u.valid_ip,
1189+ 'auth': 'none',
1190+ 'key': u.not_null
1191+ }
1192+
1193+ ret = u.validate_relation_data(unit, relation, expected)
1194+ if ret:
1195+ message = u.relation_error('ceph to glance ceph-client', ret)
1196+ amulet.raise_status(amulet.FAIL, msg=message)
1197+
1198+ def test_glance_ceph_client_relation(self):
1199+ """Verify the glance to ceph ceph-client relation data."""
1200+ unit = self.glance_sentry
1201+ relation = ['ceph', 'ceph:client']
1202+ expected = {
1203+ 'private-address': u.valid_ip
1204+ }
1205+
1206+ ret = u.validate_relation_data(unit, relation, expected)
1207+ if ret:
1208+ message = u.relation_error('glance to ceph ceph-client', ret)
1209+ amulet.raise_status(amulet.FAIL, msg=message)
1210+
1211+ def test_ceph_cinder_client_relation(self):
1212+ """Verify the ceph to cinder ceph-client relation data."""
1213+ unit = self.ceph2_sentry
1214+ relation = ['client', 'cinder:ceph']
1215+ expected = {
1216+ 'private-address': u.valid_ip,
1217+ 'auth': 'none',
1218+ 'key': u.not_null
1219+ }
1220+
1221+ ret = u.validate_relation_data(unit, relation, expected)
1222+ if ret:
1223+ message = u.relation_error('ceph to cinder ceph-client', ret)
1224+ amulet.raise_status(amulet.FAIL, msg=message)
1225+
1226+ def test_cinder_ceph_client_relation(self):
1227+ """Verify the cinder to ceph ceph-client relation data."""
1228+ unit = self.cinder_sentry
1229+ relation = ['ceph', 'ceph:client']
1230+ expected = {
1231+ 'private-address': u.valid_ip
1232+ }
1233+
1234+ ret = u.validate_relation_data(unit, relation, expected)
1235+ if ret:
1236+ message = u.relation_error('cinder to ceph ceph-client', ret)
1237+ amulet.raise_status(amulet.FAIL, msg=message)
1238+
1239+ def test_ceph_config(self):
1240+ """Verify the data in the ceph config file."""
1241+ unit = self.ceph0_sentry
1242+ conf = '/etc/ceph/ceph.conf'
1243+ expected = {
1244+ 'global': {
1245+ 'keyring': '/etc/ceph/$cluster.$name.keyring',
1246+ 'fsid': '6547bd3e-1397-11e2-82e5-53567c8d32dc',
1247+ 'log to syslog': 'false',
1248+ 'err to syslog': 'false',
1249+ 'clog to syslog': 'false',
1250+ 'mon cluster log to syslog': 'false'
1251+ },
1252+ 'mon': {
1253+ 'keyring': '/var/lib/ceph/mon/$cluster-$id/keyring'
1254+ },
1255+ 'mds': {
1256+ 'keyring': '/var/lib/ceph/mds/$cluster-$id/keyring'
1257+ },
1258+ 'osd': {
1259+ 'keyring': '/var/lib/ceph/osd/$cluster-$id/keyring',
1260+ 'osd journal size': '1024',
1261+ 'filestore xattr use omap': 'true'
1262+ },
1263+ }
1264+ if self._get_openstack_release() >= self.precise_grizzly:
1265+ expected['global']['auth cluster required'] = 'none'
1266+ expected['global']['auth service required'] = 'none'
1267+ expected['global']['auth client required'] = 'none'
1268+ else:
1269+ expected['global']['auth supported'] = 'none'
1270+
1271+ for section, pairs in expected.iteritems():
1272+ ret = u.validate_config_data(unit, conf, section, pairs)
1273+ if ret:
1274+ message = "ceph config error: {}".format(ret)
1275+ amulet.raise_status(amulet.FAIL, msg=message)
1276+
1277+ def test_restart_on_config_change(self):
1278+ """Verify the specified services are restarted on config change."""
1279+ # NOTE(coreycb): Test not implemented but should it be? ceph services
1280+ # aren't restarted by charm after config change. Should
1281+ # they be restarted?
1282+ if self._get_openstack_release() >= self.precise_essex:
1283+ u.log.error("Test not implemented")
1284+ return
1285
1286=== added directory 'tests/charmhelpers'
1287=== added file 'tests/charmhelpers/__init__.py'
1288=== added directory 'tests/charmhelpers/contrib'
1289=== added file 'tests/charmhelpers/contrib/__init__.py'
1290=== added directory 'tests/charmhelpers/contrib/amulet'
1291=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
1292=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
1293--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
1294+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-08-26 02:07:33 +0000
1295@@ -0,0 +1,71 @@
1296+import amulet
1297+
1298+import os
1299+
1300+
1301+class AmuletDeployment(object):
1302+ """Amulet deployment.
1303+
1304+ This class provides generic Amulet deployment and test runner
1305+ methods.
1306+ """
1307+
1308+ def __init__(self, series=None):
1309+ """Initialize the deployment environment."""
1310+ self.series = None
1311+
1312+ if series:
1313+ self.series = series
1314+ self.d = amulet.Deployment(series=self.series)
1315+ else:
1316+ self.d = amulet.Deployment()
1317+
1318+ def _add_services(self, this_service, other_services):
1319+ """Add services.
1320+
1321+ Add services to the deployment where this_service is the local charm
1322+ that we're focused on testing and other_services are the other
1323+ charms that come from the charm store.
1324+ """
1325+ name, units = range(2)
1326+
1327+ if this_service[name] != os.path.basename(os.getcwd()):
1328+ s = this_service[name]
1329+ msg = "The charm's root directory name needs to be {}".format(s)
1330+ amulet.raise_status(amulet.FAIL, msg=msg)
1331+
1332+ self.d.add(this_service[name], units=this_service[units])
1333+
1334+ for svc in other_services:
1335+ if self.series:
1336+ self.d.add(svc[name],
1337+ charm='cs:{}/{}'.format(self.series, svc[name]),
1338+ units=svc[units])
1339+ else:
1340+ self.d.add(svc[name], units=svc[units])
1341+
1342+ def _add_relations(self, relations):
1343+ """Add all of the relations for the services."""
1344+ for k, v in relations.iteritems():
1345+ self.d.relate(k, v)
1346+
1347+ def _configure_services(self, configs):
1348+ """Configure all of the services."""
1349+ for service, config in configs.iteritems():
1350+ self.d.configure(service, config)
1351+
1352+ def _deploy(self):
1353+ """Deploy environment and wait for all hooks to finish executing."""
1354+ try:
1355+ self.d.setup()
1356+ self.d.sentry.wait(timeout=900)
1357+ except amulet.helpers.TimeoutError:
1358+ amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
1359+ except Exception:
1360+ raise
1361+
1362+ def run_tests(self):
1363+ """Run all of the methods that are prefixed with 'test_'."""
1364+ for test in dir(self):
1365+ if test.startswith('test_'):
1366+ getattr(self, test)()
1367
1368=== added file 'tests/charmhelpers/contrib/amulet/utils.py'
1369--- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000
1370+++ tests/charmhelpers/contrib/amulet/utils.py 2014-08-26 02:07:33 +0000
1371@@ -0,0 +1,176 @@
1372+import ConfigParser
1373+import io
1374+import logging
1375+import re
1376+import sys
1377+import time
1378+
1379+
1380+class AmuletUtils(object):
1381+ """Amulet utilities.
1382+
1383+ This class provides common utility functions that are used by Amulet
1384+ tests.
1385+ """
1386+
1387+ def __init__(self, log_level=logging.ERROR):
1388+ self.log = self.get_logger(level=log_level)
1389+
1390+ def get_logger(self, name="amulet-logger", level=logging.DEBUG):
1391+ """Get a logger object that will log to stdout."""
1392+ log = logging
1393+ logger = log.getLogger(name)
1394+ fmt = log.Formatter("%(asctime)s %(funcName)s "
1395+ "%(levelname)s: %(message)s")
1396+
1397+ handler = log.StreamHandler(stream=sys.stdout)
1398+ handler.setLevel(level)
1399+ handler.setFormatter(fmt)
1400+
1401+ logger.addHandler(handler)
1402+ logger.setLevel(level)
1403+
1404+ return logger
1405+
1406+ def valid_ip(self, ip):
1407+ if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
1408+ return True
1409+ else:
1410+ return False
1411+
1412+ def valid_url(self, url):
1413+ p = re.compile(
1414+ r'^(?:http|ftp)s?://'
1415+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
1416+ r'localhost|'
1417+ r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
1418+ r'(?::\d+)?'
1419+ r'(?:/?|[/?]\S+)$',
1420+ re.IGNORECASE)
1421+ if p.match(url):
1422+ return True
1423+ else:
1424+ return False
1425+
1426+ def validate_services(self, commands):
1427+ """Validate services.
1428+
1429+ Verify the specified services are running on the corresponding
1430+ service units.
1431+ """
1432+ for k, v in commands.iteritems():
1433+ for cmd in v:
1434+ output, code = k.run(cmd)
1435+ if code != 0:
1436+ return "command `{}` returned {}".format(cmd, str(code))
1437+ return None
1438+
1439+ def _get_config(self, unit, filename):
1440+ """Get a ConfigParser object for parsing a unit's config file."""
1441+ file_contents = unit.file_contents(filename)
1442+ config = ConfigParser.ConfigParser()
1443+ config.readfp(io.StringIO(file_contents))
1444+ return config
1445+
1446+ def validate_config_data(self, sentry_unit, config_file, section,
1447+ expected):
1448+ """Validate config file data.
1449+
1450+ Verify that the specified section of the config file contains
1451+ the expected option key:value pairs.
1452+ """
1453+ config = self._get_config(sentry_unit, config_file)
1454+
1455+ if section != 'DEFAULT' and not config.has_section(section):
1456+ return "section [{}] does not exist".format(section)
1457+
1458+ for k in expected.keys():
1459+ if not config.has_option(section, k):
1460+ return "section [{}] is missing option {}".format(section, k)
1461+ if config.get(section, k) != expected[k]:
1462+ return "section [{}] {}:{} != expected {}:{}".format(
1463+ section, k, config.get(section, k), k, expected[k])
1464+ return None
1465+
1466+ def _validate_dict_data(self, expected, actual):
1467+ """Validate dictionary data.
1468+
1469+ Compare expected dictionary data vs actual dictionary data.
1470+ The values in the 'expected' dictionary can be strings, bools, ints,
1471+ longs, or can be a function that evaluates a variable and returns a
1472+ bool.
1473+ """
1474+ for k, v in expected.iteritems():
1475+ if k in actual:
1476+ if (isinstance(v, basestring) or
1477+ isinstance(v, bool) or
1478+ isinstance(v, (int, long))):
1479+ if v != actual[k]:
1480+ return "{}:{}".format(k, actual[k])
1481+ elif not v(actual[k]):
1482+ return "{}:{}".format(k, actual[k])
1483+ else:
1484+ return "key '{}' does not exist".format(k)
1485+ return None
1486+
1487+ def validate_relation_data(self, sentry_unit, relation, expected):
1488+ """Validate actual relation data based on expected relation data."""
1489+ actual = sentry_unit.relation(relation[0], relation[1])
1490+ self.log.debug('actual: {}'.format(repr(actual)))
1491+ return self._validate_dict_data(expected, actual)
1492+
1493+ def _validate_list_data(self, expected, actual):
1494+ """Compare expected list vs actual list data."""
1495+ for e in expected:
1496+ if e not in actual:
1497+ return "expected item {} not found in actual list".format(e)
1498+ return None
1499+
1500+ def not_null(self, string):
1501+ if string is not None:
1502+ return True
1503+ else:
1504+ return False
1505+
1506+ def _get_file_mtime(self, sentry_unit, filename):
1507+ """Get last modification time of file."""
1508+ return sentry_unit.file_stat(filename)['mtime']
1509+
1510+ def _get_dir_mtime(self, sentry_unit, directory):
1511+ """Get last modification time of directory."""
1512+ return sentry_unit.directory_stat(directory)['mtime']
1513+
1514+ def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
1515+ """Get process' start time.
1516+
1517+ Determine start time of the process based on the last modification
1518+ time of the /proc/pid directory. If pgrep_full is True, the process
1519+ name is matched against the full command line.
1520+ """
1521+ if pgrep_full:
1522+ cmd = 'pgrep -o -f {}'.format(service)
1523+ else:
1524+ cmd = 'pgrep -o {}'.format(service)
1525+ proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
1526+ return self._get_dir_mtime(sentry_unit, proc_dir)
1527+
1528+ def service_restarted(self, sentry_unit, service, filename,
1529+ pgrep_full=False, sleep_time=20):
1530+ """Check if service was restarted.
1531+
1532+ Compare a service's start time vs a file's last modification time
1533+ (such as a config file for that service) to determine if the service
1534+ has been restarted.
1535+ """
1536+ time.sleep(sleep_time)
1537+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
1538+ self._get_file_mtime(sentry_unit, filename)):
1539+ return True
1540+ else:
1541+ return False
1542+
1543+ def relation_error(self, name, data):
1544+ return 'unexpected relation data in {} - {}'.format(name, data)
1545+
1546+ def endpoint_error(self, name, data):
1547+ return 'unexpected endpoint data in {} - {}'.format(name, data)
1548
1549=== added directory 'tests/charmhelpers/contrib/openstack'
1550=== added file 'tests/charmhelpers/contrib/openstack/__init__.py'
1551=== added directory 'tests/charmhelpers/contrib/openstack/amulet'
1552=== added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py'
1553=== added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1554--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
1555+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-26 02:07:33 +0000
1556@@ -0,0 +1,61 @@
1557+from charmhelpers.contrib.amulet.deployment import (
1558+ AmuletDeployment
1559+)
1560+
1561+
1562+class OpenStackAmuletDeployment(AmuletDeployment):
1563+ """OpenStack amulet deployment.
1564+
1565+ This class inherits from AmuletDeployment and has additional support
1566+ that is specifically for use by OpenStack charms.
1567+ """
1568+
1569+ def __init__(self, series=None, openstack=None, source=None):
1570+ """Initialize the deployment environment."""
1571+ super(OpenStackAmuletDeployment, self).__init__(series)
1572+ self.openstack = openstack
1573+ self.source = source
1574+
1575+ def _add_services(self, this_service, other_services):
1576+ """Add services to the deployment and set openstack-origin."""
1577+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
1578+ other_services)
1579+ name = 0
1580+ services = other_services
1581+ services.append(this_service)
1582+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
1583+
1584+ if self.openstack:
1585+ for svc in services:
1586+ if svc[name] not in use_source:
1587+ config = {'openstack-origin': self.openstack}
1588+ self.d.configure(svc[name], config)
1589+
1590+ if self.source:
1591+ for svc in services:
1592+ if svc[name] in use_source:
1593+ config = {'source': self.source}
1594+ self.d.configure(svc[name], config)
1595+
1596+ def _configure_services(self, configs):
1597+ """Configure all of the services."""
1598+ for service, config in configs.iteritems():
1599+ self.d.configure(service, config)
1600+
1601+ def _get_openstack_release(self):
1602+ """Get openstack release.
1603+
1604+ Return an integer representing the enum value of the openstack
1605+ release.
1606+ """
1607+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
1608+ self.precise_havana, self.precise_icehouse,
1609+ self.trusty_icehouse) = range(6)
1610+ releases = {
1611+ ('precise', None): self.precise_essex,
1612+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
1613+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
1614+ ('precise', 'cloud:precise-havana'): self.precise_havana,
1615+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
1616+ ('trusty', None): self.trusty_icehouse}
1617+ return releases[(self.series, self.openstack)]
1618
1619=== added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
1620--- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
1621+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-26 02:07:33 +0000
1622@@ -0,0 +1,275 @@
1623+import logging
1624+import os
1625+import time
1626+import urllib
1627+
1628+import glanceclient.v1.client as glance_client
1629+import keystoneclient.v2_0 as keystone_client
1630+import novaclient.v1_1.client as nova_client
1631+
1632+from charmhelpers.contrib.amulet.utils import (
1633+ AmuletUtils
1634+)
1635+
1636+DEBUG = logging.DEBUG
1637+ERROR = logging.ERROR
1638+
1639+
1640+class OpenStackAmuletUtils(AmuletUtils):
1641+ """OpenStack amulet utilities.
1642+
1643+ This class inherits from AmuletUtils and has additional support
1644+ that is specifically for use by OpenStack charms.
1645+ """
1646+
1647+ def __init__(self, log_level=ERROR):
1648+ """Initialize the deployment environment."""
1649+ super(OpenStackAmuletUtils, self).__init__(log_level)
1650+
1651+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
1652+ public_port, expected):
1653+ """Validate endpoint data.
1654+
1655+ Validate actual endpoint data vs expected endpoint data. The ports
1656+ are used to find the matching endpoint.
1657+ """
1658+ found = False
1659+ for ep in endpoints:
1660+ self.log.debug('endpoint: {}'.format(repr(ep)))
1661+ if (admin_port in ep.adminurl and
1662+ internal_port in ep.internalurl and
1663+ public_port in ep.publicurl):
1664+ found = True
1665+ actual = {'id': ep.id,
1666+ 'region': ep.region,
1667+ 'adminurl': ep.adminurl,
1668+ 'internalurl': ep.internalurl,
1669+ 'publicurl': ep.publicurl,
1670+ 'service_id': ep.service_id}
1671+ ret = self._validate_dict_data(expected, actual)
1672+ if ret:
1673+ return 'unexpected endpoint data - {}'.format(ret)
1674+
1675+ if not found:
1676+ return 'endpoint not found'
1677+
1678+ def validate_svc_catalog_endpoint_data(self, expected, actual):
1679+ """Validate service catalog endpoint data.
1680+
1681+ Validate a list of actual service catalog endpoints vs a list of
1682+ expected service catalog endpoints.
1683+ """
1684+ self.log.debug('actual: {}'.format(repr(actual)))
1685+ for k, v in expected.iteritems():
1686+ if k in actual:
1687+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
1688+ if ret:
1689+ return self.endpoint_error(k, ret)
1690+ else:
1691+ return "endpoint {} does not exist".format(k)
1692+ return ret
1693+
def validate_tenant_data(self, expected, actual):
    """Validate tenant data.

    Validate a list of actual tenant data vs list of expected tenant
    data.  Returns an error string on mismatching or missing tenant
    data, otherwise None.
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    # Initialise so an empty *expected* list returns None instead of
    # raising NameError on the final `return ret`.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            a = {'enabled': act.enabled, 'description': act.description,
                 'name': act.name, 'id': act.id}
            if e['name'] == a['name']:
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected tenant data - {}".format(ret)
        if not found:
            return "tenant {} does not exist".format(e['name'])
    return ret
def validate_role_data(self, expected, actual):
    """Validate role data.

    Validate a list of actual role data vs a list of expected role
    data.  Returns an error string on mismatching or missing role
    data, otherwise None.
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    # Initialise so an empty *expected* list returns None instead of
    # raising NameError on the final `return ret`.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            a = {'name': act.name, 'id': act.id}
            if e['name'] == a['name']:
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected role data - {}".format(ret)
        if not found:
            return "role {} does not exist".format(e['name'])
    return ret
def validate_user_data(self, expected, actual):
    """Validate user data.

    Validate a list of actual user data vs a list of expected user
    data.  Returns an error string on mismatching or missing user
    data, otherwise None.
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    # Initialise so an empty *expected* list returns None instead of
    # raising NameError on the final `return ret`.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            a = {'enabled': act.enabled, 'name': act.name,
                 'email': act.email, 'tenantId': act.tenantId,
                 'id': act.id}
            if e['name'] == a['name']:
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected user data - {}".format(ret)
        if not found:
            return "user {} does not exist".format(e['name'])
    return ret
def validate_flavor_data(self, expected, actual):
    """Validate flavor data.

    Validate a list of actual flavors vs a list of expected flavors.
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    # Compare by flavor name only; other attributes are not checked.
    flavor_names = [flavor.name for flavor in actual]
    return self._validate_list_data(expected, flavor_names)
def tenant_exists(self, keystone, tenant):
    """Return True if tenant exists."""
    names = (t.name for t in keystone.tenants.list())
    return tenant in names
def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                tenant):
    """Authenticates admin user with the keystone admin endpoint."""
    unit = keystone_sentry
    # The keystone unit's private address is published on its shared-db
    # relation with mysql; use it to build the admin (port 35357,
    # v2.0) endpoint URL.
    relation_data = unit.relation('shared-db', 'mysql:shared-db')
    service_ip = relation_data['private-address']
    ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
    return keystone_client.Client(auth_url=ep, username=user,
                                  password=password, tenant_name=tenant)
def authenticate_keystone_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with the keystone public endpoint."""
    # Resolve the public identity endpoint from the service catalog of
    # an already-authenticated client.
    public_ep = keystone.service_catalog.url_for(
        service_type='identity', endpoint_type='publicURL')
    return keystone_client.Client(auth_url=public_ep, username=user,
                                  password=password, tenant_name=tenant)
def authenticate_glance_admin(self, keystone):
    """Authenticates admin user with glance."""
    # Look up glance's admin endpoint in the catalog and reuse the
    # keystone client's existing token.
    admin_ep = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='adminURL')
    return glance_client.Client(admin_ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with nova-api."""
    # nova authenticates against the public identity endpoint.
    public_ep = keystone.service_catalog.url_for(
        service_type='identity', endpoint_type='publicURL')
    return nova_client.Client(auth_url=public_ep, username=user,
                              api_key=password, project_id=tenant)
def create_cirros_image(self, glance, image_name):
    """Download the latest cirros image and upload it to glance.

    The image file is cached under tests/ so repeated runs do not
    re-download it.  Returns the glance image on success, or None if
    the image never reaches 'active' state within ~30s.
    """
    http_proxy = os.getenv('AMULET_HTTP_PROXY')
    self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
    if http_proxy:
        proxies = {'http': http_proxy}
        opener = urllib.FancyURLopener(proxies)
    else:
        opener = urllib.FancyURLopener()

    f = opener.open("http://download.cirros-cloud.net/version/released")
    version = f.read().strip()
    cirros_img = "cirros-{}-x86_64-disk.img".format(version)
    local_path = os.path.join('tests', cirros_img)

    if not os.path.exists(local_path):
        # Build the URL from the bare image name; previously the local
        # 'tests/...' path was spliced into the URL, producing
        # .../{version}/tests/cirros-...img which does not exist.
        cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                              version, cirros_img)
        opener.retrieve(cirros_url, local_path)
    f.close()

    # Open in binary mode: the image is qcow2 data, not text.
    with open(local_path, 'rb') as f:
        image = glance.images.create(name=image_name, is_public=True,
                                     disk_format='qcow2',
                                     container_format='bare', data=f)
    count = 1
    status = image.status
    while status != 'active' and count < 10:
        time.sleep(3)
        image = glance.images.get(image.id)
        status = image.status
        self.log.debug('image status: {}'.format(status))
        count += 1

    if status != 'active':
        self.log.error('image creation timed out')
        return None

    return image
def delete_image(self, glance, image):
    """Delete the specified image.

    Polls glance until the image count drops by one; returns True on
    success, False if deletion did not complete within ~30s.
    """
    num_before = len(list(glance.images.list()))
    glance.images.delete(image)

    attempts = 1
    num_after = len(list(glance.images.list()))
    while num_after != num_before - 1 and attempts < 10:
        time.sleep(3)
        num_after = len(list(glance.images.list()))
        self.log.debug('number of images: {}'.format(num_after))
        attempts += 1

    if num_after != num_before - 1:
        self.log.error('image deletion timed out')
        return False

    return True
def create_instance(self, nova, image_name, instance_name, flavor):
    """Create the specified instance.

    Boots *instance_name* from *image_name* with *flavor*, then polls
    for up to ~3 minutes until it is ACTIVE.  Returns the server on
    success, None on timeout.
    """
    image = nova.images.find(name=image_name)
    flavor = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=image,
                                   flavor=flavor)

    # Up to 59 polls at 3s intervals, matching the original count<60
    # loop bound.
    for _ in range(59):
        if instance.status == 'ACTIVE':
            break
        time.sleep(3)
        instance = nova.servers.get(instance.id)
        self.log.debug('instance status: {}'.format(instance.status))

    if instance.status != 'ACTIVE':
        self.log.error('instance creation timed out')
        return None

    return instance
def delete_instance(self, nova, instance):
    """Delete the specified instance.

    Polls nova until the server count drops by one; returns True on
    success, False if deletion did not complete within ~30s.
    """
    num_before = len(list(nova.servers.list()))
    nova.servers.delete(instance)

    attempts = 1
    num_after = len(list(nova.servers.list()))
    while num_after != num_before - 1 and attempts < 10:
        time.sleep(3)
        num_after = len(list(nova.servers.list()))
        self.log.debug('number of instances: {}'.format(num_after))
        attempts += 1

    if num_after != num_before - 1:
        self.log.error('instance deletion timed out')
        return False

    return True

Subscribers

People subscribed via source and target branches