Merge lp:~gnuoy/charms/trusty/glance/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/glance/next

Proposed by Liam Young
Status: Merged
Merged at revision: 56
Proposed branch: lp:~gnuoy/charms/trusty/glance/next-charm-sync
Merge into: lp:~openstack-charmers-archive/charms/trusty/glance/next
Diff against target: 1530 lines (+897/-113)
17 files modified
hooks/charmhelpers/contrib/hahelpers/cluster.py (+55/-13)
hooks/charmhelpers/contrib/network/ip.py (+19/-1)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20)
hooks/charmhelpers/contrib/openstack/context.py (+31/-4)
hooks/charmhelpers/contrib/openstack/ip.py (+7/-3)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3)
hooks/charmhelpers/core/host.py (+34/-1)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+305/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+1/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+20/-7)
tests/charmhelpers/contrib/amulet/utils.py (+46/-27)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/glance/next-charm-sync
Reviewer: Liam Young (community)
Status: Approve
Review via email: mp+230636@code.launchpad.net
Liam Young (gnuoy) wrote:

Approved by jamespage

review: Approve

Preview Diff

1=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
2--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-25 09:37:25 +0000
3+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-08-13 13:59:42 +0000
4@@ -6,6 +6,11 @@
5 # Adam Gandelman <adamg@ubuntu.com>
6 #
7
8+"""
9+Helpers for clustering and determining "cluster leadership" and other
10+clustering-related helpers.
11+"""
12+
13 import subprocess
14 import os
15
16@@ -19,6 +24,7 @@
17 config as config_get,
18 INFO,
19 ERROR,
20+ WARNING,
21 unit_get,
22 )
23
24@@ -27,6 +33,29 @@
25 pass
26
27
28+def is_elected_leader(resource):
29+ """
30+ Returns True if the charm executing this is the elected cluster leader.
31+
32+ It relies on two mechanisms to determine leadership:
33+ 1. If the charm is part of a corosync cluster, call corosync to
34+ determine leadership.
35+ 2. If the charm is not part of a corosync cluster, the leader is
36+ determined as being "the alive unit with the lowest unit numer". In
37+ other words, the oldest surviving unit.
38+ """
39+ if is_clustered():
40+ if not is_crm_leader(resource):
41+ log('Deferring action to CRM leader.', level=INFO)
42+ return False
43+ else:
44+ peers = peer_units()
45+ if peers and not oldest_peer(peers):
46+ log('Deferring action to oldest service unit.', level=INFO)
47+ return False
48+ return True
49+
50+
51 def is_clustered():
52 for r_id in (relation_ids('ha') or []):
53 for unit in (relation_list(r_id) or []):
54@@ -38,7 +67,11 @@
55 return False
56
57
58-def is_leader(resource):
59+def is_crm_leader(resource):
60+ """
61+ Returns True if the charm calling this is the elected corosync leader,
62+ as returned by calling the external "crm" command.
63+ """
64 cmd = [
65 "crm", "resource",
66 "show", resource
67@@ -54,15 +87,31 @@
68 return False
69
70
71-def peer_units():
72+def is_leader(resource):
73+ log("is_leader is deprecated. Please consider using is_crm_leader "
74+ "instead.", level=WARNING)
75+ return is_crm_leader(resource)
76+
77+
78+def peer_units(peer_relation="cluster"):
79 peers = []
80- for r_id in (relation_ids('cluster') or []):
81+ for r_id in (relation_ids(peer_relation) or []):
82 for unit in (relation_list(r_id) or []):
83 peers.append(unit)
84 return peers
85
86
87+def peer_ips(peer_relation='cluster', addr_key='private-address'):
88+ '''Return a dict of peers and their private-address'''
89+ peers = {}
90+ for r_id in relation_ids(peer_relation):
91+ for unit in relation_list(r_id):
92+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
93+ return peers
94+
95+
96 def oldest_peer(peers):
97+ """Determines who the oldest peer is by comparing unit numbers."""
98 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
99 for peer in peers:
100 remote_unit_no = int(peer.split('/')[1])
101@@ -72,16 +121,9 @@
102
103
104 def eligible_leader(resource):
105- if is_clustered():
106- if not is_leader(resource):
107- log('Deferring action to CRM leader.', level=INFO)
108- return False
109- else:
110- peers = peer_units()
111- if peers and not oldest_peer(peers):
112- log('Deferring action to oldest service unit.', level=INFO)
113- return False
114- return True
115+ log("eligible_leader is deprecated. Please consider using "
116+ "is_elected_leader instead.", level=WARNING)
117+ return is_elected_leader(resource)
118
119
120 def https():
121
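For context, a minimal sketch of how a charm hook might use the renamed helper above; the hook name and the 'res_glance_vip' resource are hypothetical, only the imports come from the module patched here:

    from charmhelpers.core.hookenv import log
    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    def cluster_relation_changed():
        # 'res_glance_vip' is a hypothetical corosync resource name; under
        # hacluster it is checked via crm, otherwise the oldest peer wins.
        if is_elected_leader('res_glance_vip'):
            log('Elected leader: running leader-only tasks.')
        else:
            log('Not the leader: skipping leader-only tasks.')
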
122=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
123--- hooks/charmhelpers/contrib/network/ip.py 2014-07-24 10:26:34 +0000
124+++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 13:59:42 +0000
125@@ -4,7 +4,7 @@
126
127 from charmhelpers.fetch import apt_install
128 from charmhelpers.core.hookenv import (
129- ERROR, log,
130+ ERROR, log, config,
131 )
132
133 try:
134@@ -154,3 +154,21 @@
135 get_iface_for_address = partial(_get_for_address, key='iface')
136
137 get_netmask_for_address = partial(_get_for_address, key='netmask')
138+
139+
140+def get_ipv6_addr(iface="eth0"):
141+ try:
142+ iface_addrs = netifaces.ifaddresses(iface)
143+ if netifaces.AF_INET6 not in iface_addrs:
144+ raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
145+
146+ addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
147+ ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
148+ and config('vip') != a['addr']]
149+ if not ipv6_addr:
150+ raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
151+
152+ return ipv6_addr[0]
153+
154+ except ValueError:
155+ raise ValueError("Invalid interface '%s'" % iface)
156
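A short, hedged example of calling the new get_ipv6_addr helper; the interface name is illustrative and the call only succeeds on a unit that actually has a global IPv6 address:

    from charmhelpers.contrib.network.ip import get_ipv6_addr

    try:
        # Returns the first non-link-local IPv6 address on the interface
        # that is not the configured vip.
        addr = get_ipv6_addr(iface='eth0')
    except Exception:
        # Raised when the interface is invalid or has no global IPv6 address.
        addr = None
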
157=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
158--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-10 21:43:51 +0000
159+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 13:59:42 +0000
160@@ -4,8 +4,11 @@
161
162
163 class OpenStackAmuletDeployment(AmuletDeployment):
164- """This class inherits from AmuletDeployment and has additional support
165- that is specifically for use by OpenStack charms."""
166+ """OpenStack amulet deployment.
167+
168+ This class inherits from AmuletDeployment and has additional support
169+ that is specifically for use by OpenStack charms.
170+ """
171
172 def __init__(self, series=None, openstack=None, source=None):
173 """Initialize the deployment environment."""
174@@ -40,11 +43,14 @@
175 self.d.configure(service, config)
176
177 def _get_openstack_release(self):
178- """Return an integer representing the enum value of the openstack
179- release."""
180- self.precise_essex, self.precise_folsom, self.precise_grizzly, \
181- self.precise_havana, self.precise_icehouse, \
182- self.trusty_icehouse = range(6)
183+ """Get openstack release.
184+
185+ Return an integer representing the enum value of the openstack
186+ release.
187+ """
188+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
189+ self.precise_havana, self.precise_icehouse,
190+ self.trusty_icehouse) = range(6)
191 releases = {
192 ('precise', None): self.precise_essex,
193 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
194
195=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
196--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-10 21:43:51 +0000
197+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 13:59:42 +0000
198@@ -16,8 +16,11 @@
199
200
201 class OpenStackAmuletUtils(AmuletUtils):
202- """This class inherits from AmuletUtils and has additional support
203- that is specifically for use by OpenStack charms."""
204+ """OpenStack amulet utilities.
205+
206+ This class inherits from AmuletUtils and has additional support
207+ that is specifically for use by OpenStack charms.
208+ """
209
210 def __init__(self, log_level=ERROR):
211 """Initialize the deployment environment."""
212@@ -25,13 +28,17 @@
213
214 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
215 public_port, expected):
216- """Validate actual endpoint data vs expected endpoint data. The ports
217- are used to find the matching endpoint."""
218+ """Validate endpoint data.
219+
220+ Validate actual endpoint data vs expected endpoint data. The ports
221+ are used to find the matching endpoint.
222+ """
223 found = False
224 for ep in endpoints:
225 self.log.debug('endpoint: {}'.format(repr(ep)))
226- if admin_port in ep.adminurl and internal_port in ep.internalurl \
227- and public_port in ep.publicurl:
228+ if (admin_port in ep.adminurl and
229+ internal_port in ep.internalurl and
230+ public_port in ep.publicurl):
231 found = True
232 actual = {'id': ep.id,
233 'region': ep.region,
234@@ -47,8 +54,11 @@
235 return 'endpoint not found'
236
237 def validate_svc_catalog_endpoint_data(self, expected, actual):
238- """Validate a list of actual service catalog endpoints vs a list of
239- expected service catalog endpoints."""
240+ """Validate service catalog endpoint data.
241+
242+ Validate a list of actual service catalog endpoints vs a list of
243+ expected service catalog endpoints.
244+ """
245 self.log.debug('actual: {}'.format(repr(actual)))
246 for k, v in expected.iteritems():
247 if k in actual:
248@@ -60,8 +70,11 @@
249 return ret
250
251 def validate_tenant_data(self, expected, actual):
252- """Validate a list of actual tenant data vs list of expected tenant
253- data."""
254+ """Validate tenant data.
255+
256+ Validate a list of actual tenant data vs list of expected tenant
257+ data.
258+ """
259 self.log.debug('actual: {}'.format(repr(actual)))
260 for e in expected:
261 found = False
262@@ -78,8 +91,11 @@
263 return ret
264
265 def validate_role_data(self, expected, actual):
266- """Validate a list of actual role data vs a list of expected role
267- data."""
268+ """Validate role data.
269+
270+ Validate a list of actual role data vs a list of expected role
271+ data.
272+ """
273 self.log.debug('actual: {}'.format(repr(actual)))
274 for e in expected:
275 found = False
276@@ -95,8 +111,11 @@
277 return ret
278
279 def validate_user_data(self, expected, actual):
280- """Validate a list of actual user data vs a list of expected user
281- data."""
282+ """Validate user data.
283+
284+ Validate a list of actual user data vs a list of expected user
285+ data.
286+ """
287 self.log.debug('actual: {}'.format(repr(actual)))
288 for e in expected:
289 found = False
290@@ -114,21 +133,24 @@
291 return ret
292
293 def validate_flavor_data(self, expected, actual):
294- """Validate a list of actual flavors vs a list of expected flavors."""
295+ """Validate flavor data.
296+
297+ Validate a list of actual flavors vs a list of expected flavors.
298+ """
299 self.log.debug('actual: {}'.format(repr(actual)))
300 act = [a.name for a in actual]
301 return self._validate_list_data(expected, act)
302
303 def tenant_exists(self, keystone, tenant):
304- """Return True if tenant exists"""
305+ """Return True if tenant exists."""
306 return tenant in [t.name for t in keystone.tenants.list()]
307
308 def authenticate_keystone_admin(self, keystone_sentry, user, password,
309 tenant):
310 """Authenticates admin user with the keystone admin endpoint."""
311- service_ip = \
312- keystone_sentry.relation('shared-db',
313- 'mysql:shared-db')['private-address']
314+ unit = keystone_sentry
315+ service_ip = unit.relation('shared-db',
316+ 'mysql:shared-db')['private-address']
317 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
318 return keystone_client.Client(username=user, password=password,
319 tenant_name=tenant, auth_url=ep)
320@@ -177,12 +199,40 @@
321 image = glance.images.create(name=image_name, is_public=True,
322 disk_format='qcow2',
323 container_format='bare', data=f)
324+ count = 1
325+ status = image.status
326+ while status != 'active' and count < 10:
327+ time.sleep(3)
328+ image = glance.images.get(image.id)
329+ status = image.status
330+ self.log.debug('image status: {}'.format(status))
331+ count += 1
332+
333+ if status != 'active':
334+ self.log.error('image creation timed out')
335+ return None
336+
337 return image
338
339 def delete_image(self, glance, image):
340 """Delete the specified image."""
341+ num_before = len(list(glance.images.list()))
342 glance.images.delete(image)
343
344+ count = 1
345+ num_after = len(list(glance.images.list()))
346+ while num_after != (num_before - 1) and count < 10:
347+ time.sleep(3)
348+ num_after = len(list(glance.images.list()))
349+ self.log.debug('number of images: {}'.format(num_after))
350+ count += 1
351+
352+ if num_after != (num_before - 1):
353+ self.log.error('image deletion timed out')
354+ return False
355+
356+ return True
357+
358 def create_instance(self, nova, image_name, instance_name, flavor):
359 """Create the specified instance."""
360 image = nova.images.find(name=image_name)
361@@ -199,11 +249,27 @@
362 self.log.debug('instance status: {}'.format(status))
363 count += 1
364
365- if status == 'BUILD':
366+ if status != 'ACTIVE':
367+ self.log.error('instance creation timed out')
368 return None
369
370 return instance
371
372 def delete_instance(self, nova, instance):
373 """Delete the specified instance."""
374+ num_before = len(list(nova.servers.list()))
375 nova.servers.delete(instance)
376+
377+ count = 1
378+ num_after = len(list(nova.servers.list()))
379+ while num_after != (num_before - 1) and count < 10:
380+ time.sleep(3)
381+ num_after = len(list(nova.servers.list()))
382+ self.log.debug('number of instances: {}'.format(num_after))
383+ count += 1
384+
385+ if num_after != (num_before - 1):
386+ self.log.error('instance deletion timed out')
387+ return False
388+
389+ return True
390
391=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
392--- hooks/charmhelpers/contrib/openstack/context.py 2014-07-25 09:37:25 +0000
393+++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 13:59:42 +0000
394@@ -44,7 +44,10 @@
395 neutron_plugin_attribute,
396 )
397
398-from charmhelpers.contrib.network.ip import get_address_in_network
399+from charmhelpers.contrib.network.ip import (
400+ get_address_in_network,
401+ get_ipv6_addr,
402+)
403
404 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
405
406@@ -401,9 +404,12 @@
407
408 cluster_hosts = {}
409 l_unit = local_unit().replace('/', '-')
410- cluster_hosts[l_unit] = \
411- get_address_in_network(config('os-internal-network'),
412- unit_get('private-address'))
413+ if config('prefer-ipv6'):
414+ addr = get_ipv6_addr()
415+ else:
416+ addr = unit_get('private-address')
417+ cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
418+ addr)
419
420 for rid in relation_ids('cluster'):
421 for unit in related_units(rid):
422@@ -414,6 +420,16 @@
423 ctxt = {
424 'units': cluster_hosts,
425 }
426+
427+ if config('prefer-ipv6'):
428+ ctxt['local_host'] = 'ip6-localhost'
429+ ctxt['haproxy_host'] = '::'
430+ ctxt['stat_port'] = ':::8888'
431+ else:
432+ ctxt['local_host'] = '127.0.0.1'
433+ ctxt['haproxy_host'] = '0.0.0.0'
434+ ctxt['stat_port'] = ':8888'
435+
436 if len(cluster_hosts.keys()) > 1:
437 # Enable haproxy when we have enough peers.
438 log('Ensuring haproxy enabled in /etc/default/haproxy.')
439@@ -753,6 +769,17 @@
440 return ctxt
441
442
443+class LogLevelContext(OSContextGenerator):
444+
445+ def __call__(self):
446+ ctxt = {}
447+ ctxt['debug'] = \
448+ False if config('debug') is None else config('debug')
449+ ctxt['verbose'] = \
450+ False if config('verbose') is None else config('verbose')
451+ return ctxt
452+
453+
454 class SyslogContext(OSContextGenerator):
455
456 def __call__(self):
457
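The new LogLevelContext can be exercised on its own; a minimal sketch, assuming the charm exposes boolean 'debug' and 'verbose' config options:

    from charmhelpers.contrib.openstack.context import LogLevelContext

    # Calling the context returns a dict for the template renderer; both keys
    # default to False when the corresponding config option is unset.
    ctxt = LogLevelContext()()
    # ctxt == {'debug': False, 'verbose': False} on a default deployment
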
458=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
459--- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-03 15:31:54 +0000
460+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 13:59:42 +0000
461@@ -7,6 +7,7 @@
462 get_address_in_network,
463 is_address_in_network,
464 is_ipv6,
465+ get_ipv6_addr,
466 )
467
468 from charmhelpers.contrib.hahelpers.cluster import is_clustered
469@@ -64,10 +65,13 @@
470 vip):
471 resolved_address = vip
472 else:
473+ if config('prefer-ipv6'):
474+ fallback_addr = get_ipv6_addr()
475+ else:
476+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
477 resolved_address = get_address_in_network(
478- config(_address_map[endpoint_type]['config']),
479- unit_get(_address_map[endpoint_type]['fallback'])
480- )
481+ config(_address_map[endpoint_type]['config']), fallback_addr)
482+
483 if resolved_address is None:
484 raise ValueError('Unable to resolve a suitable IP address'
485 ' based on charm state and configuration')
486
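A sketch of the caller's side of this change, assuming the module's usual resolve_address() entry point (the function containing the hunk above); with prefer-ipv6 set, the fallback address becomes the unit's global IPv6 address rather than unit_get('private-address'):

    from charmhelpers.contrib.openstack.ip import resolve_address, PUBLIC

    # Resolves the address to publish for the public endpoint: the vip when
    # clustered, an address in the configured network if any, or the
    # IPv4/IPv6 fallback selected above.
    public_addr = resolve_address(endpoint_type=PUBLIC)
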
487=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
488--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-03 13:03:26 +0000
489+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 13:59:42 +0000
490@@ -1,6 +1,6 @@
491 global
492- log 127.0.0.1 local0
493- log 127.0.0.1 local1 notice
494+ log {{ local_host }} local0
495+ log {{ local_host }} local1 notice
496 maxconn 20000
497 user haproxy
498 group haproxy
499@@ -17,7 +17,7 @@
500 timeout client 30000
501 timeout server 30000
502
503-listen stats :8888
504+listen stats {{ stat_port }}
505 mode http
506 stats enable
507 stats hide-version
508
509=== modified file 'hooks/charmhelpers/core/host.py'
510--- hooks/charmhelpers/core/host.py 2014-07-25 09:37:25 +0000
511+++ hooks/charmhelpers/core/host.py 2014-08-13 13:59:42 +0000
512@@ -12,6 +12,8 @@
513 import string
514 import subprocess
515 import hashlib
516+import shutil
517+from contextlib import contextmanager
518
519 from collections import OrderedDict
520
521@@ -52,7 +54,7 @@
522 def service_running(service):
523 """Determine whether a system service is running"""
524 try:
525- output = subprocess.check_output(['service', service, 'status'])
526+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
527 except subprocess.CalledProcessError:
528 return False
529 else:
530@@ -62,6 +64,16 @@
531 return False
532
533
534+def service_available(service_name):
535+ """Determine whether a system service is available"""
536+ try:
537+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
538+ except subprocess.CalledProcessError:
539+ return False
540+ else:
541+ return True
542+
543+
544 def adduser(username, password=None, shell='/bin/bash', system_user=False):
545 """Add a user to the system"""
546 try:
547@@ -329,3 +341,24 @@
548 pkgcache = apt_pkg.Cache()
549 pkg = pkgcache[package]
550 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
551+
552+
553+@contextmanager
554+def chdir(d):
555+ cur = os.getcwd()
556+ try:
557+ yield os.chdir(d)
558+ finally:
559+ os.chdir(cur)
560+
561+
562+def chownr(path, owner, group):
563+ uid = pwd.getpwnam(owner).pw_uid
564+ gid = grp.getgrnam(group).gr_gid
565+
566+ for root, dirs, files in os.walk(path):
567+ for name in dirs + files:
568+ full = os.path.join(root, name)
569+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
570+ if not broken_symlink:
571+ os.chown(full, uid, gid)
572
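A small sketch of the two new helpers in core/host.py; the paths are illustrative and chownr() normally needs root privileges:

    import tempfile
    from charmhelpers.core.host import chdir, chownr

    # chdir() is a context manager that restores the previous working
    # directory on exit, even if the block raises.
    with chdir(tempfile.gettempdir()):
        pass  # run commands relative to the temporary directory here

    # chownr() walks a tree and chowns every entry, skipping broken symlinks.
    scratch = tempfile.mkdtemp()
    chownr(scratch, owner='root', group='root')
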
573=== added directory 'hooks/charmhelpers/core/services'
574=== added file 'hooks/charmhelpers/core/services/__init__.py'
575--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
576+++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:59:42 +0000
577@@ -0,0 +1,2 @@
578+from .base import *
579+from .helpers import *
580
581=== added file 'hooks/charmhelpers/core/services/base.py'
582--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
583+++ hooks/charmhelpers/core/services/base.py 2014-08-13 13:59:42 +0000
584@@ -0,0 +1,305 @@
585+import os
586+import re
587+import json
588+from collections import Iterable
589+
590+from charmhelpers.core import host
591+from charmhelpers.core import hookenv
592+
593+
594+__all__ = ['ServiceManager', 'ManagerCallback',
595+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
596+ 'service_restart', 'service_stop']
597+
598+
599+class ServiceManager(object):
600+ def __init__(self, services=None):
601+ """
602+ Register a list of services, given their definitions.
603+
604+ Traditional charm authoring is focused on implementing hooks. That is,
605+ the charm author is thinking in terms of "What hook am I handling; what
606+ does this hook need to do?" However, in most cases, the real question
607+ should be "Do I have the information I need to configure and start this
608+ piece of software and, if so, what are the steps for doing so?" The
609+ ServiceManager framework tries to bring the focus to the data and the
610+ setup tasks, in the most declarative way possible.
611+
612+ Service definitions are dicts in the following formats (all keys except
613+ 'service' are optional)::
614+
615+ {
616+ "service": <service name>,
617+ "required_data": <list of required data contexts>,
618+ "data_ready": <one or more callbacks>,
619+ "data_lost": <one or more callbacks>,
620+ "start": <one or more callbacks>,
621+ "stop": <one or more callbacks>,
622+ "ports": <list of ports to manage>,
623+ }
624+
625+ The 'required_data' list should contain dicts of required data (or
626+ dependency managers that act like dicts and know how to collect the data).
627+ Only when all items in the 'required_data' list are populated are the list
628+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
629+ information.
630+
631+ The 'data_ready' value should be either a single callback, or a list of
632+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
633+ Each callback will be called with the service name as the only parameter.
634+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
635+ are fired.
636+
637+ The 'data_lost' value should be either a single callback, or a list of
638+ callbacks, to be called when a 'required_data' item no longer passes
639+ `is_ready()`. Each callback will be called with the service name as the
640+ only parameter. After all of the 'data_lost' callbacks are called,
641+ the 'stop' callbacks are fired.
642+
643+ The 'start' value should be either a single callback, or a list of
644+ callbacks, to be called when starting the service, after the 'data_ready'
645+ callbacks are complete. Each callback will be called with the service
646+ name as the only parameter. This defaults to
647+ `[host.service_start, services.open_ports]`.
648+
649+ The 'stop' value should be either a single callback, or a list of
650+ callbacks, to be called when stopping the service. If the service is
651+ being stopped because it no longer has all of its 'required_data', this
652+ will be called after all of the 'data_lost' callbacks are complete.
653+ Each callback will be called with the service name as the only parameter.
654+ This defaults to `[services.close_ports, host.service_stop]`.
655+
656+ The 'ports' value should be a list of ports to manage. The default
657+ 'start' handler will open the ports after the service is started,
658+ and the default 'stop' handler will close the ports prior to stopping
659+ the service.
660+
661+
662+ Examples:
663+
664+ The following registers an Upstart service called bingod that depends on
665+ a mongodb relation and which runs a custom `db_migrate` function prior to
666+ restarting the service, and a Runit service called spadesd::
667+
668+ manager = services.ServiceManager([
669+ {
670+ 'service': 'bingod',
671+ 'ports': [80, 443],
672+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
673+ 'data_ready': [
674+ services.template(source='bingod.conf'),
675+ services.template(source='bingod.ini',
676+ target='/etc/bingod.ini',
677+ owner='bingo', perms=0400),
678+ ],
679+ },
680+ {
681+ 'service': 'spadesd',
682+ 'data_ready': services.template(source='spadesd_run.j2',
683+ target='/etc/sv/spadesd/run',
684+ perms=0555),
685+ 'start': runit_start,
686+ 'stop': runit_stop,
687+ },
688+ ])
689+ manager.manage()
690+ """
691+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
692+ self._ready = None
693+ self.services = {}
694+ for service in services or []:
695+ service_name = service['service']
696+ self.services[service_name] = service
697+
698+ def manage(self):
699+ """
700+ Handle the current hook by doing The Right Thing with the registered services.
701+ """
702+ hook_name = hookenv.hook_name()
703+ if hook_name == 'stop':
704+ self.stop_services()
705+ else:
706+ self.provide_data()
707+ self.reconfigure_services()
708+
709+ def provide_data(self):
710+ hook_name = hookenv.hook_name()
711+ for service in self.services.values():
712+ for provider in service.get('provided_data', []):
713+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
714+ data = provider.provide_data()
715+ if provider._is_ready(data):
716+ hookenv.relation_set(None, data)
717+
718+ def reconfigure_services(self, *service_names):
719+ """
720+ Update all files for one or more registered services, and,
721+ if ready, optionally restart them.
722+
723+ If no service names are given, reconfigures all registered services.
724+ """
725+ for service_name in service_names or self.services.keys():
726+ if self.is_ready(service_name):
727+ self.fire_event('data_ready', service_name)
728+ self.fire_event('start', service_name, default=[
729+ service_restart,
730+ manage_ports])
731+ self.save_ready(service_name)
732+ else:
733+ if self.was_ready(service_name):
734+ self.fire_event('data_lost', service_name)
735+ self.fire_event('stop', service_name, default=[
736+ manage_ports,
737+ service_stop])
738+ self.save_lost(service_name)
739+
740+ def stop_services(self, *service_names):
741+ """
742+ Stop one or more registered services, by name.
743+
744+ If no service names are given, stops all registered services.
745+ """
746+ for service_name in service_names or self.services.keys():
747+ self.fire_event('stop', service_name, default=[
748+ manage_ports,
749+ service_stop])
750+
751+ def get_service(self, service_name):
752+ """
753+ Given the name of a registered service, return its service definition.
754+ """
755+ service = self.services.get(service_name)
756+ if not service:
757+ raise KeyError('Service not registered: %s' % service_name)
758+ return service
759+
760+ def fire_event(self, event_name, service_name, default=None):
761+ """
762+ Fire a data_ready, data_lost, start, or stop event on a given service.
763+ """
764+ service = self.get_service(service_name)
765+ callbacks = service.get(event_name, default)
766+ if not callbacks:
767+ return
768+ if not isinstance(callbacks, Iterable):
769+ callbacks = [callbacks]
770+ for callback in callbacks:
771+ if isinstance(callback, ManagerCallback):
772+ callback(self, service_name, event_name)
773+ else:
774+ callback(service_name)
775+
776+ def is_ready(self, service_name):
777+ """
778+ Determine if a registered service is ready, by checking its 'required_data'.
779+
780+ A 'required_data' item can be any mapping type, and is considered ready
781+ if `bool(item)` evaluates as True.
782+ """
783+ service = self.get_service(service_name)
784+ reqs = service.get('required_data', [])
785+ return all(bool(req) for req in reqs)
786+
787+ def _load_ready_file(self):
788+ if self._ready is not None:
789+ return
790+ if os.path.exists(self._ready_file):
791+ with open(self._ready_file) as fp:
792+ self._ready = set(json.load(fp))
793+ else:
794+ self._ready = set()
795+
796+ def _save_ready_file(self):
797+ if self._ready is None:
798+ return
799+ with open(self._ready_file, 'w') as fp:
800+ json.dump(list(self._ready), fp)
801+
802+ def save_ready(self, service_name):
803+ """
804+ Save an indicator that the given service is now data_ready.
805+ """
806+ self._load_ready_file()
807+ self._ready.add(service_name)
808+ self._save_ready_file()
809+
810+ def save_lost(self, service_name):
811+ """
812+ Save an indicator that the given service is no longer data_ready.
813+ """
814+ self._load_ready_file()
815+ self._ready.discard(service_name)
816+ self._save_ready_file()
817+
818+ def was_ready(self, service_name):
819+ """
820+ Determine if the given service was previously data_ready.
821+ """
822+ self._load_ready_file()
823+ return service_name in self._ready
824+
825+
826+class ManagerCallback(object):
827+ """
828+ Special case of a callback that takes the `ServiceManager` instance
829+ in addition to the service name.
830+
831+ Subclasses should implement `__call__` which should accept three parameters:
832+
833+ * `manager` The `ServiceManager` instance
834+ * `service_name` The name of the service it's being triggered for
835+ * `event_name` The name of the event that this callback is handling
836+ """
837+ def __call__(self, manager, service_name, event_name):
838+ raise NotImplementedError()
839+
840+
841+class PortManagerCallback(ManagerCallback):
842+ """
843+ Callback class that will open or close ports, for use as either
844+ a start or stop action.
845+ """
846+ def __call__(self, manager, service_name, event_name):
847+ service = manager.get_service(service_name)
848+ new_ports = service.get('ports', [])
849+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
850+ if os.path.exists(port_file):
851+ with open(port_file) as fp:
852+ old_ports = fp.read().split(',')
853+ for old_port in old_ports:
854+ if bool(old_port):
855+ old_port = int(old_port)
856+ if old_port not in new_ports:
857+ hookenv.close_port(old_port)
858+ with open(port_file, 'w') as fp:
859+ fp.write(','.join(str(port) for port in new_ports))
860+ for port in new_ports:
861+ if event_name == 'start':
862+ hookenv.open_port(port)
863+ elif event_name == 'stop':
864+ hookenv.close_port(port)
865+
866+
867+def service_stop(service_name):
868+ """
869+ Wrapper around host.service_stop to prevent spurious "unknown service"
870+ messages in the logs.
871+ """
872+ if host.service_running(service_name):
873+ host.service_stop(service_name)
874+
875+
876+def service_restart(service_name):
877+ """
878+ Wrapper around host.service_restart to prevent spurious "unknown service"
879+ messages in the logs.
880+ """
881+ if host.service_available(service_name):
882+ if host.service_running(service_name):
883+ host.service_restart(service_name)
884+ else:
885+ host.service_start(service_name)
886+
887+
888+# Convenience aliases
889+open_ports = close_ports = manage_ports = PortManagerCallback()
890
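To complement the docstring example above, a minimal, hypothetical hooks entry point wired up with the services framework; the service name, port, and template names are illustrative, not taken from the glance charm:

    #!/usr/bin/env python
    from charmhelpers.core import hookenv, services

    manager = services.ServiceManager([
        {
            'service': 'glance-api',            # restarted via service_restart
            'ports': [9292],                    # opened on start, closed on stop
            'required_data': [hookenv.config()],
            'data_ready': [
                services.template(source='glance-api.conf',
                                  target='/etc/glance/glance-api.conf'),
            ],
        },
    ])

    if __name__ == '__main__':
        # Every hook is a symlink to this script; manage() inspects the hook
        # name and fires data_ready/start or data_lost/stop as appropriate.
        manager.manage()
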
891=== added file 'hooks/charmhelpers/core/services/helpers.py'
892--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
893+++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 13:59:42 +0000
894@@ -0,0 +1,125 @@
895+from charmhelpers.core import hookenv
896+from charmhelpers.core import templating
897+
898+from charmhelpers.core.services.base import ManagerCallback
899+
900+
901+__all__ = ['RelationContext', 'TemplateCallback',
902+ 'render_template', 'template']
903+
904+
905+class RelationContext(dict):
906+ """
907+ Base class for a context generator that gets relation data from juju.
908+
909+ Subclasses must provide the attributes `name`, which is the name of the
910+ interface of interest, `interface`, which is the type of the interface of
911+ interest, and `required_keys`, which is the set of keys required for the
912+ relation to be considered complete. The data for all interfaces matching
913+ the `name` attribute that are complete will used to populate the dictionary
914+ values (see `get_data`, below).
915+
916+ The generated context will be namespaced under the interface type, to prevent
917+ potential naming conflicts.
918+ """
919+ name = None
920+ interface = None
921+ required_keys = []
922+
923+ def __init__(self, *args, **kwargs):
924+ super(RelationContext, self).__init__(*args, **kwargs)
925+ self.get_data()
926+
927+ def __bool__(self):
928+ """
929+ Returns True if all of the required_keys are available.
930+ """
931+ return self.is_ready()
932+
933+ __nonzero__ = __bool__
934+
935+ def __repr__(self):
936+ return super(RelationContext, self).__repr__()
937+
938+ def is_ready(self):
939+ """
940+ Returns True if all of the `required_keys` are available from any units.
941+ """
942+ ready = len(self.get(self.name, [])) > 0
943+ if not ready:
944+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
945+ return ready
946+
947+ def _is_ready(self, unit_data):
948+ """
949+ Helper method that tests a set of relation data and returns True if
950+ all of the `required_keys` are present.
951+ """
952+ return set(unit_data.keys()).issuperset(set(self.required_keys))
953+
954+ def get_data(self):
955+ """
956+ Retrieve the relation data for each unit involved in a relation and,
957+ if complete, store it in a list under `self[self.name]`. This
958+ is automatically called when the RelationContext is instantiated.
959+
960+ The units are sorted lexographically first by the service ID, then by
961+ the unit ID. Thus, if an interface has two other services, 'db:1'
962+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
963+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
964+ set of data, the relation data for the units will be stored in the
965+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
966+
967+ If you only care about a single unit on the relation, you can just
968+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
969+ support multiple units on a relation, you should iterate over the list,
970+ like::
971+
972+ {% for unit in interface -%}
973+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
974+ {%- endfor %}
975+
976+ Note that since all sets of relation data from all related services and
977+ units are in a single list, if you need to know which service or unit a
978+ set of data came from, you'll need to extend this class to preserve
979+ that information.
980+ """
981+ if not hookenv.relation_ids(self.name):
982+ return
983+
984+ ns = self.setdefault(self.name, [])
985+ for rid in sorted(hookenv.relation_ids(self.name)):
986+ for unit in sorted(hookenv.related_units(rid)):
987+ reldata = hookenv.relation_get(rid=rid, unit=unit)
988+ if self._is_ready(reldata):
989+ ns.append(reldata)
990+
991+ def provide_data(self):
992+ """
993+ Return data to be relation_set for this interface.
994+ """
995+ return {}
996+
997+
998+class TemplateCallback(ManagerCallback):
999+ """
1000+ Callback class that will render a template, for use as a ready action.
1001+ """
1002+ def __init__(self, source, target, owner='root', group='root', perms=0444):
1003+ self.source = source
1004+ self.target = target
1005+ self.owner = owner
1006+ self.group = group
1007+ self.perms = perms
1008+
1009+ def __call__(self, manager, service_name, event_name):
1010+ service = manager.get_service(service_name)
1011+ context = {}
1012+ for ctx in service.get('required_data', []):
1013+ context.update(ctx)
1014+ templating.render(self.source, self.target, context,
1015+ self.owner, self.group, self.perms)
1016+
1017+
1018+# Convenience aliases for templates
1019+render_template = template = TemplateCallback
1020
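A hedged sketch of a RelationContext subclass; the relation name, interface, and required keys are hypothetical and would need to match the charm's actual metadata:

    from charmhelpers.core.services.helpers import RelationContext

    class DatabaseRelation(RelationContext):
        name = 'shared-db'                    # relation name in metadata.yaml
        interface = 'mysql-shared'            # interface type of the relation
        required_keys = ['db_host', 'password']

    db = DatabaseRelation()
    if db:
        # Data is stored as a list under self.name, one entry per unit that
        # has provided all required_keys.
        first_host = db['shared-db'][0]['db_host']
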
1021=== added file 'hooks/charmhelpers/core/templating.py'
1022--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
1023+++ hooks/charmhelpers/core/templating.py 2014-08-13 13:59:42 +0000
1024@@ -0,0 +1,51 @@
1025+import os
1026+
1027+from charmhelpers.core import host
1028+from charmhelpers.core import hookenv
1029+
1030+
1031+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
1032+ """
1033+ Render a template.
1034+
1035+ The `source` path, if not absolute, is relative to the `templates_dir`.
1036+
1037+ The `target` path should be absolute.
1038+
1039+ The context should be a dict containing the values to be replaced in the
1040+ template.
1041+
1042+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
1043+
1044+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
1045+
1046+ Note: Using this requires python-jinja2; if it is not installed, calling
1047+ this will attempt to use charmhelpers.fetch.apt_install to install it.
1048+ """
1049+ try:
1050+ from jinja2 import FileSystemLoader, Environment, exceptions
1051+ except ImportError:
1052+ try:
1053+ from charmhelpers.fetch import apt_install
1054+ except ImportError:
1055+ hookenv.log('Could not import jinja2, and could not import '
1056+ 'charmhelpers.fetch to install it',
1057+ level=hookenv.ERROR)
1058+ raise
1059+ apt_install('python-jinja2', fatal=True)
1060+ from jinja2 import FileSystemLoader, Environment, exceptions
1061+
1062+ if templates_dir is None:
1063+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
1064+ loader = Environment(loader=FileSystemLoader(templates_dir))
1065+ try:
1066+ source = source
1067+ template = loader.get_template(source)
1068+ except exceptions.TemplateNotFound as e:
1069+ hookenv.log('Could not load template %s from %s.' %
1070+ (source, templates_dir),
1071+ level=hookenv.ERROR)
1072+ raise e
1073+ content = template.render(context)
1074+ host.mkdir(os.path.dirname(target))
1075+ host.write_file(target, content, owner, group, perms)
1076
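A minimal use of the new render() helper; the template name, target path, and context keys here are illustrative:

    from charmhelpers.core.templating import render

    # Renders <charm>/templates/haproxy.cfg to the target path, creating the
    # parent directory and writing the file with the given ownership/perms.
    render(source='haproxy.cfg',
           target='/etc/haproxy/haproxy.cfg',
           context={'local_host': '127.0.0.1', 'stat_port': ':8888'},
           owner='root', group='root', perms=0o644)
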
1077=== modified file 'hooks/charmhelpers/fetch/__init__.py'
1078--- hooks/charmhelpers/fetch/__init__.py 2014-07-25 09:37:25 +0000
1079+++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 13:59:42 +0000
1080@@ -122,6 +122,7 @@
1081 # Tell apt to build an in-memory cache to prevent race conditions (if
1082 # another process is already building the cache).
1083 apt_pkg.config.set("Dir::Cache::pkgcache", "")
1084+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
1085
1086 cache = apt_pkg.Cache()
1087 _pkgs = []
1088
1089=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
1090--- tests/charmhelpers/contrib/amulet/deployment.py 2014-07-25 09:37:25 +0000
1091+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-08-13 13:59:42 +0000
1092@@ -1,9 +1,14 @@
1093 import amulet
1094
1095+import os
1096+
1097
1098 class AmuletDeployment(object):
1099- """This class provides generic Amulet deployment and test runner
1100- methods."""
1101+ """Amulet deployment.
1102+
1103+ This class provides generic Amulet deployment and test runner
1104+ methods.
1105+ """
1106
1107 def __init__(self, series=None):
1108 """Initialize the deployment environment."""
1109@@ -16,11 +21,19 @@
1110 self.d = amulet.Deployment()
1111
1112 def _add_services(self, this_service, other_services):
1113- """Add services to the deployment where this_service is the local charm
1114+ """Add services.
1115+
1116+ Add services to the deployment where this_service is the local charm
1117 that we're focused on testing and other_services are the other
1118- charms that come from the charm store."""
1119+ charms that come from the charm store.
1120+ """
1121 name, units = range(2)
1122- self.this_service = this_service[name]
1123+
1124+ if this_service[name] != os.path.basename(os.getcwd()):
1125+ s = this_service[name]
1126+ msg = "The charm's root directory name needs to be {}".format(s)
1127+ amulet.raise_status(amulet.FAIL, msg=msg)
1128+
1129 self.d.add(this_service[name], units=this_service[units])
1130
1131 for svc in other_services:
1132@@ -45,10 +58,10 @@
1133 """Deploy environment and wait for all hooks to finish executing."""
1134 try:
1135 self.d.setup()
1136- self.d.sentry.wait()
1137+ self.d.sentry.wait(timeout=900)
1138 except amulet.helpers.TimeoutError:
1139 amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
1140- except:
1141+ except Exception:
1142 raise
1143
1144 def run_tests(self):
1145
1146=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
1147--- tests/charmhelpers/contrib/amulet/utils.py 2014-07-25 09:37:25 +0000
1148+++ tests/charmhelpers/contrib/amulet/utils.py 2014-08-13 13:59:42 +0000
1149@@ -3,12 +3,15 @@
1150 import logging
1151 import re
1152 import sys
1153-from time import sleep
1154+import time
1155
1156
1157 class AmuletUtils(object):
1158- """This class provides common utility functions that are used by Amulet
1159- tests."""
1160+ """Amulet utilities.
1161+
1162+ This class provides common utility functions that are used by Amulet
1163+ tests.
1164+ """
1165
1166 def __init__(self, log_level=logging.ERROR):
1167 self.log = self.get_logger(level=log_level)
1168@@ -17,8 +20,8 @@
1169 """Get a logger object that will log to stdout."""
1170 log = logging
1171 logger = log.getLogger(name)
1172- fmt = \
1173- log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s")
1174+ fmt = log.Formatter("%(asctime)s %(funcName)s "
1175+ "%(levelname)s: %(message)s")
1176
1177 handler = log.StreamHandler(stream=sys.stdout)
1178 handler.setLevel(level)
1179@@ -38,7 +41,7 @@
1180 def valid_url(self, url):
1181 p = re.compile(
1182 r'^(?:http|ftp)s?://'
1183- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa
1184+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
1185 r'localhost|'
1186 r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
1187 r'(?::\d+)?'
1188@@ -50,8 +53,11 @@
1189 return False
1190
1191 def validate_services(self, commands):
1192- """Verify the specified services are running on the corresponding
1193- service units."""
1194+ """Validate services.
1195+
1196+ Verify the specified services are running on the corresponding
1197+ service units.
1198+ """
1199 for k, v in commands.iteritems():
1200 for cmd in v:
1201 output, code = k.run(cmd)
1202@@ -66,9 +72,13 @@
1203 config.readfp(io.StringIO(file_contents))
1204 return config
1205
1206- def validate_config_data(self, sentry_unit, config_file, section, expected):
1207- """Verify that the specified section of the config file contains
1208- the expected option key:value pairs."""
1209+ def validate_config_data(self, sentry_unit, config_file, section,
1210+ expected):
1211+ """Validate config file data.
1212+
1213+ Verify that the specified section of the config file contains
1214+ the expected option key:value pairs.
1215+ """
1216 config = self._get_config(sentry_unit, config_file)
1217
1218 if section != 'DEFAULT' and not config.has_section(section):
1219@@ -78,20 +88,23 @@
1220 if not config.has_option(section, k):
1221 return "section [{}] is missing option {}".format(section, k)
1222 if config.get(section, k) != expected[k]:
1223- return "section [{}] {}:{} != expected {}:{}".format(section,
1224- k, config.get(section, k), k, expected[k])
1225+ return "section [{}] {}:{} != expected {}:{}".format(
1226+ section, k, config.get(section, k), k, expected[k])
1227 return None
1228
1229 def _validate_dict_data(self, expected, actual):
1230- """Compare expected dictionary data vs actual dictionary data.
1231+ """Validate dictionary data.
1232+
1233+ Compare expected dictionary data vs actual dictionary data.
1234 The values in the 'expected' dictionary can be strings, bools, ints,
1235 longs, or can be a function that evaluate a variable and returns a
1236- bool."""
1237+ bool.
1238+ """
1239 for k, v in expected.iteritems():
1240 if k in actual:
1241- if isinstance(v, basestring) or \
1242- isinstance(v, bool) or \
1243- isinstance(v, (int, long)):
1244+ if (isinstance(v, basestring) or
1245+ isinstance(v, bool) or
1246+ isinstance(v, (int, long))):
1247 if v != actual[k]:
1248 return "{}:{}".format(k, actual[k])
1249 elif not v(actual[k]):
1250@@ -114,7 +127,7 @@
1251 return None
1252
1253 def not_null(self, string):
1254- if string != None:
1255+ if string is not None:
1256 return True
1257 else:
1258 return False
1259@@ -128,9 +141,12 @@
1260 return sentry_unit.directory_stat(directory)['mtime']
1261
1262 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
1263- """Determine start time of the process based on the last modification
1264+ """Get process' start time.
1265+
1266+ Determine start time of the process based on the last modification
1267 time of the /proc/pid directory. If pgrep_full is True, the process
1268- name is matched against the full command line."""
1269+ name is matched against the full command line.
1270+ """
1271 if pgrep_full:
1272 cmd = 'pgrep -o -f {}'.format(service)
1273 else:
1274@@ -139,13 +155,16 @@
1275 return self._get_dir_mtime(sentry_unit, proc_dir)
1276
1277 def service_restarted(self, sentry_unit, service, filename,
1278- pgrep_full=False):
1279- """Compare a service's start time vs a file's last modification time
1280+ pgrep_full=False, sleep_time=20):
1281+ """Check if service was restarted.
1282+
1283+ Compare a service's start time vs a file's last modification time
1284 (such as a config file for that service) to determine if the service
1285- has been restarted."""
1286- sleep(10)
1287- if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
1288- self._get_file_mtime(sentry_unit, filename):
1289+ has been restarted.
1290+ """
1291+ time.sleep(sleep_time)
1292+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
1293+ self._get_file_mtime(sentry_unit, filename)):
1294 return True
1295 else:
1296 return False
1297
1298=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1299--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-25 09:37:25 +0000
1300+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 13:59:42 +0000
1301@@ -4,8 +4,11 @@
1302
1303
1304 class OpenStackAmuletDeployment(AmuletDeployment):
1305- """This class inherits from AmuletDeployment and has additional support
1306- that is specifically for use by OpenStack charms."""
1307+ """OpenStack amulet deployment.
1308+
1309+ This class inherits from AmuletDeployment and has additional support
1310+ that is specifically for use by OpenStack charms.
1311+ """
1312
1313 def __init__(self, series=None, openstack=None, source=None):
1314 """Initialize the deployment environment."""
1315@@ -40,11 +43,14 @@
1316 self.d.configure(service, config)
1317
1318 def _get_openstack_release(self):
1319- """Return an integer representing the enum value of the openstack
1320- release."""
1321- self.precise_essex, self.precise_folsom, self.precise_grizzly, \
1322- self.precise_havana, self.precise_icehouse, \
1323- self.trusty_icehouse = range(6)
1324+ """Get openstack release.
1325+
1326+ Return an integer representing the enum value of the openstack
1327+ release.
1328+ """
1329+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
1330+ self.precise_havana, self.precise_icehouse,
1331+ self.trusty_icehouse) = range(6)
1332 releases = {
1333 ('precise', None): self.precise_essex,
1334 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
1335
1336=== modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
1337--- tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-25 09:37:25 +0000
1338+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 13:59:42 +0000
1339@@ -16,8 +16,11 @@
1340
1341
1342 class OpenStackAmuletUtils(AmuletUtils):
1343- """This class inherits from AmuletUtils and has additional support
1344- that is specifically for use by OpenStack charms."""
1345+ """OpenStack amulet utilities.
1346+
1347+ This class inherits from AmuletUtils and has additional support
1348+ that is specifically for use by OpenStack charms.
1349+ """
1350
1351 def __init__(self, log_level=ERROR):
1352 """Initialize the deployment environment."""
1353@@ -25,13 +28,17 @@
1354
1355 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
1356 public_port, expected):
1357- """Validate actual endpoint data vs expected endpoint data. The ports
1358- are used to find the matching endpoint."""
1359+ """Validate endpoint data.
1360+
1361+ Validate actual endpoint data vs expected endpoint data. The ports
1362+ are used to find the matching endpoint.
1363+ """
1364 found = False
1365 for ep in endpoints:
1366 self.log.debug('endpoint: {}'.format(repr(ep)))
1367- if admin_port in ep.adminurl and internal_port in ep.internalurl \
1368- and public_port in ep.publicurl:
1369+ if (admin_port in ep.adminurl and
1370+ internal_port in ep.internalurl and
1371+ public_port in ep.publicurl):
1372 found = True
1373 actual = {'id': ep.id,
1374 'region': ep.region,
1375@@ -47,8 +54,11 @@
1376 return 'endpoint not found'
1377
1378 def validate_svc_catalog_endpoint_data(self, expected, actual):
1379- """Validate a list of actual service catalog endpoints vs a list of
1380- expected service catalog endpoints."""
1381+ """Validate service catalog endpoint data.
1382+
1383+ Validate a list of actual service catalog endpoints vs a list of
1384+ expected service catalog endpoints.
1385+ """
1386 self.log.debug('actual: {}'.format(repr(actual)))
1387 for k, v in expected.iteritems():
1388 if k in actual:
1389@@ -60,8 +70,11 @@
1390 return ret
1391
1392 def validate_tenant_data(self, expected, actual):
1393- """Validate a list of actual tenant data vs list of expected tenant
1394- data."""
1395+ """Validate tenant data.
1396+
1397+ Validate a list of actual tenant data vs list of expected tenant
1398+ data.
1399+ """
1400 self.log.debug('actual: {}'.format(repr(actual)))
1401 for e in expected:
1402 found = False
1403@@ -78,8 +91,11 @@
1404 return ret
1405
1406 def validate_role_data(self, expected, actual):
1407- """Validate a list of actual role data vs a list of expected role
1408- data."""
1409+ """Validate role data.
1410+
1411+ Validate a list of actual role data vs a list of expected role
1412+ data.
1413+ """
1414 self.log.debug('actual: {}'.format(repr(actual)))
1415 for e in expected:
1416 found = False
1417@@ -95,8 +111,11 @@
1418 return ret
1419
1420 def validate_user_data(self, expected, actual):
1421- """Validate a list of actual user data vs a list of expected user
1422- data."""
1423+ """Validate user data.
1424+
1425+ Validate a list of actual user data vs a list of expected user
1426+ data.
1427+ """
1428 self.log.debug('actual: {}'.format(repr(actual)))
1429 for e in expected:
1430 found = False
1431@@ -114,21 +133,24 @@
1432 return ret
1433
1434 def validate_flavor_data(self, expected, actual):
1435- """Validate a list of actual flavors vs a list of expected flavors."""
1436+ """Validate flavor data.
1437+
1438+ Validate a list of actual flavors vs a list of expected flavors.
1439+ """
1440 self.log.debug('actual: {}'.format(repr(actual)))
1441 act = [a.name for a in actual]
1442 return self._validate_list_data(expected, act)
1443
1444 def tenant_exists(self, keystone, tenant):
1445- """Return True if tenant exists"""
1446+ """Return True if tenant exists."""
1447 return tenant in [t.name for t in keystone.tenants.list()]
1448
1449 def authenticate_keystone_admin(self, keystone_sentry, user, password,
1450 tenant):
1451 """Authenticates admin user with the keystone admin endpoint."""
1452- service_ip = \
1453- keystone_sentry.relation('shared-db',
1454- 'mysql:shared-db')['private-address']
1455+ unit = keystone_sentry
1456+ service_ip = unit.relation('shared-db',
1457+ 'mysql:shared-db')['private-address']
1458 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
1459 return keystone_client.Client(username=user, password=password,
1460 tenant_name=tenant, auth_url=ep)
1461@@ -177,12 +199,40 @@
1462 image = glance.images.create(name=image_name, is_public=True,
1463 disk_format='qcow2',
1464 container_format='bare', data=f)
1465+ count = 1
1466+ status = image.status
1467+ while status != 'active' and count < 10:
1468+ time.sleep(3)
1469+ image = glance.images.get(image.id)
1470+ status = image.status
1471+ self.log.debug('image status: {}'.format(status))
1472+ count += 1
1473+
1474+ if status != 'active':
1475+ self.log.error('image creation timed out')
1476+ return None
1477+
1478 return image
1479
1480 def delete_image(self, glance, image):
1481 """Delete the specified image."""
1482+ num_before = len(list(glance.images.list()))
1483 glance.images.delete(image)
1484
1485+ count = 1
1486+ num_after = len(list(glance.images.list()))
1487+ while num_after != (num_before - 1) and count < 10:
1488+ time.sleep(3)
1489+ num_after = len(list(glance.images.list()))
1490+ self.log.debug('number of images: {}'.format(num_after))
1491+ count += 1
1492+
1493+ if num_after != (num_before - 1):
1494+ self.log.error('image deletion timed out')
1495+ return False
1496+
1497+ return True
1498+
1499 def create_instance(self, nova, image_name, instance_name, flavor):
1500 """Create the specified instance."""
1501 image = nova.images.find(name=image_name)
1502@@ -199,11 +249,27 @@
1503 self.log.debug('instance status: {}'.format(status))
1504 count += 1
1505
1506- if status == 'BUILD':
1507+ if status != 'ACTIVE':
1508+ self.log.error('instance creation timed out')
1509 return None
1510
1511 return instance
1512
1513 def delete_instance(self, nova, instance):
1514 """Delete the specified instance."""
1515+ num_before = len(list(nova.servers.list()))
1516 nova.servers.delete(instance)
1517+
1518+ count = 1
1519+ num_after = len(list(nova.servers.list()))
1520+ while num_after != (num_before - 1) and count < 10:
1521+ time.sleep(3)
1522+ num_after = len(list(nova.servers.list()))
1523+ self.log.debug('number of instances: {}'.format(num_after))
1524+ count += 1
1525+
1526+ if num_after != (num_before - 1):
1527+ self.log.error('instance deletion timed out')
1528+ return False
1529+
1530+ return True
