Merge lp:~gnuoy/charms/trusty/swift-storage/next-charm-sync into lp:~openstack-charmers-archive/charms/trusty/swift-storage/next

Proposed by Liam Young
Status: Merged
Merged at revision: 39
Proposed branch: lp:~gnuoy/charms/trusty/swift-storage/next-charm-sync
Merge into: lp:~openstack-charmers-archive/charms/trusty/swift-storage/next
Diff against target: 1554 lines (+899/-116)
20 files modified
.bzrignore (+2/-0)
Makefile (+8/-3)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+55/-13)
hooks/charmhelpers/contrib/network/ip.py (+19/-1)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20)
hooks/charmhelpers/contrib/openstack/context.py (+20/-4)
hooks/charmhelpers/contrib/openstack/ip.py (+7/-3)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+3/-3)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-0)
hooks/charmhelpers/core/host.py (+34/-1)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+305/-0)
hooks/charmhelpers/core/services/helpers.py (+125/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+1/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+20/-7)
tests/charmhelpers/contrib/amulet/utils.py (+46/-27)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+13/-7)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+86/-20)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/swift-storage/next-charm-sync
Reviewer Review Type Date Requested Status
Liam Young (community) Approve
Review via email: mp+230624@code.launchpad.net
To post a comment you must log in.
Revision history for this message
Liam Young (gnuoy) wrote :

Approved by jamespage

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added file '.bzrignore'
2--- .bzrignore 1970-01-01 00:00:00 +0000
3+++ .bzrignore 2014-08-13 13:52:53 +0000
4@@ -0,0 +1,2 @@
5+bin
6+.coverage
7
8=== modified file 'Makefile'
9--- Makefile 2014-07-11 16:41:12 +0000
10+++ Makefile 2014-08-13 13:52:53 +0000
11@@ -17,9 +17,14 @@
12 # https://bugs.launchpad.net/amulet/+bug/1320357
13 @juju test -v -p AMULET_HTTP_PROXY
14
15-sync:
16- @charm-helper-sync -c charm-helpers-hooks.yaml
17- @charm-helper-sync -c charm-helpers-tests.yaml
18+bin/charm_helpers_sync.py:
19+ @mkdir -p bin
20+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
21+ > bin/charm_helpers_sync.py
22+
23+sync: bin/charm_helpers_sync.py
24+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
25+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
26
27 publish: lint test
28 bzr push lp:charms/swift-storage
29
30=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
31--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-28 11:54:32 +0000
32+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-08-13 13:52:53 +0000
33@@ -6,6 +6,11 @@
34 # Adam Gandelman <adamg@ubuntu.com>
35 #
36
37+"""
38+Helpers for clustering and determining "cluster leadership" and other
39+clustering-related helpers.
40+"""
41+
42 import subprocess
43 import os
44
45@@ -19,6 +24,7 @@
46 config as config_get,
47 INFO,
48 ERROR,
49+ WARNING,
50 unit_get,
51 )
52
53@@ -27,6 +33,29 @@
54 pass
55
56
57+def is_elected_leader(resource):
58+ """
59+ Returns True if the charm executing this is the elected cluster leader.
60+
61+ It relies on two mechanisms to determine leadership:
62+ 1. If the charm is part of a corosync cluster, call corosync to
63+ determine leadership.
64+ 2. If the charm is not part of a corosync cluster, the leader is
+ determined as being "the alive unit with the lowest unit number". In
66+ other words, the oldest surviving unit.
67+ """
68+ if is_clustered():
69+ if not is_crm_leader(resource):
70+ log('Deferring action to CRM leader.', level=INFO)
71+ return False
72+ else:
73+ peers = peer_units()
74+ if peers and not oldest_peer(peers):
75+ log('Deferring action to oldest service unit.', level=INFO)
76+ return False
77+ return True
78+
79+
80 def is_clustered():
81 for r_id in (relation_ids('ha') or []):
82 for unit in (relation_list(r_id) or []):
83@@ -38,7 +67,11 @@
84 return False
85
86
87-def is_leader(resource):
88+def is_crm_leader(resource):
89+ """
90+ Returns True if the charm calling this is the elected corosync leader,
91+ as returned by calling the external "crm" command.
92+ """
93 cmd = [
94 "crm", "resource",
95 "show", resource
96@@ -54,15 +87,31 @@
97 return False
98
99
100-def peer_units():
101+def is_leader(resource):
102+ log("is_leader is deprecated. Please consider using is_crm_leader "
103+ "instead.", level=WARNING)
104+ return is_crm_leader(resource)
105+
106+
107+def peer_units(peer_relation="cluster"):
108 peers = []
109- for r_id in (relation_ids('cluster') or []):
110+ for r_id in (relation_ids(peer_relation) or []):
111 for unit in (relation_list(r_id) or []):
112 peers.append(unit)
113 return peers
114
115
116+def peer_ips(peer_relation='cluster', addr_key='private-address'):
117+ '''Return a dict of peers and their private-address'''
118+ peers = {}
119+ for r_id in relation_ids(peer_relation):
120+ for unit in relation_list(r_id):
121+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
122+ return peers
123+
124+
125 def oldest_peer(peers):
126+ """Determines who the oldest peer is by comparing unit numbers."""
127 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
128 for peer in peers:
129 remote_unit_no = int(peer.split('/')[1])
130@@ -72,16 +121,9 @@
131
132
133 def eligible_leader(resource):
134- if is_clustered():
135- if not is_leader(resource):
136- log('Deferring action to CRM leader.', level=INFO)
137- return False
138- else:
139- peers = peer_units()
140- if peers and not oldest_peer(peers):
141- log('Deferring action to oldest service unit.', level=INFO)
142- return False
143- return True
144+ log("eligible_leader is deprecated. Please consider using "
145+ "is_elected_leader instead.", level=WARNING)
146+ return is_elected_leader(resource)
147
148
149 def https():
150
151=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
152--- hooks/charmhelpers/contrib/network/ip.py 2014-07-28 12:08:48 +0000
153+++ hooks/charmhelpers/contrib/network/ip.py 2014-08-13 13:52:53 +0000
154@@ -4,7 +4,7 @@
155
156 from charmhelpers.fetch import apt_install
157 from charmhelpers.core.hookenv import (
158- ERROR, log,
159+ ERROR, log, config,
160 )
161
162 try:
163@@ -154,3 +154,21 @@
164 get_iface_for_address = partial(_get_for_address, key='iface')
165
166 get_netmask_for_address = partial(_get_for_address, key='netmask')
167+
168+
169+def get_ipv6_addr(iface="eth0"):
170+ try:
171+ iface_addrs = netifaces.ifaddresses(iface)
172+ if netifaces.AF_INET6 not in iface_addrs:
173+ raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
174+
175+ addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
176+ ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
177+ and config('vip') != a['addr']]
178+ if not ipv6_addr:
179+ raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
180+
181+ return ipv6_addr[0]
182+
183+ except ValueError:
184+ raise ValueError("Invalid interface '%s'" % iface)
185
186=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
187--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-28 11:54:32 +0000
188+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 13:52:53 +0000
189@@ -4,8 +4,11 @@
190
191
192 class OpenStackAmuletDeployment(AmuletDeployment):
193- """This class inherits from AmuletDeployment and has additional support
194- that is specifically for use by OpenStack charms."""
195+ """OpenStack amulet deployment.
196+
197+ This class inherits from AmuletDeployment and has additional support
198+ that is specifically for use by OpenStack charms.
199+ """
200
201 def __init__(self, series=None, openstack=None, source=None):
202 """Initialize the deployment environment."""
203@@ -40,11 +43,14 @@
204 self.d.configure(service, config)
205
206 def _get_openstack_release(self):
207- """Return an integer representing the enum value of the openstack
208- release."""
209- self.precise_essex, self.precise_folsom, self.precise_grizzly, \
210- self.precise_havana, self.precise_icehouse, \
211- self.trusty_icehouse = range(6)
212+ """Get openstack release.
213+
214+ Return an integer representing the enum value of the openstack
215+ release.
216+ """
217+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
218+ self.precise_havana, self.precise_icehouse,
219+ self.trusty_icehouse) = range(6)
220 releases = {
221 ('precise', None): self.precise_essex,
222 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
223
224=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
225--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-28 11:54:32 +0000
226+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 13:52:53 +0000
227@@ -16,8 +16,11 @@
228
229
230 class OpenStackAmuletUtils(AmuletUtils):
231- """This class inherits from AmuletUtils and has additional support
232- that is specifically for use by OpenStack charms."""
233+ """OpenStack amulet utilities.
234+
235+ This class inherits from AmuletUtils and has additional support
236+ that is specifically for use by OpenStack charms.
237+ """
238
239 def __init__(self, log_level=ERROR):
240 """Initialize the deployment environment."""
241@@ -25,13 +28,17 @@
242
243 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
244 public_port, expected):
245- """Validate actual endpoint data vs expected endpoint data. The ports
246- are used to find the matching endpoint."""
247+ """Validate endpoint data.
248+
249+ Validate actual endpoint data vs expected endpoint data. The ports
250+ are used to find the matching endpoint.
251+ """
252 found = False
253 for ep in endpoints:
254 self.log.debug('endpoint: {}'.format(repr(ep)))
255- if admin_port in ep.adminurl and internal_port in ep.internalurl \
256- and public_port in ep.publicurl:
257+ if (admin_port in ep.adminurl and
258+ internal_port in ep.internalurl and
259+ public_port in ep.publicurl):
260 found = True
261 actual = {'id': ep.id,
262 'region': ep.region,
263@@ -47,8 +54,11 @@
264 return 'endpoint not found'
265
266 def validate_svc_catalog_endpoint_data(self, expected, actual):
267- """Validate a list of actual service catalog endpoints vs a list of
268- expected service catalog endpoints."""
269+ """Validate service catalog endpoint data.
270+
271+ Validate a list of actual service catalog endpoints vs a list of
272+ expected service catalog endpoints.
273+ """
274 self.log.debug('actual: {}'.format(repr(actual)))
275 for k, v in expected.iteritems():
276 if k in actual:
277@@ -60,8 +70,11 @@
278 return ret
279
280 def validate_tenant_data(self, expected, actual):
281- """Validate a list of actual tenant data vs list of expected tenant
282- data."""
283+ """Validate tenant data.
284+
285+ Validate a list of actual tenant data vs list of expected tenant
286+ data.
287+ """
288 self.log.debug('actual: {}'.format(repr(actual)))
289 for e in expected:
290 found = False
291@@ -78,8 +91,11 @@
292 return ret
293
294 def validate_role_data(self, expected, actual):
295- """Validate a list of actual role data vs a list of expected role
296- data."""
297+ """Validate role data.
298+
299+ Validate a list of actual role data vs a list of expected role
300+ data.
301+ """
302 self.log.debug('actual: {}'.format(repr(actual)))
303 for e in expected:
304 found = False
305@@ -95,8 +111,11 @@
306 return ret
307
308 def validate_user_data(self, expected, actual):
309- """Validate a list of actual user data vs a list of expected user
310- data."""
311+ """Validate user data.
312+
313+ Validate a list of actual user data vs a list of expected user
314+ data.
315+ """
316 self.log.debug('actual: {}'.format(repr(actual)))
317 for e in expected:
318 found = False
319@@ -114,21 +133,24 @@
320 return ret
321
322 def validate_flavor_data(self, expected, actual):
323- """Validate a list of actual flavors vs a list of expected flavors."""
324+ """Validate flavor data.
325+
326+ Validate a list of actual flavors vs a list of expected flavors.
327+ """
328 self.log.debug('actual: {}'.format(repr(actual)))
329 act = [a.name for a in actual]
330 return self._validate_list_data(expected, act)
331
332 def tenant_exists(self, keystone, tenant):
333- """Return True if tenant exists"""
334+ """Return True if tenant exists."""
335 return tenant in [t.name for t in keystone.tenants.list()]
336
337 def authenticate_keystone_admin(self, keystone_sentry, user, password,
338 tenant):
339 """Authenticates admin user with the keystone admin endpoint."""
340- service_ip = \
341- keystone_sentry.relation('shared-db',
342- 'mysql:shared-db')['private-address']
343+ unit = keystone_sentry
344+ service_ip = unit.relation('shared-db',
345+ 'mysql:shared-db')['private-address']
346 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
347 return keystone_client.Client(username=user, password=password,
348 tenant_name=tenant, auth_url=ep)
349@@ -177,12 +199,40 @@
350 image = glance.images.create(name=image_name, is_public=True,
351 disk_format='qcow2',
352 container_format='bare', data=f)
353+ count = 1
354+ status = image.status
355+ while status != 'active' and count < 10:
356+ time.sleep(3)
357+ image = glance.images.get(image.id)
358+ status = image.status
359+ self.log.debug('image status: {}'.format(status))
360+ count += 1
361+
362+ if status != 'active':
363+ self.log.error('image creation timed out')
364+ return None
365+
366 return image
367
368 def delete_image(self, glance, image):
369 """Delete the specified image."""
370+ num_before = len(list(glance.images.list()))
371 glance.images.delete(image)
372
373+ count = 1
374+ num_after = len(list(glance.images.list()))
375+ while num_after != (num_before - 1) and count < 10:
376+ time.sleep(3)
377+ num_after = len(list(glance.images.list()))
378+ self.log.debug('number of images: {}'.format(num_after))
379+ count += 1
380+
381+ if num_after != (num_before - 1):
382+ self.log.error('image deletion timed out')
383+ return False
384+
385+ return True
386+
387 def create_instance(self, nova, image_name, instance_name, flavor):
388 """Create the specified instance."""
389 image = nova.images.find(name=image_name)
390@@ -199,11 +249,27 @@
391 self.log.debug('instance status: {}'.format(status))
392 count += 1
393
394- if status == 'BUILD':
395+ if status != 'ACTIVE':
396+ self.log.error('instance creation timed out')
397 return None
398
399 return instance
400
401 def delete_instance(self, nova, instance):
402 """Delete the specified instance."""
403+ num_before = len(list(nova.servers.list()))
404 nova.servers.delete(instance)
405+
406+ count = 1
407+ num_after = len(list(nova.servers.list()))
408+ while num_after != (num_before - 1) and count < 10:
409+ time.sleep(3)
410+ num_after = len(list(nova.servers.list()))
411+ self.log.debug('number of instances: {}'.format(num_after))
412+ count += 1
413+
414+ if num_after != (num_before - 1):
415+ self.log.error('instance deletion timed out')
416+ return False
417+
418+ return True
419
420=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
421--- hooks/charmhelpers/contrib/openstack/context.py 2014-07-28 11:54:32 +0000
422+++ hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 13:52:53 +0000
423@@ -44,7 +44,10 @@
424 neutron_plugin_attribute,
425 )
426
427-from charmhelpers.contrib.network.ip import get_address_in_network
428+from charmhelpers.contrib.network.ip import (
429+ get_address_in_network,
430+ get_ipv6_addr,
431+)
432
433 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
434
435@@ -401,9 +404,12 @@
436
437 cluster_hosts = {}
438 l_unit = local_unit().replace('/', '-')
439- cluster_hosts[l_unit] = \
440- get_address_in_network(config('os-internal-network'),
441- unit_get('private-address'))
442+ if config('prefer-ipv6'):
443+ addr = get_ipv6_addr()
444+ else:
445+ addr = unit_get('private-address')
446+ cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
447+ addr)
448
449 for rid in relation_ids('cluster'):
450 for unit in related_units(rid):
451@@ -414,6 +420,16 @@
452 ctxt = {
453 'units': cluster_hosts,
454 }
455+
456+ if config('prefer-ipv6'):
457+ ctxt['local_host'] = 'ip6-localhost'
458+ ctxt['haproxy_host'] = '::'
459+ ctxt['stat_port'] = ':::8888'
460+ else:
461+ ctxt['local_host'] = '127.0.0.1'
462+ ctxt['haproxy_host'] = '0.0.0.0'
463+ ctxt['stat_port'] = ':8888'
464+
465 if len(cluster_hosts.keys()) > 1:
466 # Enable haproxy when we have enough peers.
467 log('Ensuring haproxy enabled in /etc/default/haproxy.')
468
469=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
470--- hooks/charmhelpers/contrib/openstack/ip.py 2014-07-28 11:54:32 +0000
471+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 13:52:53 +0000
472@@ -7,6 +7,7 @@
473 get_address_in_network,
474 is_address_in_network,
475 is_ipv6,
476+ get_ipv6_addr,
477 )
478
479 from charmhelpers.contrib.hahelpers.cluster import is_clustered
480@@ -64,10 +65,13 @@
481 vip):
482 resolved_address = vip
483 else:
484+ if config('prefer-ipv6'):
485+ fallback_addr = get_ipv6_addr()
486+ else:
487+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
488 resolved_address = get_address_in_network(
489- config(_address_map[endpoint_type]['config']),
490- unit_get(_address_map[endpoint_type]['fallback'])
491- )
492+ config(_address_map[endpoint_type]['config']), fallback_addr)
493+
494 if resolved_address is None:
495 raise ValueError('Unable to resolve a suitable IP address'
496 ' based on charm state and configuration')
497
498=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
499--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-28 11:54:32 +0000
500+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-08-13 13:52:53 +0000
501@@ -1,6 +1,6 @@
502 global
503- log 127.0.0.1 local0
504- log 127.0.0.1 local1 notice
505+ log {{ local_host }} local0
506+ log {{ local_host }} local1 notice
507 maxconn 20000
508 user haproxy
509 group haproxy
510@@ -17,7 +17,7 @@
511 timeout client 30000
512 timeout server 30000
513
514-listen stats :8888
515+listen stats {{ stat_port }}
516 mode http
517 stats enable
518 stats hide-version
519
520=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
521--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-10 18:04:35 +0000
522+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 13:52:53 +0000
523@@ -46,5 +46,8 @@
524 :returns: boolean: True if the path represents a mounted device, False if
525 it doesn't.
526 '''
527+ is_partition = bool(re.search(r".*[0-9]+\b", device))
528 out = check_output(['mount'])
529+ if is_partition:
530+ return bool(re.search(device + r"\b", out))
531 return bool(re.search(device + r"[0-9]+\b", out))
532
533=== modified file 'hooks/charmhelpers/core/host.py'
534--- hooks/charmhelpers/core/host.py 2014-07-28 11:54:32 +0000
535+++ hooks/charmhelpers/core/host.py 2014-08-13 13:52:53 +0000
536@@ -12,6 +12,8 @@
537 import string
538 import subprocess
539 import hashlib
540+import shutil
541+from contextlib import contextmanager
542
543 from collections import OrderedDict
544
545@@ -52,7 +54,7 @@
546 def service_running(service):
547 """Determine whether a system service is running"""
548 try:
549- output = subprocess.check_output(['service', service, 'status'])
550+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
551 except subprocess.CalledProcessError:
552 return False
553 else:
554@@ -62,6 +64,16 @@
555 return False
556
557
558+def service_available(service_name):
559+ """Determine whether a system service is available"""
560+ try:
561+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
562+ except subprocess.CalledProcessError:
563+ return False
564+ else:
565+ return True
566+
567+
568 def adduser(username, password=None, shell='/bin/bash', system_user=False):
569 """Add a user to the system"""
570 try:
571@@ -329,3 +341,24 @@
572 pkgcache = apt_pkg.Cache()
573 pkg = pkgcache[package]
574 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
575+
576+
577+@contextmanager
578+def chdir(d):
579+ cur = os.getcwd()
580+ try:
581+ yield os.chdir(d)
582+ finally:
583+ os.chdir(cur)
584+
585+
586+def chownr(path, owner, group):
587+ uid = pwd.getpwnam(owner).pw_uid
588+ gid = grp.getgrnam(group).gr_gid
589+
590+ for root, dirs, files in os.walk(path):
591+ for name in dirs + files:
592+ full = os.path.join(root, name)
593+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
594+ if not broken_symlink:
595+ os.chown(full, uid, gid)
596
597=== added directory 'hooks/charmhelpers/core/services'
598=== added file 'hooks/charmhelpers/core/services/__init__.py'
599--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
600+++ hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:52:53 +0000
601@@ -0,0 +1,2 @@
602+from .base import *
603+from .helpers import *
604
605=== added file 'hooks/charmhelpers/core/services/base.py'
606--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
607+++ hooks/charmhelpers/core/services/base.py 2014-08-13 13:52:53 +0000
608@@ -0,0 +1,305 @@
609+import os
610+import re
611+import json
612+from collections import Iterable
613+
614+from charmhelpers.core import host
615+from charmhelpers.core import hookenv
616+
617+
618+__all__ = ['ServiceManager', 'ManagerCallback',
619+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
620+ 'service_restart', 'service_stop']
621+
622+
623+class ServiceManager(object):
624+ def __init__(self, services=None):
625+ """
626+ Register a list of services, given their definitions.
627+
628+ Traditional charm authoring is focused on implementing hooks. That is,
629+ the charm author is thinking in terms of "What hook am I handling; what
630+ does this hook need to do?" However, in most cases, the real question
631+ should be "Do I have the information I need to configure and start this
632+ piece of software and, if so, what are the steps for doing so?" The
633+ ServiceManager framework tries to bring the focus to the data and the
634+ setup tasks, in the most declarative way possible.
635+
636+ Service definitions are dicts in the following formats (all keys except
637+ 'service' are optional)::
638+
639+ {
640+ "service": <service name>,
641+ "required_data": <list of required data contexts>,
642+ "data_ready": <one or more callbacks>,
643+ "data_lost": <one or more callbacks>,
644+ "start": <one or more callbacks>,
645+ "stop": <one or more callbacks>,
646+ "ports": <list of ports to manage>,
647+ }
648+
649+ The 'required_data' list should contain dicts of required data (or
650+ dependency managers that act like dicts and know how to collect the data).
651+ Only when all items in the 'required_data' list are populated are the list
652+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
653+ information.
654+
655+ The 'data_ready' value should be either a single callback, or a list of
656+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
657+ Each callback will be called with the service name as the only parameter.
658+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
659+ are fired.
660+
661+ The 'data_lost' value should be either a single callback, or a list of
662+ callbacks, to be called when a 'required_data' item no longer passes
663+ `is_ready()`. Each callback will be called with the service name as the
664+ only parameter. After all of the 'data_lost' callbacks are called,
665+ the 'stop' callbacks are fired.
666+
667+ The 'start' value should be either a single callback, or a list of
668+ callbacks, to be called when starting the service, after the 'data_ready'
669+ callbacks are complete. Each callback will be called with the service
670+ name as the only parameter. This defaults to
671+ `[host.service_start, services.open_ports]`.
672+
673+ The 'stop' value should be either a single callback, or a list of
674+ callbacks, to be called when stopping the service. If the service is
675+ being stopped because it no longer has all of its 'required_data', this
676+ will be called after all of the 'data_lost' callbacks are complete.
677+ Each callback will be called with the service name as the only parameter.
678+ This defaults to `[services.close_ports, host.service_stop]`.
679+
680+ The 'ports' value should be a list of ports to manage. The default
681+ 'start' handler will open the ports after the service is started,
682+ and the default 'stop' handler will close the ports prior to stopping
683+ the service.
684+
685+
686+ Examples:
687+
688+ The following registers an Upstart service called bingod that depends on
689+ a mongodb relation and which runs a custom `db_migrate` function prior to
690+ restarting the service, and a Runit service called spadesd::
691+
692+ manager = services.ServiceManager([
693+ {
694+ 'service': 'bingod',
695+ 'ports': [80, 443],
696+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
697+ 'data_ready': [
698+ services.template(source='bingod.conf'),
699+ services.template(source='bingod.ini',
700+ target='/etc/bingod.ini',
701+ owner='bingo', perms=0400),
702+ ],
703+ },
704+ {
705+ 'service': 'spadesd',
706+ 'data_ready': services.template(source='spadesd_run.j2',
707+ target='/etc/sv/spadesd/run',
708+ perms=0555),
709+ 'start': runit_start,
710+ 'stop': runit_stop,
711+ },
712+ ])
713+ manager.manage()
714+ """
715+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
716+ self._ready = None
717+ self.services = {}
718+ for service in services or []:
719+ service_name = service['service']
720+ self.services[service_name] = service
721+
722+ def manage(self):
723+ """
724+ Handle the current hook by doing The Right Thing with the registered services.
725+ """
726+ hook_name = hookenv.hook_name()
727+ if hook_name == 'stop':
728+ self.stop_services()
729+ else:
730+ self.provide_data()
731+ self.reconfigure_services()
732+
733+ def provide_data(self):
734+ hook_name = hookenv.hook_name()
735+ for service in self.services.values():
736+ for provider in service.get('provided_data', []):
737+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
738+ data = provider.provide_data()
739+ if provider._is_ready(data):
740+ hookenv.relation_set(None, data)
741+
742+ def reconfigure_services(self, *service_names):
743+ """
744+ Update all files for one or more registered services, and,
745+ if ready, optionally restart them.
746+
747+ If no service names are given, reconfigures all registered services.
748+ """
749+ for service_name in service_names or self.services.keys():
750+ if self.is_ready(service_name):
751+ self.fire_event('data_ready', service_name)
752+ self.fire_event('start', service_name, default=[
753+ service_restart,
754+ manage_ports])
755+ self.save_ready(service_name)
756+ else:
757+ if self.was_ready(service_name):
758+ self.fire_event('data_lost', service_name)
759+ self.fire_event('stop', service_name, default=[
760+ manage_ports,
761+ service_stop])
762+ self.save_lost(service_name)
763+
764+ def stop_services(self, *service_names):
765+ """
766+ Stop one or more registered services, by name.
767+
768+ If no service names are given, stops all registered services.
769+ """
770+ for service_name in service_names or self.services.keys():
771+ self.fire_event('stop', service_name, default=[
772+ manage_ports,
773+ service_stop])
774+
775+ def get_service(self, service_name):
776+ """
777+ Given the name of a registered service, return its service definition.
778+ """
779+ service = self.services.get(service_name)
780+ if not service:
781+ raise KeyError('Service not registered: %s' % service_name)
782+ return service
783+
784+ def fire_event(self, event_name, service_name, default=None):
785+ """
786+ Fire a data_ready, data_lost, start, or stop event on a given service.
787+ """
788+ service = self.get_service(service_name)
789+ callbacks = service.get(event_name, default)
790+ if not callbacks:
791+ return
792+ if not isinstance(callbacks, Iterable):
793+ callbacks = [callbacks]
794+ for callback in callbacks:
795+ if isinstance(callback, ManagerCallback):
796+ callback(self, service_name, event_name)
797+ else:
798+ callback(service_name)
799+
800+ def is_ready(self, service_name):
801+ """
802+ Determine if a registered service is ready, by checking its 'required_data'.
803+
804+ A 'required_data' item can be any mapping type, and is considered ready
805+ if `bool(item)` evaluates as True.
806+ """
807+ service = self.get_service(service_name)
808+ reqs = service.get('required_data', [])
809+ return all(bool(req) for req in reqs)
810+
811+ def _load_ready_file(self):
812+ if self._ready is not None:
813+ return
814+ if os.path.exists(self._ready_file):
815+ with open(self._ready_file) as fp:
816+ self._ready = set(json.load(fp))
817+ else:
818+ self._ready = set()
819+
820+ def _save_ready_file(self):
821+ if self._ready is None:
822+ return
823+ with open(self._ready_file, 'w') as fp:
824+ json.dump(list(self._ready), fp)
825+
826+ def save_ready(self, service_name):
827+ """
828+ Save an indicator that the given service is now data_ready.
829+ """
830+ self._load_ready_file()
831+ self._ready.add(service_name)
832+ self._save_ready_file()
833+
834+ def save_lost(self, service_name):
835+ """
836+ Save an indicator that the given service is no longer data_ready.
837+ """
838+ self._load_ready_file()
839+ self._ready.discard(service_name)
840+ self._save_ready_file()
841+
842+ def was_ready(self, service_name):
843+ """
844+ Determine if the given service was previously data_ready.
845+ """
846+ self._load_ready_file()
847+ return service_name in self._ready
848+
849+
850+class ManagerCallback(object):
851+ """
852+ Special case of a callback that takes the `ServiceManager` instance
853+ in addition to the service name.
854+
855+ Subclasses should implement `__call__` which should accept three parameters:
856+
857+ * `manager` The `ServiceManager` instance
858+ * `service_name` The name of the service it's being triggered for
859+ * `event_name` The name of the event that this callback is handling
860+ """
861+ def __call__(self, manager, service_name, event_name):
862+ raise NotImplementedError()
863+
864+
865+class PortManagerCallback(ManagerCallback):
866+ """
867+ Callback class that will open or close ports, for use as either
868+ a start or stop action.
869+ """
870+ def __call__(self, manager, service_name, event_name):
871+ service = manager.get_service(service_name)
872+ new_ports = service.get('ports', [])
873+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
874+ if os.path.exists(port_file):
875+ with open(port_file) as fp:
876+ old_ports = fp.read().split(',')
877+ for old_port in old_ports:
878+ if bool(old_port):
879+ old_port = int(old_port)
880+ if old_port not in new_ports:
881+ hookenv.close_port(old_port)
882+ with open(port_file, 'w') as fp:
883+ fp.write(','.join(str(port) for port in new_ports))
884+ for port in new_ports:
885+ if event_name == 'start':
886+ hookenv.open_port(port)
887+ elif event_name == 'stop':
888+ hookenv.close_port(port)
889+
890+
891+def service_stop(service_name):
892+ """
893+ Wrapper around host.service_stop to prevent spurious "unknown service"
894+ messages in the logs.
895+ """
896+ if host.service_running(service_name):
897+ host.service_stop(service_name)
898+
899+
900+def service_restart(service_name):
901+ """
902+ Wrapper around host.service_restart to prevent spurious "unknown service"
903+ messages in the logs.
904+ """
905+ if host.service_available(service_name):
906+ if host.service_running(service_name):
907+ host.service_restart(service_name)
908+ else:
909+ host.service_start(service_name)
910+
911+
912+# Convenience aliases
913+open_ports = close_ports = manage_ports = PortManagerCallback()
914
915=== added file 'hooks/charmhelpers/core/services/helpers.py'
916--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
917+++ hooks/charmhelpers/core/services/helpers.py 2014-08-13 13:52:53 +0000
918@@ -0,0 +1,125 @@
919+from charmhelpers.core import hookenv
920+from charmhelpers.core import templating
921+
922+from charmhelpers.core.services.base import ManagerCallback
923+
924+
925+__all__ = ['RelationContext', 'TemplateCallback',
926+ 'render_template', 'template']
927+
928+
929+class RelationContext(dict):
930+ """
931+ Base class for a context generator that gets relation data from juju.
932+
933+ Subclasses must provide the attributes `name`, which is the name of the
934+ interface of interest, `interface`, which is the type of the interface of
935+ interest, and `required_keys`, which is the set of keys required for the
936+ relation to be considered complete. The data for all interfaces matching
 937+ the `name` attribute that are complete will be used to populate the dictionary
938+ values (see `get_data`, below).
939+
940+ The generated context will be namespaced under the interface type, to prevent
941+ potential naming conflicts.
942+ """
943+ name = None
944+ interface = None
945+ required_keys = []
946+
947+ def __init__(self, *args, **kwargs):
948+ super(RelationContext, self).__init__(*args, **kwargs)
949+ self.get_data()
950+
951+ def __bool__(self):
952+ """
953+ Returns True if all of the required_keys are available.
954+ """
955+ return self.is_ready()
956+
957+ __nonzero__ = __bool__
958+
959+ def __repr__(self):
960+ return super(RelationContext, self).__repr__()
961+
962+ def is_ready(self):
963+ """
964+ Returns True if all of the `required_keys` are available from any units.
965+ """
966+ ready = len(self.get(self.name, [])) > 0
967+ if not ready:
968+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
969+ return ready
970+
971+ def _is_ready(self, unit_data):
972+ """
973+ Helper method that tests a set of relation data and returns True if
974+ all of the `required_keys` are present.
975+ """
976+ return set(unit_data.keys()).issuperset(set(self.required_keys))
977+
978+ def get_data(self):
979+ """
980+ Retrieve the relation data for each unit involved in a relation and,
981+ if complete, store it in a list under `self[self.name]`. This
982+ is automatically called when the RelationContext is instantiated.
983+
 984+ The units are sorted lexicographically first by the service ID, then by
985+ the unit ID. Thus, if an interface has two other services, 'db:1'
986+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
987+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
988+ set of data, the relation data for the units will be stored in the
989+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
990+
991+ If you only care about a single unit on the relation, you can just
992+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
993+ support multiple units on a relation, you should iterate over the list,
994+ like::
995+
996+ {% for unit in interface -%}
997+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
998+ {%- endfor %}
999+
1000+ Note that since all sets of relation data from all related services and
1001+ units are in a single list, if you need to know which service or unit a
1002+ set of data came from, you'll need to extend this class to preserve
1003+ that information.
1004+ """
1005+ if not hookenv.relation_ids(self.name):
1006+ return
1007+
1008+ ns = self.setdefault(self.name, [])
1009+ for rid in sorted(hookenv.relation_ids(self.name)):
1010+ for unit in sorted(hookenv.related_units(rid)):
1011+ reldata = hookenv.relation_get(rid=rid, unit=unit)
1012+ if self._is_ready(reldata):
1013+ ns.append(reldata)
1014+
1015+ def provide_data(self):
1016+ """
1017+ Return data to be relation_set for this interface.
1018+ """
1019+ return {}
1020+
1021+
1022+class TemplateCallback(ManagerCallback):
1023+ """
1024+ Callback class that will render a template, for use as a ready action.
1025+ """
1026+ def __init__(self, source, target, owner='root', group='root', perms=0444):
1027+ self.source = source
1028+ self.target = target
1029+ self.owner = owner
1030+ self.group = group
1031+ self.perms = perms
1032+
1033+ def __call__(self, manager, service_name, event_name):
1034+ service = manager.get_service(service_name)
1035+ context = {}
1036+ for ctx in service.get('required_data', []):
1037+ context.update(ctx)
1038+ templating.render(self.source, self.target, context,
1039+ self.owner, self.group, self.perms)
1040+
1041+
1042+# Convenience aliases for templates
1043+render_template = template = TemplateCallback
1044
1045=== added file 'hooks/charmhelpers/core/templating.py'
1046--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
1047+++ hooks/charmhelpers/core/templating.py 2014-08-13 13:52:53 +0000
1048@@ -0,0 +1,51 @@
1049+import os
1050+
1051+from charmhelpers.core import host
1052+from charmhelpers.core import hookenv
1053+
1054+
1055+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
1056+ """
1057+ Render a template.
1058+
1059+ The `source` path, if not absolute, is relative to the `templates_dir`.
1060+
1061+ The `target` path should be absolute.
1062+
1063+ The context should be a dict containing the values to be replaced in the
1064+ template.
1065+
1066+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
1067+
1068+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
1069+
1070+ Note: Using this requires python-jinja2; if it is not installed, calling
1071+ this will attempt to use charmhelpers.fetch.apt_install to install it.
1072+ """
1073+ try:
1074+ from jinja2 import FileSystemLoader, Environment, exceptions
1075+ except ImportError:
1076+ try:
1077+ from charmhelpers.fetch import apt_install
1078+ except ImportError:
1079+ hookenv.log('Could not import jinja2, and could not import '
1080+ 'charmhelpers.fetch to install it',
1081+ level=hookenv.ERROR)
1082+ raise
1083+ apt_install('python-jinja2', fatal=True)
1084+ from jinja2 import FileSystemLoader, Environment, exceptions
1085+
1086+ if templates_dir is None:
1087+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
1088+ loader = Environment(loader=FileSystemLoader(templates_dir))
1089+ try:
1090+ source = source
1091+ template = loader.get_template(source)
1092+ except exceptions.TemplateNotFound as e:
1093+ hookenv.log('Could not load template %s from %s.' %
1094+ (source, templates_dir),
1095+ level=hookenv.ERROR)
1096+ raise e
1097+ content = template.render(context)
1098+ host.mkdir(os.path.dirname(target))
1099+ host.write_file(target, content, owner, group, perms)
1100
1101=== modified file 'hooks/charmhelpers/fetch/__init__.py'
1102--- hooks/charmhelpers/fetch/__init__.py 2014-07-10 18:04:35 +0000
1103+++ hooks/charmhelpers/fetch/__init__.py 2014-08-13 13:52:53 +0000
1104@@ -122,6 +122,7 @@
1105 # Tell apt to build an in-memory cache to prevent race conditions (if
1106 # another process is already building the cache).
1107 apt_pkg.config.set("Dir::Cache::pkgcache", "")
1108+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
1109
1110 cache = apt_pkg.Cache()
1111 _pkgs = []
1112
1113=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
1114--- tests/charmhelpers/contrib/amulet/deployment.py 2014-07-28 11:54:32 +0000
1115+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-08-13 13:52:53 +0000
1116@@ -1,9 +1,14 @@
1117 import amulet
1118
1119+import os
1120+
1121
1122 class AmuletDeployment(object):
1123- """This class provides generic Amulet deployment and test runner
1124- methods."""
1125+ """Amulet deployment.
1126+
1127+ This class provides generic Amulet deployment and test runner
1128+ methods.
1129+ """
1130
1131 def __init__(self, series=None):
1132 """Initialize the deployment environment."""
1133@@ -16,11 +21,19 @@
1134 self.d = amulet.Deployment()
1135
1136 def _add_services(self, this_service, other_services):
1137- """Add services to the deployment where this_service is the local charm
1138+ """Add services.
1139+
1140+ Add services to the deployment where this_service is the local charm
1141 that we're focused on testing and other_services are the other
1142- charms that come from the charm store."""
1143+ charms that come from the charm store.
1144+ """
1145 name, units = range(2)
1146- self.this_service = this_service[name]
1147+
1148+ if this_service[name] != os.path.basename(os.getcwd()):
1149+ s = this_service[name]
1150+ msg = "The charm's root directory name needs to be {}".format(s)
1151+ amulet.raise_status(amulet.FAIL, msg=msg)
1152+
1153 self.d.add(this_service[name], units=this_service[units])
1154
1155 for svc in other_services:
1156@@ -45,10 +58,10 @@
1157 """Deploy environment and wait for all hooks to finish executing."""
1158 try:
1159 self.d.setup()
1160- self.d.sentry.wait()
1161+ self.d.sentry.wait(timeout=900)
1162 except amulet.helpers.TimeoutError:
1163 amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
1164- except:
1165+ except Exception:
1166 raise
1167
1168 def run_tests(self):
1169
1170=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
1171--- tests/charmhelpers/contrib/amulet/utils.py 2014-07-28 11:54:32 +0000
1172+++ tests/charmhelpers/contrib/amulet/utils.py 2014-08-13 13:52:53 +0000
1173@@ -3,12 +3,15 @@
1174 import logging
1175 import re
1176 import sys
1177-from time import sleep
1178+import time
1179
1180
1181 class AmuletUtils(object):
1182- """This class provides common utility functions that are used by Amulet
1183- tests."""
1184+ """Amulet utilities.
1185+
1186+ This class provides common utility functions that are used by Amulet
1187+ tests.
1188+ """
1189
1190 def __init__(self, log_level=logging.ERROR):
1191 self.log = self.get_logger(level=log_level)
1192@@ -17,8 +20,8 @@
1193 """Get a logger object that will log to stdout."""
1194 log = logging
1195 logger = log.getLogger(name)
1196- fmt = \
1197- log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s")
1198+ fmt = log.Formatter("%(asctime)s %(funcName)s "
1199+ "%(levelname)s: %(message)s")
1200
1201 handler = log.StreamHandler(stream=sys.stdout)
1202 handler.setLevel(level)
1203@@ -38,7 +41,7 @@
1204 def valid_url(self, url):
1205 p = re.compile(
1206 r'^(?:http|ftp)s?://'
1207- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa
1208+ r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
1209 r'localhost|'
1210 r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
1211 r'(?::\d+)?'
1212@@ -50,8 +53,11 @@
1213 return False
1214
1215 def validate_services(self, commands):
1216- """Verify the specified services are running on the corresponding
1217- service units."""
1218+ """Validate services.
1219+
1220+ Verify the specified services are running on the corresponding
1221+ service units.
1222+ """
1223 for k, v in commands.iteritems():
1224 for cmd in v:
1225 output, code = k.run(cmd)
1226@@ -66,9 +72,13 @@
1227 config.readfp(io.StringIO(file_contents))
1228 return config
1229
1230- def validate_config_data(self, sentry_unit, config_file, section, expected):
1231- """Verify that the specified section of the config file contains
1232- the expected option key:value pairs."""
1233+ def validate_config_data(self, sentry_unit, config_file, section,
1234+ expected):
1235+ """Validate config file data.
1236+
1237+ Verify that the specified section of the config file contains
1238+ the expected option key:value pairs.
1239+ """
1240 config = self._get_config(sentry_unit, config_file)
1241
1242 if section != 'DEFAULT' and not config.has_section(section):
1243@@ -78,20 +88,23 @@
1244 if not config.has_option(section, k):
1245 return "section [{}] is missing option {}".format(section, k)
1246 if config.get(section, k) != expected[k]:
1247- return "section [{}] {}:{} != expected {}:{}".format(section,
1248- k, config.get(section, k), k, expected[k])
1249+ return "section [{}] {}:{} != expected {}:{}".format(
1250+ section, k, config.get(section, k), k, expected[k])
1251 return None
1252
1253 def _validate_dict_data(self, expected, actual):
1254- """Compare expected dictionary data vs actual dictionary data.
1255+ """Validate dictionary data.
1256+
1257+ Compare expected dictionary data vs actual dictionary data.
1258 The values in the 'expected' dictionary can be strings, bools, ints,
1259 longs, or can be a function that evaluate a variable and returns a
1260- bool."""
1261+ bool.
1262+ """
1263 for k, v in expected.iteritems():
1264 if k in actual:
1265- if isinstance(v, basestring) or \
1266- isinstance(v, bool) or \
1267- isinstance(v, (int, long)):
1268+ if (isinstance(v, basestring) or
1269+ isinstance(v, bool) or
1270+ isinstance(v, (int, long))):
1271 if v != actual[k]:
1272 return "{}:{}".format(k, actual[k])
1273 elif not v(actual[k]):
1274@@ -114,7 +127,7 @@
1275 return None
1276
1277 def not_null(self, string):
1278- if string != None:
1279+ if string is not None:
1280 return True
1281 else:
1282 return False
1283@@ -128,9 +141,12 @@
1284 return sentry_unit.directory_stat(directory)['mtime']
1285
1286 def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
1287- """Determine start time of the process based on the last modification
1288+ """Get process' start time.
1289+
1290+ Determine start time of the process based on the last modification
1291 time of the /proc/pid directory. If pgrep_full is True, the process
1292- name is matched against the full command line."""
1293+ name is matched against the full command line.
1294+ """
1295 if pgrep_full:
1296 cmd = 'pgrep -o -f {}'.format(service)
1297 else:
1298@@ -139,13 +155,16 @@
1299 return self._get_dir_mtime(sentry_unit, proc_dir)
1300
1301 def service_restarted(self, sentry_unit, service, filename,
1302- pgrep_full=False):
1303- """Compare a service's start time vs a file's last modification time
1304+ pgrep_full=False, sleep_time=20):
1305+ """Check if service was restarted.
1306+
1307+ Compare a service's start time vs a file's last modification time
1308 (such as a config file for that service) to determine if the service
1309- has been restarted."""
1310- sleep(10)
1311- if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
1312- self._get_file_mtime(sentry_unit, filename):
1313+ has been restarted.
1314+ """
1315+ time.sleep(sleep_time)
1316+ if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
1317+ self._get_file_mtime(sentry_unit, filename)):
1318 return True
1319 else:
1320 return False
1321
1322=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
1323--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-28 11:54:32 +0000
1324+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-08-13 13:52:53 +0000
1325@@ -4,8 +4,11 @@
1326
1327
1328 class OpenStackAmuletDeployment(AmuletDeployment):
1329- """This class inherits from AmuletDeployment and has additional support
1330- that is specifically for use by OpenStack charms."""
1331+ """OpenStack amulet deployment.
1332+
1333+ This class inherits from AmuletDeployment and has additional support
1334+ that is specifically for use by OpenStack charms.
1335+ """
1336
1337 def __init__(self, series=None, openstack=None, source=None):
1338 """Initialize the deployment environment."""
1339@@ -40,11 +43,14 @@
1340 self.d.configure(service, config)
1341
1342 def _get_openstack_release(self):
1343- """Return an integer representing the enum value of the openstack
1344- release."""
1345- self.precise_essex, self.precise_folsom, self.precise_grizzly, \
1346- self.precise_havana, self.precise_icehouse, \
1347- self.trusty_icehouse = range(6)
1348+ """Get openstack release.
1349+
1350+ Return an integer representing the enum value of the openstack
1351+ release.
1352+ """
1353+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
1354+ self.precise_havana, self.precise_icehouse,
1355+ self.trusty_icehouse) = range(6)
1356 releases = {
1357 ('precise', None): self.precise_essex,
1358 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
1359
1360=== modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
1361--- tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-28 11:54:32 +0000
1362+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-08-13 13:52:53 +0000
1363@@ -16,8 +16,11 @@
1364
1365
1366 class OpenStackAmuletUtils(AmuletUtils):
1367- """This class inherits from AmuletUtils and has additional support
1368- that is specifically for use by OpenStack charms."""
1369+ """OpenStack amulet utilities.
1370+
1371+ This class inherits from AmuletUtils and has additional support
1372+ that is specifically for use by OpenStack charms.
1373+ """
1374
1375 def __init__(self, log_level=ERROR):
1376 """Initialize the deployment environment."""
1377@@ -25,13 +28,17 @@
1378
1379 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
1380 public_port, expected):
1381- """Validate actual endpoint data vs expected endpoint data. The ports
1382- are used to find the matching endpoint."""
1383+ """Validate endpoint data.
1384+
1385+ Validate actual endpoint data vs expected endpoint data. The ports
1386+ are used to find the matching endpoint.
1387+ """
1388 found = False
1389 for ep in endpoints:
1390 self.log.debug('endpoint: {}'.format(repr(ep)))
1391- if admin_port in ep.adminurl and internal_port in ep.internalurl \
1392- and public_port in ep.publicurl:
1393+ if (admin_port in ep.adminurl and
1394+ internal_port in ep.internalurl and
1395+ public_port in ep.publicurl):
1396 found = True
1397 actual = {'id': ep.id,
1398 'region': ep.region,
1399@@ -47,8 +54,11 @@
1400 return 'endpoint not found'
1401
1402 def validate_svc_catalog_endpoint_data(self, expected, actual):
1403- """Validate a list of actual service catalog endpoints vs a list of
1404- expected service catalog endpoints."""
1405+ """Validate service catalog endpoint data.
1406+
1407+ Validate a list of actual service catalog endpoints vs a list of
1408+ expected service catalog endpoints.
1409+ """
1410 self.log.debug('actual: {}'.format(repr(actual)))
1411 for k, v in expected.iteritems():
1412 if k in actual:
1413@@ -60,8 +70,11 @@
1414 return ret
1415
1416 def validate_tenant_data(self, expected, actual):
1417- """Validate a list of actual tenant data vs list of expected tenant
1418- data."""
1419+ """Validate tenant data.
1420+
1421+ Validate a list of actual tenant data vs list of expected tenant
1422+ data.
1423+ """
1424 self.log.debug('actual: {}'.format(repr(actual)))
1425 for e in expected:
1426 found = False
1427@@ -78,8 +91,11 @@
1428 return ret
1429
1430 def validate_role_data(self, expected, actual):
1431- """Validate a list of actual role data vs a list of expected role
1432- data."""
1433+ """Validate role data.
1434+
1435+ Validate a list of actual role data vs a list of expected role
1436+ data.
1437+ """
1438 self.log.debug('actual: {}'.format(repr(actual)))
1439 for e in expected:
1440 found = False
1441@@ -95,8 +111,11 @@
1442 return ret
1443
1444 def validate_user_data(self, expected, actual):
1445- """Validate a list of actual user data vs a list of expected user
1446- data."""
1447+ """Validate user data.
1448+
1449+ Validate a list of actual user data vs a list of expected user
1450+ data.
1451+ """
1452 self.log.debug('actual: {}'.format(repr(actual)))
1453 for e in expected:
1454 found = False
1455@@ -114,21 +133,24 @@
1456 return ret
1457
1458 def validate_flavor_data(self, expected, actual):
1459- """Validate a list of actual flavors vs a list of expected flavors."""
1460+ """Validate flavor data.
1461+
1462+ Validate a list of actual flavors vs a list of expected flavors.
1463+ """
1464 self.log.debug('actual: {}'.format(repr(actual)))
1465 act = [a.name for a in actual]
1466 return self._validate_list_data(expected, act)
1467
1468 def tenant_exists(self, keystone, tenant):
1469- """Return True if tenant exists"""
1470+ """Return True if tenant exists."""
1471 return tenant in [t.name for t in keystone.tenants.list()]
1472
1473 def authenticate_keystone_admin(self, keystone_sentry, user, password,
1474 tenant):
1475 """Authenticates admin user with the keystone admin endpoint."""
1476- service_ip = \
1477- keystone_sentry.relation('shared-db',
1478- 'mysql:shared-db')['private-address']
1479+ unit = keystone_sentry
1480+ service_ip = unit.relation('shared-db',
1481+ 'mysql:shared-db')['private-address']
1482 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
1483 return keystone_client.Client(username=user, password=password,
1484 tenant_name=tenant, auth_url=ep)
1485@@ -177,12 +199,40 @@
1486 image = glance.images.create(name=image_name, is_public=True,
1487 disk_format='qcow2',
1488 container_format='bare', data=f)
1489+ count = 1
1490+ status = image.status
1491+ while status != 'active' and count < 10:
1492+ time.sleep(3)
1493+ image = glance.images.get(image.id)
1494+ status = image.status
1495+ self.log.debug('image status: {}'.format(status))
1496+ count += 1
1497+
1498+ if status != 'active':
1499+ self.log.error('image creation timed out')
1500+ return None
1501+
1502 return image
1503
1504 def delete_image(self, glance, image):
1505 """Delete the specified image."""
1506+ num_before = len(list(glance.images.list()))
1507 glance.images.delete(image)
1508
1509+ count = 1
1510+ num_after = len(list(glance.images.list()))
1511+ while num_after != (num_before - 1) and count < 10:
1512+ time.sleep(3)
1513+ num_after = len(list(glance.images.list()))
1514+ self.log.debug('number of images: {}'.format(num_after))
1515+ count += 1
1516+
1517+ if num_after != (num_before - 1):
1518+ self.log.error('image deletion timed out')
1519+ return False
1520+
1521+ return True
1522+
1523 def create_instance(self, nova, image_name, instance_name, flavor):
1524 """Create the specified instance."""
1525 image = nova.images.find(name=image_name)
1526@@ -199,11 +249,27 @@
1527 self.log.debug('instance status: {}'.format(status))
1528 count += 1
1529
1530- if status == 'BUILD':
1531+ if status != 'ACTIVE':
1532+ self.log.error('instance creation timed out')
1533 return None
1534
1535 return instance
1536
1537 def delete_instance(self, nova, instance):
1538 """Delete the specified instance."""
1539+ num_before = len(list(nova.servers.list()))
1540 nova.servers.delete(instance)
1541+
1542+ count = 1
1543+ num_after = len(list(nova.servers.list()))
1544+ while num_after != (num_before - 1) and count < 10:
1545+ time.sleep(3)
1546+ num_after = len(list(nova.servers.list()))
1547+ self.log.debug('number of instances: {}'.format(num_after))
1548+ count += 1
1549+
1550+ if num_after != (num_before - 1):
1551+ self.log.error('instance deletion timed out')
1552+ return False
1553+
1554+ return True

Subscribers

People subscribed via source and target branches