Merge lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers into lp:charms/trusty/rabbitmq-server

Proposed by Chris Glass
Status: Merged
Merged at revision: 60
Proposed branch: lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers
Merge into: lp:charms/trusty/rabbitmq-server
Diff against target: 3241 lines (+2078/-264)
25 files modified
hooks/charmhelpers/contrib/charmsupport/volumes.py (+5/-2)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+59/-17)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
hooks/charmhelpers/contrib/openstack/context.py (+187/-47)
hooks/charmhelpers/contrib/openstack/ip.py (+79/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+31/-1)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+18/-7)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+77/-29)
hooks/charmhelpers/contrib/ssl/service.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+1/-1)
hooks/charmhelpers/contrib/storage/linux/utils.py (+23/-5)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+132/-7)
hooks/charmhelpers/core/host.py (+100/-12)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+313/-0)
hooks/charmhelpers/core/services/helpers.py (+239/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+192/-90)
hooks/charmhelpers/fetch/archiveurl.py (+49/-4)
hooks/charmhelpers/fetch/bzrurl.py (+2/-1)
hooks/rabbit_utils.py (+8/-16)
To merge this branch: bzr merge lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers
Reviewer Review Type Date Requested Status
David Britton (community) Approve
Review via email: mp+236072@code.launchpad.net

Description of the change

This branch resyncs charm-helpers to make the charm benefit from the in-memory apt-cache index, so as not to run into race conditions with other charms.

It also uses the charm-helpers package version comparison instead of its own (again, to prevent grabbing the apt index lock for nothing).

Similar causes and fixes as https://bugs.launchpad.net/charms/+source/ceph/+bug/1346489

To post a comment you must log in.
Revision history for this message
David Britton (dpb) wrote :

This looks great! I deployed, it worked fine. (since it was more than just a charm-helpers sync I wanted to check). Thanks, Chris!

review: Approve
Revision history for this message
Michael Hudson-Doyle (mwhudson) wrote :

Hi, I'm afraid this broke the amqp-relation-changed hook https://bugs.launchpad.net/charms/+source/rabbitmq-server/+bug/1375084

Revision history for this message
David Britton (dpb) wrote :

Thanks @Michael, I put up a follow-on MP:

https://code.launchpad.net/~davidpbritton/charms/trusty/rabbitmq-server/compare-version-1375084/+merge/236279

On Sun, Sep 28, 2014 at 7:54 PM, Michael Hudson-Doyle <
<email address hidden>> wrote:

> Hi, I'm afraid this broke the amqp-relation-changed hook
> https://bugs.launchpad.net/charms/+source/rabbitmq-server/+bug/1375084
> --
>
> https://code.launchpad.net/~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers/+merge/236072
> You are reviewing the proposed merge of
> lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers into
> lp:charms/trusty/rabbitmq-server.
>

--
David Britton <email address hidden>

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
--- hooks/charmhelpers/contrib/charmsupport/volumes.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 2014-09-26 08:15:24 +0000
@@ -2,7 +2,8 @@
2Functions for managing volumes in juju units. One volume is supported per unit.2Functions for managing volumes in juju units. One volume is supported per unit.
3Subordinates may have their own storage, provided it is on its own partition.3Subordinates may have their own storage, provided it is on its own partition.
44
5Configuration stanzas:5Configuration stanzas::
6
6 volume-ephemeral:7 volume-ephemeral:
7 type: boolean8 type: boolean
8 default: true9 default: true
@@ -20,7 +21,8 @@
20 is 'true' and no volume-map value is set. Use 'juju set' to set a21 is 'true' and no volume-map value is set. Use 'juju set' to set a
21 value and 'juju resolved' to complete configuration.22 value and 'juju resolved' to complete configuration.
2223
23Usage:24Usage::
25
24 from charmsupport.volumes import configure_volume, VolumeConfigurationError26 from charmsupport.volumes import configure_volume, VolumeConfigurationError
25 from charmsupport.hookenv import log, ERROR27 from charmsupport.hookenv import log, ERROR
26 def post_mount_hook():28 def post_mount_hook():
@@ -34,6 +36,7 @@
34 after_change=post_mount_hook)36 after_change=post_mount_hook)
35 except VolumeConfigurationError:37 except VolumeConfigurationError:
36 log('Storage could not be configured', ERROR)38 log('Storage could not be configured', ERROR)
39
37'''40'''
3841
39# XXX: Known limitations42# XXX: Known limitations
4043
=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-09-26 08:15:24 +0000
@@ -6,6 +6,11 @@
6# Adam Gandelman <adamg@ubuntu.com>6# Adam Gandelman <adamg@ubuntu.com>
7#7#
88
9"""
10Helpers for clustering and determining "cluster leadership" and other
11clustering-related helpers.
12"""
13
9import subprocess14import subprocess
10import os15import os
1116
@@ -19,6 +24,7 @@
19 config as config_get,24 config as config_get,
20 INFO,25 INFO,
21 ERROR,26 ERROR,
27 WARNING,
22 unit_get,28 unit_get,
23)29)
2430
@@ -27,6 +33,29 @@
27 pass33 pass
2834
2935
36def is_elected_leader(resource):
37 """
38 Returns True if the charm executing this is the elected cluster leader.
39
40 It relies on two mechanisms to determine leadership:
41 1. If the charm is part of a corosync cluster, call corosync to
42 determine leadership.
43 2. If the charm is not part of a corosync cluster, the leader is
44 determined as being "the alive unit with the lowest unit numer". In
45 other words, the oldest surviving unit.
46 """
47 if is_clustered():
48 if not is_crm_leader(resource):
49 log('Deferring action to CRM leader.', level=INFO)
50 return False
51 else:
52 peers = peer_units()
53 if peers and not oldest_peer(peers):
54 log('Deferring action to oldest service unit.', level=INFO)
55 return False
56 return True
57
58
30def is_clustered():59def is_clustered():
31 for r_id in (relation_ids('ha') or []):60 for r_id in (relation_ids('ha') or []):
32 for unit in (relation_list(r_id) or []):61 for unit in (relation_list(r_id) or []):
@@ -38,7 +67,11 @@
38 return False67 return False
3968
4069
41def is_leader(resource):70def is_crm_leader(resource):
71 """
72 Returns True if the charm calling this is the elected corosync leader,
73 as returned by calling the external "crm" command.
74 """
42 cmd = [75 cmd = [
43 "crm", "resource",76 "crm", "resource",
44 "show", resource77 "show", resource
@@ -54,15 +87,31 @@
54 return False87 return False
5588
5689
57def peer_units():90def is_leader(resource):
91 log("is_leader is deprecated. Please consider using is_crm_leader "
92 "instead.", level=WARNING)
93 return is_crm_leader(resource)
94
95
96def peer_units(peer_relation="cluster"):
58 peers = []97 peers = []
59 for r_id in (relation_ids('cluster') or []):98 for r_id in (relation_ids(peer_relation) or []):
60 for unit in (relation_list(r_id) or []):99 for unit in (relation_list(r_id) or []):
61 peers.append(unit)100 peers.append(unit)
62 return peers101 return peers
63102
64103
104def peer_ips(peer_relation='cluster', addr_key='private-address'):
105 '''Return a dict of peers and their private-address'''
106 peers = {}
107 for r_id in relation_ids(peer_relation):
108 for unit in relation_list(r_id):
109 peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
110 return peers
111
112
65def oldest_peer(peers):113def oldest_peer(peers):
114 """Determines who the oldest peer is by comparing unit numbers."""
66 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])115 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
67 for peer in peers:116 for peer in peers:
68 remote_unit_no = int(peer.split('/')[1])117 remote_unit_no = int(peer.split('/')[1])
@@ -72,16 +121,9 @@
72121
73122
74def eligible_leader(resource):123def eligible_leader(resource):
75 if is_clustered():124 log("eligible_leader is deprecated. Please consider using "
76 if not is_leader(resource):125 "is_elected_leader instead.", level=WARNING)
77 log('Deferring action to CRM leader.', level=INFO)126 return is_elected_leader(resource)
78 return False
79 else:
80 peers = peer_units()
81 if peers and not oldest_peer(peers):
82 log('Deferring action to oldest service unit.', level=INFO)
83 return False
84 return True
85127
86128
87def https():129def https():
@@ -97,10 +139,9 @@
97 return True139 return True
98 for r_id in relation_ids('identity-service'):140 for r_id in relation_ids('identity-service'):
99 for unit in relation_list(r_id):141 for unit in relation_list(r_id):
142 # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
100 rel_state = [143 rel_state = [
101 relation_get('https_keystone', rid=r_id, unit=unit),144 relation_get('https_keystone', rid=r_id, unit=unit),
102 relation_get('ssl_cert', rid=r_id, unit=unit),
103 relation_get('ssl_key', rid=r_id, unit=unit),
104 relation_get('ca_cert', rid=r_id, unit=unit),145 relation_get('ca_cert', rid=r_id, unit=unit),
105 ]146 ]
106 # NOTE: works around (LP: #1203241)147 # NOTE: works around (LP: #1203241)
@@ -146,12 +187,12 @@
146 Obtains all relevant configuration from charm configuration required187 Obtains all relevant configuration from charm configuration required
147 for initiating a relation to hacluster:188 for initiating a relation to hacluster:
148189
149 ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr190 ha-bindiface, ha-mcastport, vip
150191
151 returns: dict: A dict containing settings keyed by setting name.192 returns: dict: A dict containing settings keyed by setting name.
152 raises: HAIncompleteConfig if settings are missing.193 raises: HAIncompleteConfig if settings are missing.
153 '''194 '''
154 settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']195 settings = ['ha-bindiface', 'ha-mcastport', 'vip']
155 conf = {}196 conf = {}
156 for setting in settings:197 for setting in settings:
157 conf[setting] = config_get(setting)198 conf[setting] = config_get(setting)
@@ -170,6 +211,7 @@
170211
171 :configs : OSTemplateRenderer: A config tempating object to inspect for212 :configs : OSTemplateRenderer: A config tempating object to inspect for
172 a complete https context.213 a complete https context.
214
173 :vip_setting: str: Setting in charm config that specifies215 :vip_setting: str: Setting in charm config that specifies
174 VIP address.216 VIP address.
175 '''217 '''
176218
=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,94 @@
1from bzrlib.branch import Branch
2import os
3import re
4from charmhelpers.contrib.amulet.deployment import (
5 AmuletDeployment
6)
7
8
9class OpenStackAmuletDeployment(AmuletDeployment):
10 """OpenStack amulet deployment.
11
12 This class inherits from AmuletDeployment and has additional support
13 that is specifically for use by OpenStack charms.
14 """
15
16 def __init__(self, series=None, openstack=None, source=None):
17 """Initialize the deployment environment."""
18 super(OpenStackAmuletDeployment, self).__init__(series)
19 self.openstack = openstack
20 self.source = source
21
22 def _is_dev_branch(self):
23 """Determine if branch being tested is a dev (i.e. next) branch."""
24 branch = Branch.open(os.getcwd())
25 parent = branch.get_parent()
26 pattern = re.compile("^.*/next/$")
27 if (pattern.match(parent)):
28 return True
29 else:
30 return False
31
32 def _determine_branch_locations(self, other_services):
33 """Determine the branch locations for the other services.
34
35 If the branch being tested is a dev branch, then determine the
36 development branch locations for the other services. Otherwise,
37 the default charm store branches will be used."""
38 name = 0
39 if self._is_dev_branch():
40 updated_services = []
41 for svc in other_services:
42 if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
43 location = 'lp:charms/{}'.format(svc[name])
44 else:
45 temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
46 location = temp.format(svc[name])
47 updated_services.append(svc + (location,))
48 other_services = updated_services
49 return other_services
50
51 def _add_services(self, this_service, other_services):
52 """Add services to the deployment and set openstack-origin/source."""
53 name = 0
54 other_services = self._determine_branch_locations(other_services)
55 super(OpenStackAmuletDeployment, self)._add_services(this_service,
56 other_services)
57 services = other_services
58 services.append(this_service)
59 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
60
61 if self.openstack:
62 for svc in services:
63 if svc[name] not in use_source:
64 config = {'openstack-origin': self.openstack}
65 self.d.configure(svc[name], config)
66
67 if self.source:
68 for svc in services:
69 if svc[name] in use_source:
70 config = {'source': self.source}
71 self.d.configure(svc[name], config)
72
73 def _configure_services(self, configs):
74 """Configure all of the services."""
75 for service, config in configs.iteritems():
76 self.d.configure(service, config)
77
78 def _get_openstack_release(self):
79 """Get openstack release.
80
81 Return an integer representing the enum value of the openstack
82 release.
83 """
84 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
85 self.precise_havana, self.precise_icehouse,
86 self.trusty_icehouse) = range(6)
87 releases = {
88 ('precise', None): self.precise_essex,
89 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
90 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
91 ('precise', 'cloud:precise-havana'): self.precise_havana,
92 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
93 ('trusty', None): self.trusty_icehouse}
94 return releases[(self.series, self.openstack)]
095
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,276 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10from charmhelpers.contrib.amulet.utils import (
11 AmuletUtils
12)
13
14DEBUG = logging.DEBUG
15ERROR = logging.ERROR
16
17
18class OpenStackAmuletUtils(AmuletUtils):
19 """OpenStack amulet utilities.
20
21 This class inherits from AmuletUtils and has additional support
22 that is specifically for use by OpenStack charms.
23 """
24
25 def __init__(self, log_level=ERROR):
26 """Initialize the deployment environment."""
27 super(OpenStackAmuletUtils, self).__init__(log_level)
28
29 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
30 public_port, expected):
31 """Validate endpoint data.
32
33 Validate actual endpoint data vs expected endpoint data. The ports
34 are used to find the matching endpoint.
35 """
36 found = False
37 for ep in endpoints:
38 self.log.debug('endpoint: {}'.format(repr(ep)))
39 if (admin_port in ep.adminurl and
40 internal_port in ep.internalurl and
41 public_port in ep.publicurl):
42 found = True
43 actual = {'id': ep.id,
44 'region': ep.region,
45 'adminurl': ep.adminurl,
46 'internalurl': ep.internalurl,
47 'publicurl': ep.publicurl,
48 'service_id': ep.service_id}
49 ret = self._validate_dict_data(expected, actual)
50 if ret:
51 return 'unexpected endpoint data - {}'.format(ret)
52
53 if not found:
54 return 'endpoint not found'
55
56 def validate_svc_catalog_endpoint_data(self, expected, actual):
57 """Validate service catalog endpoint data.
58
59 Validate a list of actual service catalog endpoints vs a list of
60 expected service catalog endpoints.
61 """
62 self.log.debug('actual: {}'.format(repr(actual)))
63 for k, v in expected.iteritems():
64 if k in actual:
65 ret = self._validate_dict_data(expected[k][0], actual[k][0])
66 if ret:
67 return self.endpoint_error(k, ret)
68 else:
69 return "endpoint {} does not exist".format(k)
70 return ret
71
72 def validate_tenant_data(self, expected, actual):
73 """Validate tenant data.
74
75 Validate a list of actual tenant data vs list of expected tenant
76 data.
77 """
78 self.log.debug('actual: {}'.format(repr(actual)))
79 for e in expected:
80 found = False
81 for act in actual:
82 a = {'enabled': act.enabled, 'description': act.description,
83 'name': act.name, 'id': act.id}
84 if e['name'] == a['name']:
85 found = True
86 ret = self._validate_dict_data(e, a)
87 if ret:
88 return "unexpected tenant data - {}".format(ret)
89 if not found:
90 return "tenant {} does not exist".format(e['name'])
91 return ret
92
93 def validate_role_data(self, expected, actual):
94 """Validate role data.
95
96 Validate a list of actual role data vs a list of expected role
97 data.
98 """
99 self.log.debug('actual: {}'.format(repr(actual)))
100 for e in expected:
101 found = False
102 for act in actual:
103 a = {'name': act.name, 'id': act.id}
104 if e['name'] == a['name']:
105 found = True
106 ret = self._validate_dict_data(e, a)
107 if ret:
108 return "unexpected role data - {}".format(ret)
109 if not found:
110 return "role {} does not exist".format(e['name'])
111 return ret
112
113 def validate_user_data(self, expected, actual):
114 """Validate user data.
115
116 Validate a list of actual user data vs a list of expected user
117 data.
118 """
119 self.log.debug('actual: {}'.format(repr(actual)))
120 for e in expected:
121 found = False
122 for act in actual:
123 a = {'enabled': act.enabled, 'name': act.name,
124 'email': act.email, 'tenantId': act.tenantId,
125 'id': act.id}
126 if e['name'] == a['name']:
127 found = True
128 ret = self._validate_dict_data(e, a)
129 if ret:
130 return "unexpected user data - {}".format(ret)
131 if not found:
132 return "user {} does not exist".format(e['name'])
133 return ret
134
135 def validate_flavor_data(self, expected, actual):
136 """Validate flavor data.
137
138 Validate a list of actual flavors vs a list of expected flavors.
139 """
140 self.log.debug('actual: {}'.format(repr(actual)))
141 act = [a.name for a in actual]
142 return self._validate_list_data(expected, act)
143
144 def tenant_exists(self, keystone, tenant):
145 """Return True if tenant exists."""
146 return tenant in [t.name for t in keystone.tenants.list()]
147
148 def authenticate_keystone_admin(self, keystone_sentry, user, password,
149 tenant):
150 """Authenticates admin user with the keystone admin endpoint."""
151 unit = keystone_sentry
152 service_ip = unit.relation('shared-db',
153 'mysql:shared-db')['private-address']
154 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
155 return keystone_client.Client(username=user, password=password,
156 tenant_name=tenant, auth_url=ep)
157
158 def authenticate_keystone_user(self, keystone, user, password, tenant):
159 """Authenticates a regular user with the keystone public endpoint."""
160 ep = keystone.service_catalog.url_for(service_type='identity',
161 endpoint_type='publicURL')
162 return keystone_client.Client(username=user, password=password,
163 tenant_name=tenant, auth_url=ep)
164
165 def authenticate_glance_admin(self, keystone):
166 """Authenticates admin user with glance."""
167 ep = keystone.service_catalog.url_for(service_type='image',
168 endpoint_type='adminURL')
169 return glance_client.Client(ep, token=keystone.auth_token)
170
171 def authenticate_nova_user(self, keystone, user, password, tenant):
172 """Authenticates a regular user with nova-api."""
173 ep = keystone.service_catalog.url_for(service_type='identity',
174 endpoint_type='publicURL')
175 return nova_client.Client(username=user, api_key=password,
176 project_id=tenant, auth_url=ep)
177
178 def create_cirros_image(self, glance, image_name):
179 """Download the latest cirros image and upload it to glance."""
180 http_proxy = os.getenv('AMULET_HTTP_PROXY')
181 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
182 if http_proxy:
183 proxies = {'http': http_proxy}
184 opener = urllib.FancyURLopener(proxies)
185 else:
186 opener = urllib.FancyURLopener()
187
188 f = opener.open("http://download.cirros-cloud.net/version/released")
189 version = f.read().strip()
190 cirros_img = "cirros-{}-x86_64-disk.img".format(version)
191 local_path = os.path.join('tests', cirros_img)
192
193 if not os.path.exists(local_path):
194 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
195 version, cirros_img)
196 opener.retrieve(cirros_url, local_path)
197 f.close()
198
199 with open(local_path) as f:
200 image = glance.images.create(name=image_name, is_public=True,
201 disk_format='qcow2',
202 container_format='bare', data=f)
203 count = 1
204 status = image.status
205 while status != 'active' and count < 10:
206 time.sleep(3)
207 image = glance.images.get(image.id)
208 status = image.status
209 self.log.debug('image status: {}'.format(status))
210 count += 1
211
212 if status != 'active':
213 self.log.error('image creation timed out')
214 return None
215
216 return image
217
218 def delete_image(self, glance, image):
219 """Delete the specified image."""
220 num_before = len(list(glance.images.list()))
221 glance.images.delete(image)
222
223 count = 1
224 num_after = len(list(glance.images.list()))
225 while num_after != (num_before - 1) and count < 10:
226 time.sleep(3)
227 num_after = len(list(glance.images.list()))
228 self.log.debug('number of images: {}'.format(num_after))
229 count += 1
230
231 if num_after != (num_before - 1):
232 self.log.error('image deletion timed out')
233 return False
234
235 return True
236
237 def create_instance(self, nova, image_name, instance_name, flavor):
238 """Create the specified instance."""
239 image = nova.images.find(name=image_name)
240 flavor = nova.flavors.find(name=flavor)
241 instance = nova.servers.create(name=instance_name, image=image,
242 flavor=flavor)
243
244 count = 1
245 status = instance.status
246 while status != 'ACTIVE' and count < 60:
247 time.sleep(3)
248 instance = nova.servers.get(instance.id)
249 status = instance.status
250 self.log.debug('instance status: {}'.format(status))
251 count += 1
252
253 if status != 'ACTIVE':
254 self.log.error('instance creation timed out')
255 return None
256
257 return instance
258
259 def delete_instance(self, nova, instance):
260 """Delete the specified instance."""
261 num_before = len(list(nova.servers.list()))
262 nova.servers.delete(instance)
263
264 count = 1
265 num_after = len(list(nova.servers.list()))
266 while num_after != (num_before - 1) and count < 10:
267 time.sleep(3)
268 num_after = len(list(nova.servers.list()))
269 self.log.debug('number of instances: {}'.format(num_after))
270 count += 1
271
272 if num_after != (num_before - 1):
273 self.log.error('instance deletion timed out')
274 return False
275
276 return True
0277
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-04-10 16:56:26 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-09-26 08:15:24 +0000
@@ -8,7 +8,6 @@
8 check_call8 check_call
9)9)
1010
11
12from charmhelpers.fetch import (11from charmhelpers.fetch import (
13 apt_install,12 apt_install,
14 filter_installed_packages,13 filter_installed_packages,
@@ -21,9 +20,16 @@
21 relation_get,20 relation_get,
22 relation_ids,21 relation_ids,
23 related_units,22 related_units,
23 relation_set,
24 unit_get,24 unit_get,
25 unit_private_ip,25 unit_private_ip,
26 ERROR,26 ERROR,
27 INFO
28)
29
30from charmhelpers.core.host import (
31 mkdir,
32 write_file
27)33)
2834
29from charmhelpers.contrib.hahelpers.cluster import (35from charmhelpers.contrib.hahelpers.cluster import (
@@ -36,12 +42,19 @@
36from charmhelpers.contrib.hahelpers.apache import (42from charmhelpers.contrib.hahelpers.apache import (
37 get_cert,43 get_cert,
38 get_ca_cert,44 get_ca_cert,
45 install_ca_cert,
39)46)
4047
41from charmhelpers.contrib.openstack.neutron import (48from charmhelpers.contrib.openstack.neutron import (
42 neutron_plugin_attribute,49 neutron_plugin_attribute,
43)50)
4451
52from charmhelpers.contrib.network.ip import (
53 get_address_in_network,
54 get_ipv6_addr,
55 is_address_in_network
56)
57
45CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'58CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
4659
4760
@@ -134,8 +147,26 @@
134 'Missing required charm config options. '147 'Missing required charm config options. '
135 '(database name and user)')148 '(database name and user)')
136 raise OSContextError149 raise OSContextError
150
137 ctxt = {}151 ctxt = {}
138152
153 # NOTE(jamespage) if mysql charm provides a network upon which
154 # access to the database should be made, reconfigure relation
155 # with the service units local address and defer execution
156 access_network = relation_get('access-network')
157 if access_network is not None:
158 if self.relation_prefix is not None:
159 hostname_key = "{}_hostname".format(self.relation_prefix)
160 else:
161 hostname_key = "hostname"
162 access_hostname = get_address_in_network(access_network,
163 unit_get('private-address'))
164 set_hostname = relation_get(attribute=hostname_key,
165 unit=local_unit())
166 if set_hostname != access_hostname:
167 relation_set(relation_settings={hostname_key: access_hostname})
168 return ctxt # Defer any further hook execution for now....
169
139 password_setting = 'password'170 password_setting = 'password'
140 if self.relation_prefix:171 if self.relation_prefix:
141 password_setting = self.relation_prefix + '_password'172 password_setting = self.relation_prefix + '_password'
@@ -243,23 +274,31 @@
243274
244275
245class AMQPContext(OSContextGenerator):276class AMQPContext(OSContextGenerator):
246 interfaces = ['amqp']
247277
248 def __init__(self, ssl_dir=None):278 def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
249 self.ssl_dir = ssl_dir279 self.ssl_dir = ssl_dir
280 self.rel_name = rel_name
281 self.relation_prefix = relation_prefix
282 self.interfaces = [rel_name]
250283
251 def __call__(self):284 def __call__(self):
252 log('Generating template context for amqp')285 log('Generating template context for amqp')
253 conf = config()286 conf = config()
287 user_setting = 'rabbit-user'
288 vhost_setting = 'rabbit-vhost'
289 if self.relation_prefix:
290 user_setting = self.relation_prefix + '-rabbit-user'
291 vhost_setting = self.relation_prefix + '-rabbit-vhost'
292
254 try:293 try:
255 username = conf['rabbit-user']294 username = conf[user_setting]
256 vhost = conf['rabbit-vhost']295 vhost = conf[vhost_setting]
257 except KeyError as e:296 except KeyError as e:
258 log('Could not generate shared_db context. '297 log('Could not generate shared_db context. '
259 'Missing required charm config options: %s.' % e)298 'Missing required charm config options: %s.' % e)
260 raise OSContextError299 raise OSContextError
261 ctxt = {}300 ctxt = {}
262 for rid in relation_ids('amqp'):301 for rid in relation_ids(self.rel_name):
263 ha_vip_only = False302 ha_vip_only = False
264 for unit in related_units(rid):303 for unit in related_units(rid):
265 if relation_get('clustered', rid=rid, unit=unit):304 if relation_get('clustered', rid=rid, unit=unit):
@@ -332,10 +371,12 @@
332 use_syslog = str(config('use-syslog')).lower()371 use_syslog = str(config('use-syslog')).lower()
333 for rid in relation_ids('ceph'):372 for rid in relation_ids('ceph'):
334 for unit in related_units(rid):373 for unit in related_units(rid):
335 mon_hosts.append(relation_get('private-address', rid=rid,
336 unit=unit))
337 auth = relation_get('auth', rid=rid, unit=unit)374 auth = relation_get('auth', rid=rid, unit=unit)
338 key = relation_get('key', rid=rid, unit=unit)375 key = relation_get('key', rid=rid, unit=unit)
376 ceph_addr = \
377 relation_get('ceph-public-address', rid=rid, unit=unit) or \
378 relation_get('private-address', rid=rid, unit=unit)
379 mon_hosts.append(ceph_addr)
339380
340 ctxt = {381 ctxt = {
341 'mon_hosts': ' '.join(mon_hosts),382 'mon_hosts': ' '.join(mon_hosts),
@@ -369,7 +410,12 @@
369410
370 cluster_hosts = {}411 cluster_hosts = {}
371 l_unit = local_unit().replace('/', '-')412 l_unit = local_unit().replace('/', '-')
372 cluster_hosts[l_unit] = unit_get('private-address')413 if config('prefer-ipv6'):
414 addr = get_ipv6_addr()
415 else:
416 addr = unit_get('private-address')
417 cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
418 addr)
373419
374 for rid in relation_ids('cluster'):420 for rid in relation_ids('cluster'):
375 for unit in related_units(rid):421 for unit in related_units(rid):
@@ -380,6 +426,21 @@
380 ctxt = {426 ctxt = {
381 'units': cluster_hosts,427 'units': cluster_hosts,
382 }428 }
429
430 if config('haproxy-server-timeout'):
431 ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout')
432 if config('haproxy-client-timeout'):
433 ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout')
434
435 if config('prefer-ipv6'):
436 ctxt['local_host'] = 'ip6-localhost'
437 ctxt['haproxy_host'] = '::'
438 ctxt['stat_port'] = ':::8888'
439 else:
440 ctxt['local_host'] = '127.0.0.1'
441 ctxt['haproxy_host'] = '0.0.0.0'
442 ctxt['stat_port'] = ':8888'
443
383 if len(cluster_hosts.keys()) > 1:444 if len(cluster_hosts.keys()) > 1:
384 # Enable haproxy when we have enough peers.445 # Enable haproxy when we have enough peers.
385 log('Ensuring haproxy enabled in /etc/default/haproxy.')446 log('Ensuring haproxy enabled in /etc/default/haproxy.')
@@ -418,12 +479,13 @@
418 """479 """
419 Generates a context for an apache vhost configuration that configures480 Generates a context for an apache vhost configuration that configures
420 HTTPS reverse proxying for one or many endpoints. Generated context481 HTTPS reverse proxying for one or many endpoints. Generated context
421 looks something like:482 looks something like::
422 {483
423 'namespace': 'cinder',484 {
424 'private_address': 'iscsi.mycinderhost.com',485 'namespace': 'cinder',
425 'endpoints': [(8776, 8766), (8777, 8767)]486 'private_address': 'iscsi.mycinderhost.com',
426 }487 'endpoints': [(8776, 8766), (8777, 8767)]
488 }
427489
428 The endpoints list consists of a tuples mapping external ports490 The endpoints list consists of a tuples mapping external ports
429 to internal ports.491 to internal ports.
@@ -439,22 +501,36 @@
439 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']501 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
440 check_call(cmd)502 check_call(cmd)
441503
442 def configure_cert(self):504 def configure_cert(self, cn=None):
443 if not os.path.isdir('/etc/apache2/ssl'):
444 os.mkdir('/etc/apache2/ssl')
445 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)505 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
446 if not os.path.isdir(ssl_dir):506 mkdir(path=ssl_dir)
447 os.mkdir(ssl_dir)507 cert, key = get_cert(cn)
448 cert, key = get_cert()508 if cn:
449 with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:509 cert_filename = 'cert_{}'.format(cn)
450 cert_out.write(b64decode(cert))510 key_filename = 'key_{}'.format(cn)
451 with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:511 else:
452 key_out.write(b64decode(key))512 cert_filename = 'cert'
513 key_filename = 'key'
514 write_file(path=os.path.join(ssl_dir, cert_filename),
515 content=b64decode(cert))
516 write_file(path=os.path.join(ssl_dir, key_filename),
517 content=b64decode(key))
518
519 def configure_ca(self):
453 ca_cert = get_ca_cert()520 ca_cert = get_ca_cert()
454 if ca_cert:521 if ca_cert:
455 with open(CA_CERT_PATH, 'w') as ca_out:522 install_ca_cert(b64decode(ca_cert))
456 ca_out.write(b64decode(ca_cert))523
457 check_call(['update-ca-certificates'])524 def canonical_names(self):
525 '''Figure out which canonical names clients will access this service'''
526 cns = []
527 for r_id in relation_ids('identity-service'):
528 for unit in related_units(r_id):
529 rdata = relation_get(rid=r_id, unit=unit)
530 for k in rdata:
531 if k.startswith('ssl_key_'):
532 cns.append(k.lstrip('ssl_key_'))
533 return list(set(cns))
458534
459 def __call__(self):535 def __call__(self):
460 if isinstance(self.external_ports, basestring):536 if isinstance(self.external_ports, basestring):
@@ -462,21 +538,47 @@
462 if (not self.external_ports or not https()):538 if (not self.external_ports or not https()):
463 return {}539 return {}
464540
465 self.configure_cert()541 self.configure_ca()
466 self.enable_modules()542 self.enable_modules()
467543
468 ctxt = {544 ctxt = {
469 'namespace': self.service_namespace,545 'namespace': self.service_namespace,
470 'private_address': unit_get('private-address'),546 'endpoints': [],
471 'endpoints': []547 'ext_ports': []
472 }548 }
473 if is_clustered():549
474 ctxt['private_address'] = config('vip')550 for cn in self.canonical_names():
475 for api_port in self.external_ports:551 self.configure_cert(cn)
476 ext_port = determine_apache_port(api_port)552
477 int_port = determine_api_port(api_port)553 addresses = []
478 portmap = (int(ext_port), int(int_port))554 vips = []
479 ctxt['endpoints'].append(portmap)555 if config('vip'):
556 vips = config('vip').split()
557
558 for network_type in ['os-internal-network',
559 'os-admin-network',
560 'os-public-network']:
561 address = get_address_in_network(config(network_type),
562 unit_get('private-address'))
563 if len(vips) > 0 and is_clustered():
564 for vip in vips:
565 if is_address_in_network(config(network_type),
566 vip):
567 addresses.append((address, vip))
568 break
569 elif is_clustered():
570 addresses.append((address, config('vip')))
571 else:
572 addresses.append((address, address))
573
574 for address, endpoint in set(addresses):
575 for api_port in self.external_ports:
576 ext_port = determine_apache_port(api_port)
577 int_port = determine_api_port(api_port)
578 portmap = (address, endpoint, int(ext_port), int(int_port))
579 ctxt['endpoints'].append(portmap)
580 ctxt['ext_ports'].append(int(ext_port))
581 ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
480 return ctxt582 return ctxt
481583
482584
@@ -541,6 +643,26 @@
541643
542 return nvp_ctxt644 return nvp_ctxt
543645
646 def n1kv_ctxt(self):
647 driver = neutron_plugin_attribute(self.plugin, 'driver',
648 self.network_manager)
649 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
650 self.network_manager)
651 n1kv_ctxt = {
652 'core_plugin': driver,
653 'neutron_plugin': 'n1kv',
654 'neutron_security_groups': self.neutron_security_groups,
655 'local_ip': unit_private_ip(),
656 'config': n1kv_config,
657 'vsm_ip': config('n1kv-vsm-ip'),
658 'vsm_username': config('n1kv-vsm-username'),
659 'vsm_password': config('n1kv-vsm-password'),
660 'restrict_policy_profiles': config(
661 'n1kv_restrict_policy_profiles'),
662 }
663
664 return n1kv_ctxt
665
544 def neutron_ctxt(self):666 def neutron_ctxt(self):
545 if https():667 if https():
546 proto = 'https'668 proto = 'https'
@@ -570,8 +692,10 @@
570692
571 if self.plugin == 'ovs':693 if self.plugin == 'ovs':
572 ctxt.update(self.ovs_ctxt())694 ctxt.update(self.ovs_ctxt())
573 elif self.plugin == 'nvp':695 elif self.plugin in ['nvp', 'nsx']:
574 ctxt.update(self.nvp_ctxt())696 ctxt.update(self.nvp_ctxt())
697 elif self.plugin == 'n1kv':
698 ctxt.update(self.n1kv_ctxt())
575699
576 alchemy_flags = config('neutron-alchemy-flags')700 alchemy_flags = config('neutron-alchemy-flags')
577 if alchemy_flags:701 if alchemy_flags:
@@ -611,7 +735,7 @@
611 The subordinate interface allows subordinates to export their735 The subordinate interface allows subordinates to export their
612 configuration requirements to the principle for multiple config736 configuration requirements to the principle for multiple config
613 files and multiple serivces. Ie, a subordinate that has interfaces737 files and multiple serivces. Ie, a subordinate that has interfaces
614 to both glance and nova may export to following yaml blob as json:738 to both glance and nova may export to following yaml blob as json::
615739
616 glance:740 glance:
617 /etc/glance/glance-api.conf:741 /etc/glance/glance-api.conf:
@@ -630,7 +754,8 @@
630754
631 It is then up to the principle charms to subscribe this context to755 It is then up to the principle charms to subscribe this context to
632 the service+config file it is interestd in. Configuration data will756 the service+config file it is interestd in. Configuration data will
633 be available in the template context, in glance's case, as:757 be available in the template context, in glance's case, as::
758
634 ctxt = {759 ctxt = {
635 ... other context ...760 ... other context ...
636 'subordinate_config': {761 'subordinate_config': {
@@ -657,7 +782,7 @@
657 self.interface = interface782 self.interface = interface
658783
659 def __call__(self):784 def __call__(self):
660 ctxt = {}785 ctxt = {'sections': {}}
661 for rid in relation_ids(self.interface):786 for rid in relation_ids(self.interface):
662 for unit in related_units(rid):787 for unit in related_units(rid):
663 sub_config = relation_get('subordinate_configuration',788 sub_config = relation_get('subordinate_configuration',
@@ -683,11 +808,26 @@
683808
684 sub_config = sub_config[self.config_file]809 sub_config = sub_config[self.config_file]
685 for k, v in sub_config.iteritems():810 for k, v in sub_config.iteritems():
686 ctxt[k] = v811 if k == 'sections':
687812 for section, config_dict in v.iteritems():
688 if not ctxt:813 log("adding section '%s'" % (section))
689 ctxt['sections'] = {}814 ctxt[k][section] = config_dict
690815 else:
816 ctxt[k] = v
817
818 log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
819
820 return ctxt
821
822
823class LogLevelContext(OSContextGenerator):
824
825 def __call__(self):
826 ctxt = {}
827 ctxt['debug'] = \
828 False if config('debug') is None else config('debug')
829 ctxt['verbose'] = \
830 False if config('verbose') is None else config('verbose')
691 return ctxt831 return ctxt
692832
693833
694834
=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,79 @@
1from charmhelpers.core.hookenv import (
2 config,
3 unit_get,
4)
5
6from charmhelpers.contrib.network.ip import (
7 get_address_in_network,
8 is_address_in_network,
9 is_ipv6,
10 get_ipv6_addr,
11)
12
13from charmhelpers.contrib.hahelpers.cluster import is_clustered
14
15PUBLIC = 'public'
16INTERNAL = 'int'
17ADMIN = 'admin'
18
19_address_map = {
20 PUBLIC: {
21 'config': 'os-public-network',
22 'fallback': 'public-address'
23 },
24 INTERNAL: {
25 'config': 'os-internal-network',
26 'fallback': 'private-address'
27 },
28 ADMIN: {
29 'config': 'os-admin-network',
30 'fallback': 'private-address'
31 }
32}
33
34
35def canonical_url(configs, endpoint_type=PUBLIC):
36 '''
37 Returns the correct HTTP URL to this host given the state of HTTPS
38 configuration, hacluster and charm configuration.
39
40 :configs OSTemplateRenderer: A config tempating object to inspect for
41 a complete https context.
42 :endpoint_type str: The endpoint type to resolve.
43
44 :returns str: Base URL for services on the current service unit.
45 '''
46 scheme = 'http'
47 if 'https' in configs.complete_contexts():
48 scheme = 'https'
49 address = resolve_address(endpoint_type)
50 if is_ipv6(address):
51 address = "[{}]".format(address)
52 return '%s://%s' % (scheme, address)
53
54
55def resolve_address(endpoint_type=PUBLIC):
56 resolved_address = None
57 if is_clustered():
58 if config(_address_map[endpoint_type]['config']) is None:
59 # Assume vip is simple and pass back directly
60 resolved_address = config('vip')
61 else:
62 for vip in config('vip').split():
63 if is_address_in_network(
64 config(_address_map[endpoint_type]['config']),
65 vip):
66 resolved_address = vip
67 else:
68 if config('prefer-ipv6'):
69 fallback_addr = get_ipv6_addr()
70 else:
71 fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
72 resolved_address = get_address_in_network(
73 config(_address_map[endpoint_type]['config']), fallback_addr)
74
75 if resolved_address is None:
76 raise ValueError('Unable to resolve a suitable IP address'
77 ' based on charm state and configuration')
78 else:
79 return resolved_address
080
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-03-27 12:33:12 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-09-26 08:15:24 +0000
@@ -114,14 +114,44 @@
114 'server_packages': ['neutron-server',114 'server_packages': ['neutron-server',
115 'neutron-plugin-nicira'],115 'neutron-plugin-nicira'],
116 'server_services': ['neutron-server']116 'server_services': ['neutron-server']
117 },
118 'nsx': {
119 'config': '/etc/neutron/plugins/vmware/nsx.ini',
120 'driver': 'vmware',
121 'contexts': [
122 context.SharedDBContext(user=config('neutron-database-user'),
123 database=config('neutron-database'),
124 relation_prefix='neutron',
125 ssl_dir=NEUTRON_CONF_DIR)],
126 'services': [],
127 'packages': [],
128 'server_packages': ['neutron-server',
129 'neutron-plugin-vmware'],
130 'server_services': ['neutron-server']
131 },
132 'n1kv': {
133 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
134 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
135 'contexts': [
136 context.SharedDBContext(user=config('neutron-database-user'),
137 database=config('neutron-database'),
138 relation_prefix='neutron',
139 ssl_dir=NEUTRON_CONF_DIR)],
140 'services': [],
141 'packages': [['neutron-plugin-cisco']],
142 'server_packages': ['neutron-server',
143 'neutron-plugin-cisco'],
144 'server_services': ['neutron-server']
117 }145 }
118 }146 }
119 # NOTE: patch in ml2 plugin for icehouse onwards
120 if release >= 'icehouse':147 if release >= 'icehouse':
148 # NOTE: patch in ml2 plugin for icehouse onwards
121 plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'149 plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
122 plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'150 plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
123 plugins['ovs']['server_packages'] = ['neutron-server',151 plugins['ovs']['server_packages'] = ['neutron-server',
124 'neutron-plugin-ml2']152 'neutron-plugin-ml2']
153 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
154 plugins['nvp'] = plugins['nsx']
125 return plugins155 return plugins
126156
127157
128158
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-09-26 08:15:24 +0000
@@ -30,17 +30,17 @@
30 loading dir.30 loading dir.
3131
32 A charm may also ship a templates dir with this module32 A charm may also ship a templates dir with this module
33 and it will be appended to the bottom of the search list, eg:33 and it will be appended to the bottom of the search list, eg::
34 hooks/charmhelpers/contrib/openstack/templates.34
3535 hooks/charmhelpers/contrib/openstack/templates
36 :param templates_dir: str: Base template directory containing release36
37 sub-directories.37 :param templates_dir (str): Base template directory containing release
38 :param os_release : str: OpenStack release codename to construct template38 sub-directories.
39 loader.39 :param os_release (str): OpenStack release codename to construct template
4040 loader.
41 :returns : jinja2.ChoiceLoader constructed with a list of41 :returns: jinja2.ChoiceLoader constructed with a list of
42 jinja2.FilesystemLoaders, ordered in descending42 jinja2.FilesystemLoaders, ordered in descending
43 order by OpenStack release.43 order by OpenStack release.
44 """44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in OPENSTACK_CODENAMES.itervalues()]46 for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@
111 and ease the burden of managing config templates across multiple OpenStack111 and ease the burden of managing config templates across multiple OpenStack
112 releases.112 releases.
113113
114 Basic usage:114 Basic usage::
115
115 # import some common context generates from charmhelpers116 # import some common context generates from charmhelpers
116 from charmhelpers.contrib.openstack import context117 from charmhelpers.contrib.openstack import context
117118
@@ -131,21 +132,19 @@
131 # write out all registered configs132 # write out all registered configs
132 configs.write_all()133 configs.write_all()
133134
134 Details:135 **OpenStack Releases and template loading**
135136
136 OpenStack Releases and template loading
137 ---------------------------------------
138 When the object is instantiated, it is associated with a specific OS137 When the object is instantiated, it is associated with a specific OS
139 release. This dictates how the template loader will be constructed.138 release. This dictates how the template loader will be constructed.
140139
141 The constructed loader attempts to load the template from several places140 The constructed loader attempts to load the template from several places
142 in the following order:141 in the following order:
143 - from the most recent OS release-specific template dir (if one exists)142 - from the most recent OS release-specific template dir (if one exists)
144 - the base templates_dir143 - the base templates_dir
145 - a template directory shipped in the charm with this helper file.144 - a template directory shipped in the charm with this helper file.
146145
147146 For the example above, '/tmp/templates' contains the following structure::
148 For the example above, '/tmp/templates' contains the following structure:147
149 /tmp/templates/nova.conf148 /tmp/templates/nova.conf
150 /tmp/templates/api-paste.ini149 /tmp/templates/api-paste.ini
151 /tmp/templates/grizzly/api-paste.ini150 /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@
169 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows168 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
170 us to ship common templates (haproxy, apache) with the helpers.169 us to ship common templates (haproxy, apache) with the helpers.
171170
172 Context generators171 **Context generators**
173 ---------------------------------------172
174 Context generators are used to generate template contexts during hook173 Context generators are used to generate template contexts during hook
175 execution. Doing so may require inspecting service relations, charm174 execution. Doing so may require inspecting service relations, charm
176 config, etc. When registered, a config file is associated with a list175 config, etc. When registered, a config file is associated with a list
177176
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-04-10 16:56:26 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-09-26 08:15:24 +0000
@@ -3,7 +3,6 @@
3# Common python helper functions used for OpenStack charms.3# Common python helper functions used for OpenStack charms.
4from collections import OrderedDict4from collections import OrderedDict
55
6import apt_pkg as apt
7import subprocess6import subprocess
8import os7import os
9import socket8import socket
@@ -24,7 +23,7 @@
24)23)
2524
26from charmhelpers.core.host import lsb_release, mounts, umount25from charmhelpers.core.host import lsb_release, mounts, umount
27from charmhelpers.fetch import apt_install26from charmhelpers.fetch import apt_install, apt_cache
28from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk27from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
29from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device28from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
3029
@@ -41,7 +40,8 @@
41 ('quantal', 'folsom'),40 ('quantal', 'folsom'),
42 ('raring', 'grizzly'),41 ('raring', 'grizzly'),
43 ('saucy', 'havana'),42 ('saucy', 'havana'),
44 ('trusty', 'icehouse')43 ('trusty', 'icehouse'),
44 ('utopic', 'juno'),
45])45])
4646
4747
@@ -52,6 +52,7 @@
52 ('2013.1', 'grizzly'),52 ('2013.1', 'grizzly'),
53 ('2013.2', 'havana'),53 ('2013.2', 'havana'),
54 ('2014.1', 'icehouse'),54 ('2014.1', 'icehouse'),
55 ('2014.2', 'juno'),
55])56])
5657
57# The ugly duckling58# The ugly duckling
@@ -69,6 +70,7 @@
69 ('1.13.0', 'icehouse'),70 ('1.13.0', 'icehouse'),
70 ('1.12.0', 'icehouse'),71 ('1.12.0', 'icehouse'),
71 ('1.11.0', 'icehouse'),72 ('1.11.0', 'icehouse'),
73 ('2.0.0', 'juno'),
72])74])
7375
74DEFAULT_LOOPBACK_SIZE = '5G'76DEFAULT_LOOPBACK_SIZE = '5G'
@@ -83,6 +85,8 @@
83 '''Derive OpenStack release codename from a given installation source.'''85 '''Derive OpenStack release codename from a given installation source.'''
84 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']86 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
85 rel = ''87 rel = ''
88 if src is None:
89 return rel
86 if src in ['distro', 'distro-proposed']:90 if src in ['distro', 'distro-proposed']:
87 try:91 try:
88 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]92 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -130,8 +134,9 @@
130134
131def get_os_codename_package(package, fatal=True):135def get_os_codename_package(package, fatal=True):
132 '''Derive OpenStack release codename from an installed package.'''136 '''Derive OpenStack release codename from an installed package.'''
133 apt.init()137 import apt_pkg as apt
134 cache = apt.Cache()138
139 cache = apt_cache()
135140
136 try:141 try:
137 pkg = cache[package]142 pkg = cache[package]
@@ -182,8 +187,8 @@
182 for version, cname in vers_map.iteritems():187 for version, cname in vers_map.iteritems():
183 if cname == codename:188 if cname == codename:
184 return version189 return version
185 #e = "Could not determine OpenStack version for package: %s" % pkg190 # e = "Could not determine OpenStack version for package: %s" % pkg
186 #error_out(e)191 # error_out(e)
187192
188193
189os_rel = None194os_rel = None
@@ -268,6 +273,9 @@
268 'icehouse': 'precise-updates/icehouse',273 'icehouse': 'precise-updates/icehouse',
269 'icehouse/updates': 'precise-updates/icehouse',274 'icehouse/updates': 'precise-updates/icehouse',
270 'icehouse/proposed': 'precise-proposed/icehouse',275 'icehouse/proposed': 'precise-proposed/icehouse',
276 'juno': 'trusty-updates/juno',
277 'juno/updates': 'trusty-updates/juno',
278 'juno/proposed': 'trusty-proposed/juno',
271 }279 }
272280
273 try:281 try:
@@ -315,6 +323,7 @@
315323
316 """324 """
317325
326 import apt_pkg as apt
318 src = config('openstack-origin')327 src = config('openstack-origin')
319 cur_vers = get_os_version_package(package)328 cur_vers = get_os_version_package(package)
320 available_vers = get_os_version_install_source(src)329 available_vers = get_os_version_install_source(src)
@@ -401,6 +410,8 @@
401 rtype = 'PTR'410 rtype = 'PTR'
402 elif isinstance(address, basestring):411 elif isinstance(address, basestring):
403 rtype = 'A'412 rtype = 'A'
413 else:
414 return None
404415
405 answers = dns.resolver.query(address, rtype)416 answers = dns.resolver.query(address, rtype)
406 if answers:417 if answers:
407418
=== modified file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
--- hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-03-10 11:38:19 +0000
+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-09-26 08:15:24 +0000
@@ -1,44 +1,44 @@
1from charmhelpers.core.hookenv import relation_id as current_relation_id
1from charmhelpers.core.hookenv import (2from charmhelpers.core.hookenv import (
3 is_relation_made,
2 relation_ids,4 relation_ids,
3 relation_get,5 relation_get,
4 local_unit,6 local_unit,
5 relation_set,7 relation_set,
6)8)
79
10
8"""11"""
9This helper provides functions to support use of a peer relation12This helper provides functions to support use of a peer relation
10for basic key/value storage, with the added benefit that all storage13for basic key/value storage, with the added benefit that all storage
11can be replicated across peer units, so this is really useful for14can be replicated across peer units.
12services that issue usernames/passwords to remote services.15
1316Requirement to use:
14def shared_db_changed()17
15 # Only the lead unit should create passwords18To use this, the "peer_echo()" method has to be called form the peer
16 if not is_leader():19relation's relation-changed hook:
17 return20
18 username = relation_get('username')21@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
19 key = '{}.password'.format(username)22def cluster_relation_changed():
20 # Attempt to retrieve any existing password for this user
21 password = peer_retrieve(key)
22 if password is None:
23 # New user, create password and store
24 password = pwgen(length=64)
25 peer_store(key, password)
26 create_access(username, password)
27 relation_set(password=password)
28
29
30def cluster_changed()
31 # Echo any relation data other that *-address
32 # back onto the peer relation so all units have
33 # all *.password keys stored on their local relation
34 # for later retrieval.
35 peer_echo()23 peer_echo()
3624
25Once this is done, you can use peer storage from anywhere:
26
27@hooks.hook("some-hook")
28def some_hook():
29 # You can store and retrieve key/values this way:
30 if is_relation_made("cluster"): # from charmhelpers.core.hookenv
31 # There are peers available so we can work with peer storage
32 peer_store("mykey", "myvalue")
33 value = peer_retrieve("mykey")
34 print value
35 else:
36 print "No peers joind the relation, cannot share key/values :("
37"""37"""
3838
3939
40def peer_retrieve(key, relation_name='cluster'):40def peer_retrieve(key, relation_name='cluster'):
41 """ Retrieve a named key from peer relation relation_name """41 """Retrieve a named key from peer relation `relation_name`."""
42 cluster_rels = relation_ids(relation_name)42 cluster_rels = relation_ids(relation_name)
43 if len(cluster_rels) > 0:43 if len(cluster_rels) > 0:
44 cluster_rid = cluster_rels[0]44 cluster_rid = cluster_rels[0]
@@ -49,8 +49,26 @@
49 'peer relation {}'.format(relation_name))49 'peer relation {}'.format(relation_name))
5050
5151
52def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
53 inc_list=None, exc_list=None):
54 """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
55 inc_list = inc_list if inc_list else []
56 exc_list = exc_list if exc_list else []
57 peerdb_settings = peer_retrieve('-', relation_name=relation_name)
58 matched = {}
59 for k, v in peerdb_settings.items():
60 full_prefix = prefix + delimiter
61 if k.startswith(full_prefix):
62 new_key = k.replace(full_prefix, '')
63 if new_key in exc_list:
64 continue
65 if new_key in inc_list or len(inc_list) == 0:
66 matched[new_key] = v
67 return matched
68
69
52def peer_store(key, value, relation_name='cluster'):70def peer_store(key, value, relation_name='cluster'):
53 """ Store the key/value pair on the named peer relation relation_name """71 """Store the key/value pair on the named peer relation `relation_name`."""
54 cluster_rels = relation_ids(relation_name)72 cluster_rels = relation_ids(relation_name)
55 if len(cluster_rels) > 0:73 if len(cluster_rels) > 0:
56 cluster_rid = cluster_rels[0]74 cluster_rid = cluster_rels[0]
@@ -62,10 +80,10 @@
6280
6381
64def peer_echo(includes=None):82def peer_echo(includes=None):
65 """Echo filtered attributes back onto the same relation for storage83 """Echo filtered attributes back onto the same relation for storage.
6684
67 Note that this helper must only be called within a peer relation85 This is a requirement to use the peerstorage module - it needs to be called
68 changed hook86 from the peer relation's changed hook.
69 """87 """
70 rdata = relation_get()88 rdata = relation_get()
71 echo_data = {}89 echo_data = {}
@@ -81,3 +99,33 @@
81 echo_data[attribute] = value99 echo_data[attribute] = value
82 if len(echo_data) > 0:100 if len(echo_data) > 0:
83 relation_set(relation_settings=echo_data)101 relation_set(relation_settings=echo_data)
102
103
104def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
105 peer_store_fatal=False, relation_settings=None,
106 delimiter='_', **kwargs):
107 """Store passed-in arguments both in argument relation and in peer storage.
108
109 It functions like doing relation_set() and peer_store() at the same time,
110 with the same data.
111
112 @param relation_id: the id of the relation to store the data on. Defaults
113 to the current relation.
114 @param peer_store_fatal: Set to True, the function will raise an exception
115 should the peer sotrage not be avialable."""
116
117 relation_settings = relation_settings if relation_settings else {}
118 relation_set(relation_id=relation_id,
119 relation_settings=relation_settings,
120 **kwargs)
121 if is_relation_made(peer_relation_name):
122 for key, value in dict(kwargs.items() +
123 relation_settings.items()).iteritems():
124 key_prefix = relation_id or current_relation_id()
125 peer_store(key_prefix + delimiter + key,
126 value,
127 relation_name=peer_relation_name)
128 else:
129 if peer_store_fatal:
130 raise ValueError('Unable to detect '
131 'peer relation {}'.format(peer_relation_name))
84132
=== modified file 'hooks/charmhelpers/contrib/ssl/service.py'
--- hooks/charmhelpers/contrib/ssl/service.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/contrib/ssl/service.py 2014-09-26 08:15:24 +0000
@@ -127,7 +127,7 @@
127 return self.get_certificate(common_name)127 return self.get_certificate(common_name)
128128
129 def get_certificate(self, common_name):129 def get_certificate(self, common_name):
130 if not common_name in self:130 if common_name not in self:
131 raise ValueError("No certificate for %s" % common_name)131 raise ValueError("No certificate for %s" % common_name)
132 key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)132 key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
133 crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)133 crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
134134
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-09-26 08:15:24 +0000
@@ -303,7 +303,7 @@
303 blk_device, fstype, system_services=[]):303 blk_device, fstype, system_services=[]):
304 """304 """
305 NOTE: This function must only be called from a single service unit for305 NOTE: This function must only be called from a single service unit for
306 the same rbd_img otherwise data loss will occur.306 the same rbd_img otherwise data loss will occur.
307307
308 Ensures given pool and RBD image exists, is mapped to a block device,308 Ensures given pool and RBD image exists, is mapped to a block device,
309 and the device is formatted and mounted at the given mount_point.309 and the device is formatted and mounted at the given mount_point.
310310
=== modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-09-26 08:15:24 +0000
@@ -62,7 +62,7 @@
62 pvd = check_output(['pvdisplay', block_device]).splitlines()62 pvd = check_output(['pvdisplay', block_device]).splitlines()
63 for l in pvd:63 for l in pvd:
64 if l.strip().startswith('VG Name'):64 if l.strip().startswith('VG Name'):
65 vg = ' '.join(l.split()).split(' ').pop()65 vg = ' '.join(l.strip().split()[2:])
66 return vg66 return vg
6767
6868
6969
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-02 13:03:56 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-09-26 08:15:24 +0000
@@ -1,4 +1,5 @@
1from os import stat1import os
2import re
2from stat import S_ISBLK3from stat import S_ISBLK
34
4from subprocess import (5from subprocess import (
@@ -14,7 +15,9 @@
1415
15 :returns: boolean: True if path is a block device, False if not.16 :returns: boolean: True if path is a block device, False if not.
16 '''17 '''
17 return S_ISBLK(stat(path).st_mode)18 if not os.path.exists(path):
19 return False
20 return S_ISBLK(os.stat(path).st_mode)
1821
1922
20def zap_disk(block_device):23def zap_disk(block_device):
@@ -29,7 +32,22 @@
29 '--clear', block_device])32 '--clear', block_device])
30 dev_end = check_output(['blockdev', '--getsz', block_device])33 dev_end = check_output(['blockdev', '--getsz', block_device])
31 gpt_end = int(dev_end.split()[0]) - 10034 gpt_end = int(dev_end.split()[0]) - 100
32 check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),35 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
33 'bs=1M', 'count=1'])36 'bs=1M', 'count=1'])
34 check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),37 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
35 'bs=512', 'count=100', 'seek=%s'%(gpt_end)])38 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
39
40
41def is_device_mounted(device):
42 '''Given a device path, return True if that device is mounted, and False
43 if it isn't.
44
45 :param device: str: Full path of the device to check.
46 :returns: boolean: True if the path represents a mounted device, False if
47 it doesn't.
48 '''
49 is_partition = bool(re.search(r".*[0-9]+\b", device))
50 out = check_output(['mount'])
51 if is_partition:
52 return bool(re.search(device + r"\b", out))
53 return bool(re.search(device + r"[0-9]+\b", out))
3654
=== added file 'hooks/charmhelpers/core/fstab.py'
--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/fstab.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,116 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
5
6import os
7
8
class Fstab(file):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`

    NOTE(review): subclassing the ``file`` builtin makes this class
    Python 2 only; ``file`` does not exist in Python 3.
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file

        The six whitespace-separated fstab fields map to the attributes
        device, mountpoint, filesystem, options, d and p (the last two are
        presumably the dump and fsck-pass columns -- TODO confirm).
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            # fstab requires a non-empty options field; fall back to the
            # conventional "defaults" keyword.
            if not options:
                options = "defaults"

            self.options = options
            self.d = d
            self.p = p

        def __eq__(self, o):
            # Two entries are equal when their rendered fstab lines match.
            return str(self) == str(o)

        def __str__(self):
            # Render as a single space-separated fstab line.
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    # Absolute path of the system fstab, built from path components.
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        # Open the (possibly overridden) fstab file read/write; the
        # instance itself is the open file handle.
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        """Parse one fstab line into a Fstab.Entry.

        Raises ValueError (propagated from Entry.__init__ argument
        unpacking) when the line does not have the expected field count.
        """
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Iterate over the parsed, non-comment entries of the file."""
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Malformed line (wrong field count); skip it silently.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first entry whose ``attr`` equals ``value``, or None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append ``entry`` to the file.

        :returns: the entry on success, or False if an entry for the same
            device already exists.
        """
        if self.get_entry_by_attr('device', entry.device):
            return False

        # The entries iteration above leaves the file position at EOF, so
        # this write appends -- TODO confirm this holds on all code paths.
        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching ``entry``.

        :returns: True if a matching line was found and removed, else False.
        """
        self.seek(0)

        lines = self.readlines()

        found = False
        for index, line in enumerate(lines):
            if not line.startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        # `line` still holds the matched raw line after the break above.
        lines.remove(line)

        # Rewrite the remaining lines from the start and drop the tail.
        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at ``mountpoint`` from file ``path``."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Add an entry for ``device`` to file ``path`` (default /etc/fstab)."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
0117
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-09-26 08:15:24 +0000
@@ -25,7 +25,7 @@
25def cached(func):25def cached(func):
26 """Cache return values for multiple executions of func + args26 """Cache return values for multiple executions of func + args
2727
28 For example:28 For example::
2929
30 @cached30 @cached
31 def unit_get(attribute):31 def unit_get(attribute):
@@ -155,6 +155,121 @@
155 return os.path.basename(sys.argv[0])155 return os.path.basename(sys.argv[0])
156156
157157
class Config(dict):
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'


        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'

    """
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        # When True, hook frameworks (Hooks.execute, ServiceManager.manage)
        # call save() automatically after a successful hook run.
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        # A previous save() leaves a JSON snapshot behind; load it so
        # changed()/previous() have something to compare against.
        if os.path.exists(self.path):
            self.load_previous()

    def __getitem__(self, key):
        """For regular dict lookups, check the current juju config first,
        then the previous (saved) copy. This ensures that user-saved values
        will be returned by a dict lookup.

        """
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            # Fall back to the persisted snapshot; raises KeyError when
            # there is no snapshot or the key is absent there too.
            return (self._prev_dict or {})[key]

    def load_previous(self, path=None):
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.

        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)

    def changed(self, key):
        """Return True if the current value for this key is different from
        the previous value.

        """
        # With no saved snapshot every key counts as changed.
        if self._prev_dict is None:
            return True
        return self.previous(key) != self.get(key)

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no previous value.

        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None

    def save(self):
        """Save this config to disk.

        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        # Carry forward previously-saved keys that the current dict does
        # not override, so user-stored data survives across hooks.
        if self._prev_dict:
            # NOTE(review): iteritems() is Python 2 only.
            for k, v in self._prev_dict.iteritems():
                if k not in self:
                    self[k] = v
        with open(self.path, 'w') as f:
            json.dump(self, f)
271
272
158@cached273@cached
159def config(scope=None):274def config(scope=None):
160 """Juju charm configuration"""275 """Juju charm configuration"""
@@ -163,7 +278,10 @@
163 config_cmd_line.append(scope)278 config_cmd_line.append(scope)
164 config_cmd_line.append('--format=json')279 config_cmd_line.append('--format=json')
165 try:280 try:
166 return json.loads(subprocess.check_output(config_cmd_line))281 config_data = json.loads(subprocess.check_output(config_cmd_line))
282 if scope is not None:
283 return config_data
284 return Config(config_data)
167 except ValueError:285 except ValueError:
168 return None286 return None
169287
@@ -188,8 +306,9 @@
188 raise306 raise
189307
190308
191def relation_set(relation_id=None, relation_settings={}, **kwargs):309def relation_set(relation_id=None, relation_settings=None, **kwargs):
192 """Set relation information for the current unit"""310 """Set relation information for the current unit"""
311 relation_settings = relation_settings if relation_settings else {}
193 relation_cmd_line = ['relation-set']312 relation_cmd_line = ['relation-set']
194 if relation_id is not None:313 if relation_id is not None:
195 relation_cmd_line.extend(('-r', relation_id))314 relation_cmd_line.extend(('-r', relation_id))
@@ -348,27 +467,29 @@
348class Hooks(object):467class Hooks(object):
349 """A convenient handler for hook functions.468 """A convenient handler for hook functions.
350469
351 Example:470 Example::
471
352 hooks = Hooks()472 hooks = Hooks()
353473
354 # register a hook, taking its name from the function name474 # register a hook, taking its name from the function name
355 @hooks.hook()475 @hooks.hook()
356 def install():476 def install():
357 ...477 pass # your code here
358478
359 # register a hook, providing a custom hook name479 # register a hook, providing a custom hook name
360 @hooks.hook("config-changed")480 @hooks.hook("config-changed")
361 def config_changed():481 def config_changed():
362 ...482 pass # your code here
363483
364 if __name__ == "__main__":484 if __name__ == "__main__":
365 # execute a hook based on the name the program is called by485 # execute a hook based on the name the program is called by
366 hooks.execute(sys.argv)486 hooks.execute(sys.argv)
367 """487 """
368488
369 def __init__(self):489 def __init__(self, config_save=True):
370 super(Hooks, self).__init__()490 super(Hooks, self).__init__()
371 self._hooks = {}491 self._hooks = {}
492 self._config_save = config_save
372493
373 def register(self, name, function):494 def register(self, name, function):
374 """Register a hook"""495 """Register a hook"""
@@ -379,6 +500,10 @@
379 hook_name = os.path.basename(args[0])500 hook_name = os.path.basename(args[0])
380 if hook_name in self._hooks:501 if hook_name in self._hooks:
381 self._hooks[hook_name]()502 self._hooks[hook_name]()
503 if self._config_save:
504 cfg = config()
505 if cfg.implicit_save:
506 cfg.save()
382 else:507 else:
383 raise UnregisteredHookError(hook_name)508 raise UnregisteredHookError(hook_name)
384509
385510
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/core/host.py 2014-09-26 08:15:24 +0000
@@ -12,10 +12,13 @@
12import string12import string
13import subprocess13import subprocess
14import hashlib14import hashlib
15import shutil
16from contextlib import contextmanager
1517
16from collections import OrderedDict18from collections import OrderedDict
1719
18from hookenv import log20from hookenv import log
21from fstab import Fstab
1922
2023
21def service_start(service_name):24def service_start(service_name):
@@ -34,7 +37,8 @@
3437
3538
36def service_reload(service_name, restart_on_failure=False):39def service_reload(service_name, restart_on_failure=False):
37 """Reload a system service, optionally falling back to restart if reload fails"""40 """Reload a system service, optionally falling back to restart if
41 reload fails"""
38 service_result = service('reload', service_name)42 service_result = service('reload', service_name)
39 if not service_result and restart_on_failure:43 if not service_result and restart_on_failure:
40 service_result = service('restart', service_name)44 service_result = service('restart', service_name)
@@ -50,7 +54,7 @@
50def service_running(service):54def service_running(service):
51 """Determine whether a system service is running"""55 """Determine whether a system service is running"""
52 try:56 try:
53 output = subprocess.check_output(['service', service, 'status'])57 output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
54 except subprocess.CalledProcessError:58 except subprocess.CalledProcessError:
55 return False59 return False
56 else:60 else:
@@ -60,6 +64,16 @@
60 return False64 return False
6165
6266
def service_available(service_name):
    """Determine whether a system service is available

    :param service_name: str: name of the init service to probe.
    :returns: boolean: False only when the init system reports the
        service as unrecognized; True otherwise.
    """
    probe = ['service', service_name, 'status']
    try:
        subprocess.check_output(probe, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # A non-zero exit can simply mean "stopped"; the service is
        # unavailable only if init did not recognize it at all.
        return 'unrecognized service' not in e.output
    return True
75
76
63def adduser(username, password=None, shell='/bin/bash', system_user=False):77def adduser(username, password=None, shell='/bin/bash', system_user=False):
64 """Add a user to the system"""78 """Add a user to the system"""
65 try:79 try:
@@ -143,7 +157,19 @@
143 target.write(content)157 target.write(content)
144158
145159
def fstab_remove(mp):
    """Remove the given mountpoint entry from /etc/fstab

    :param mp: str: Mountpoint whose fstab entry should be removed.
    :returns: True if an entry was found and removed, False otherwise.
    """
    return Fstab.remove_by_mountpoint(mp)
164
165
def fstab_add(dev, mp, fs, options=None):
    """Adds the given device entry to the /etc/fstab file

    :param dev: str: Device path.
    :param mp: str: Mountpoint for the device.
    :param fs: str: Filesystem type.
    :param options: str: Mount options; when falsy the entry falls back
        to "defaults".
    :returns: the new Fstab.Entry, or False if an entry for that device
        already exists.
    """
    return Fstab.add(dev, mp, fs, options=options)
170
171
172def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
147 """Mount a filesystem at a particular mountpoint"""173 """Mount a filesystem at a particular mountpoint"""
148 cmd_args = ['mount']174 cmd_args = ['mount']
149 if options is not None:175 if options is not None:
@@ -154,9 +180,9 @@
154 except subprocess.CalledProcessError, e:180 except subprocess.CalledProcessError, e:
155 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))181 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
156 return False182 return False
183
157 if persist:184 if persist:
158 # TODO: update fstab185 return fstab_add(device, mountpoint, filesystem, options=options)
159 pass
160 return True186 return True
161187
162188
@@ -168,9 +194,9 @@
168 except subprocess.CalledProcessError, e:194 except subprocess.CalledProcessError, e:
169 log('Error unmounting {}\n{}'.format(mountpoint, e.output))195 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
170 return False196 return False
197
171 if persist:198 if persist:
172 # TODO: update fstab199 return fstab_remove(mountpoint)
173 pass
174 return True200 return True
175201
176202
@@ -183,10 +209,15 @@
183 return system_mounts209 return system_mounts
184210
185211
186def file_hash(path):212def file_hash(path, hash_type='md5'):
187 """Generate a md5 hash of the contents of 'path' or None if not found """213 """
214 Generate a hash checksum of the contents of 'path' or None if not found.
215
216 :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
217 such as md5, sha1, sha256, sha512, etc.
218 """
188 if os.path.exists(path):219 if os.path.exists(path):
189 h = hashlib.md5()220 h = getattr(hashlib, hash_type)()
190 with open(path, 'r') as source:221 with open(path, 'r') as source:
191 h.update(source.read()) # IGNORE:E1101 - it does have update222 h.update(source.read()) # IGNORE:E1101 - it does have update
192 return h.hexdigest()223 return h.hexdigest()
@@ -194,16 +225,36 @@
194 return None225 return None
195226
196227
def check_hash(path, checksum, hash_type='md5'):
    """
    Validate a file using a cryptographic checksum.

    :param str path: Path of the file to validate.
    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum

    """
    actual = file_hash(path, hash_type)
    if actual == checksum:
        return
    raise ChecksumError("'%s' != '%s'" % (checksum, actual))
242
243
class ChecksumError(ValueError):
    """Raised by check_hash() when a file's computed hash does not match
    the expected checksum."""
    pass
246
247
197def restart_on_change(restart_map, stopstart=False):248def restart_on_change(restart_map, stopstart=False):
198 """Restart services based on configuration files changing249 """Restart services based on configuration files changing
199250
200 This function is used a decorator, for example251 This function is used a decorator, for example::
201252
202 @restart_on_change({253 @restart_on_change({
203 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]254 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
204 })255 })
205 def ceph_client_changed():256 def ceph_client_changed():
206 ...257 pass # your code here
207258
208 In this example, the cinder-api and cinder-volume services259 In this example, the cinder-api and cinder-volume services
209 would be restarted if /etc/ceph/ceph.conf is changed by the260 would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -295,3 +346,40 @@
295 if 'link/ether' in words:346 if 'link/ether' in words:
296 hwaddr = words[words.index('link/ether') + 1]347 hwaddr = words[words.index('link/ether') + 1]
297 return hwaddr348 return hwaddr
349
350
def cmp_pkgrevno(package, revno, pkgcache=None):
    '''Compare supplied revno with the revno of the installed package

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    :param package: str: name of the installed package to inspect.
    :param revno: str: version string to compare against.
    :param pkgcache: optional pre-built apt cache; a fresh one is created
        when not supplied.
    '''
    # Imported lazily so the module loads on hosts without python-apt.
    import apt_pkg
    from charmhelpers.fetch import apt_cache
    if not pkgcache:
        pkgcache = apt_cache()
    installed = pkgcache[package].current_ver.ver_str
    return apt_pkg.version_compare(installed, revno)
365
366
@contextmanager
def chdir(d):
    """Context manager that temporarily switches the working directory
    to ``d``, restoring the previous directory on exit -- even when the
    managed block raises.
    """
    previous = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(previous)
374
375
def chownr(path, owner, group):
    """Recursively change ownership of everything *under* ``path``
    (the top-level directory itself is not touched).

    :param path: str: root of the tree to re-own.
    :param owner: str: user name, resolved via the passwd database.
    :param group: str: group name, resolved via the group database.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid

    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full_path = os.path.join(root, name)
            # A dangling symlink (lexists but not exists) would make
            # os.chown blow up; skip it.
            if os.path.lexists(full_path) and not os.path.exists(full_path):
                continue
            os.chown(full_path, uid, gid)
298386
=== added directory 'hooks/charmhelpers/core/services'
=== added file 'hooks/charmhelpers/core/services/__init__.py'
--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/__init__.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,2 @@
1from .base import *
2from .helpers import *
03
=== added file 'hooks/charmhelpers/core/services/base.py'
--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/base.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,313 @@
1import os
2import re
3import json
4from collections import Iterable
5
6from charmhelpers.core import host
7from charmhelpers.core import hookenv
8
9
10__all__ = ['ServiceManager', 'ManagerCallback',
11 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
12 'service_restart', 'service_stop']
13
14
class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "provided_data": <list of provided data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
        information.

        The 'provided_data' list should contain relation data providers, most likely
        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
        that will indicate a set of data to set on a given relation.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`. Each callback will be called with the service name as the
        only parameter. After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete. Each callback will be called with the service
        name as the only parameter. This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service. If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage. The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        # Readiness state persists across hooks in a JSON file in the
        # charm directory.
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        self._ready = None
        # Index the definitions by service name for direct lookup.
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            # The stop hook only tears services down; no data is provided.
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()
        # Honour the Config implicit-save contract (see hookenv.Config.save).
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()

    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.
        """
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                # Only act during the provider's own -joined/-changed hooks.
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    # Providers may implement an optional _is_ready gate;
                    # otherwise truthiness of the data itself decides.
                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
                    if _ready:
                        hookenv.relation_set(None, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                # Only fire data_lost/stop on a ready -> not-ready
                # transition, not on every hook.
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        # A single bare callable is wrapped into a one-element list.
        # NOTE(review): collections.Iterable was removed in Python 3.10
        # (use collections.abc.Iterable there).
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                # ManagerCallbacks additionally receive the manager and
                # the event name.
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        # Lazy-load and cache the persisted ready-set; no-op when already
        # loaded.
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        # Persist the cached ready-set; no-op when it was never loaded.
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready
248
249
class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

    * `manager` The `ServiceManager` instance
    * `service_name` The name of the service it's being triggered for
    * `event_name` The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract hook point: concrete subclasses (e.g.
        # PortManagerCallback) must override this.
        raise NotImplementedError()
263
264
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(),
                                 '.{}.ports'.format(service_name))
        # Close any previously-recorded port that is no longer wanted.
        if os.path.exists(port_file):
            with open(port_file) as fp:
                for recorded in fp.read().split(','):
                    if not recorded:
                        continue
                    recorded = int(recorded)
                    if recorded not in new_ports:
                        hookenv.close_port(recorded)
        # Persist the current port list for the next invocation.
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        # Open on 'start', close on 'stop'.
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
289
290
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    # Only a running service can (and needs to) be stopped.
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
298
299
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    # Do nothing when the init system does not know the service.
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        # A stopped service cannot be restarted; start it instead.
        host.service_start(service_name)
310
311
312# Convenience aliases
313open_ports = close_ports = manage_ports = PortManagerCallback()
0314
=== added file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,239 @@
1import os
2import yaml
3from charmhelpers.core import hookenv
4from charmhelpers.core import templating
5
6from charmhelpers.core.services.base import ManagerCallback
7
8
9__all__ = ['RelationContext', 'TemplateCallback',
10 'render_template', 'template']
11
12
13class RelationContext(dict):
14 """
15 Base class for a context generator that gets relation data from juju.
16
17 Subclasses must provide the attributes `name`, which is the name of the
18 interface of interest, `interface`, which is the type of the interface of
19 interest, and `required_keys`, which is the set of keys required for the
20 relation to be considered complete. The data for all interfaces matching
21 the `name` attribute that are complete will be used to populate the dictionary
22 values (see `get_data`, below).
23
24 The generated context will be namespaced under the relation :attr:`name`,
25 to prevent potential naming conflicts.
26
27 :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
28 :param list additional_required_keys: Extend the list of :attr:`required_keys`
29 """
30 name = None
31 interface = None
32 required_keys = []
33
34 def __init__(self, name=None, additional_required_keys=None):
35 if name is not None:
36 self.name = name
37 if additional_required_keys is not None:
38 self.required_keys.extend(additional_required_keys)
39 self.get_data()
40
41 def __bool__(self):
42 """
43 Returns True if all of the required_keys are available.
44 """
45 return self.is_ready()
46
47 __nonzero__ = __bool__
48
49 def __repr__(self):
50 return super(RelationContext, self).__repr__()
51
52 def is_ready(self):
53 """
54 Returns True if all of the `required_keys` are available from any units.
55 """
56 ready = len(self.get(self.name, [])) > 0
57 if not ready:
58 hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
59 return ready
60
61 def _is_ready(self, unit_data):
62 """
63 Helper method that tests a set of relation data and returns True if
64 all of the `required_keys` are present.
65 """
66 return set(unit_data.keys()).issuperset(set(self.required_keys))
67
68 def get_data(self):
69 """
70 Retrieve the relation data for each unit involved in a relation and,
71 if complete, store it in a list under `self[self.name]`. This
72 is automatically called when the RelationContext is instantiated.
73
74 The units are sorted lexicographically first by the service ID, then by
75 the unit ID. Thus, if an interface has two other services, 'db:1'
76 and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
77 and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
78 set of data, the relation data for the units will be stored in the
79 order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
80
81 If you only care about a single unit on the relation, you can just
82 access it as `{{ interface[0]['key'] }}`. However, if you can at all
83 support multiple units on a relation, you should iterate over the list,
84 like::
85
86 {% for unit in interface -%}
87 {{ unit['key'] }}{% if not loop.last %},{% endif %}
88 {%- endfor %}
89
90 Note that since all sets of relation data from all related services and
91 units are in a single list, if you need to know which service or unit a
92 set of data came from, you'll need to extend this class to preserve
93 that information.
94 """
95 if not hookenv.relation_ids(self.name):
96 return
97
98 ns = self.setdefault(self.name, [])
99 for rid in sorted(hookenv.relation_ids(self.name)):
100 for unit in sorted(hookenv.related_units(rid)):
101 reldata = hookenv.relation_get(rid=rid, unit=unit)
102 if self._is_ready(reldata):
103 ns.append(reldata)
104
105 def provide_data(self):
106 """
107 Return data to be relation_set for this interface.
108 """
109 return {}
110
111
112class MysqlRelation(RelationContext):
113 """
114 Relation context for the `mysql` interface.
115
116 :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
117 :param list additional_required_keys: Extend the list of :attr:`required_keys`
118 """
119 name = 'db'
120 interface = 'mysql'
121 required_keys = ['host', 'user', 'password', 'database']
122
123
124class HttpRelation(RelationContext):
125 """
126 Relation context for the `http` interface.
127
128 :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
129 :param list additional_required_keys: Extend the list of :attr:`required_keys`
130 """
131 name = 'website'
132 interface = 'http'
133 required_keys = ['host', 'port']
134
135 def provide_data(self):
136 return {
137 'host': hookenv.unit_get('private-address'),
138 'port': 80,
139 }
140
141
142class RequiredConfig(dict):
143 """
144 Data context that loads config options with one or more mandatory options.
145
146 Once the required options have been changed from their default values, all
147 config options will be available, namespaced under `config` to prevent
148 potential naming conflicts (for example, between a config option and a
149 relation property).
150
151 :param list *args: List of options that must be changed from their default values.
152 """
153
154 def __init__(self, *args):
155 self.required_options = args
156 self['config'] = hookenv.config()
157 with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
158 self.config = yaml.load(fp).get('options', {})
159
160 def __bool__(self):
161 for option in self.required_options:
162 if option not in self['config']:
163 return False
164 current_value = self['config'][option]
165 default_value = self.config[option].get('default')
166 if current_value == default_value:
167 return False
168 if current_value in (None, '') and default_value in (None, ''):
169 return False
170 return True
171
172 def __nonzero__(self):
173 return self.__bool__()
174
175
176class StoredContext(dict):
177 """
178 A data context that always returns the data that it was first created with.
179
180 This is useful to do a one-time generation of things like passwords, that
181 will thereafter use the same value that was originally generated, instead
182 of generating a new value each time it is run.
183 """
184 def __init__(self, file_name, config_data):
185 """
186 If the file exists, populate `self` with the data from the file.
187 Otherwise, populate with the given data and persist it to the file.
188 """
189 if os.path.exists(file_name):
190 self.update(self.read_context(file_name))
191 else:
192 self.store_context(file_name, config_data)
193 self.update(config_data)
194
195 def store_context(self, file_name, config_data):
196 if not os.path.isabs(file_name):
197 file_name = os.path.join(hookenv.charm_dir(), file_name)
198 with open(file_name, 'w') as file_stream:
199 os.fchmod(file_stream.fileno(), 0600)
200 yaml.dump(config_data, file_stream)
201
202 def read_context(self, file_name):
203 if not os.path.isabs(file_name):
204 file_name = os.path.join(hookenv.charm_dir(), file_name)
205 with open(file_name, 'r') as file_stream:
206 data = yaml.load(file_stream)
207 if not data:
208 raise OSError("%s is empty" % file_name)
209 return data
210
211
212class TemplateCallback(ManagerCallback):
213 """
214 Callback class that will render a Jinja2 template, for use as a ready action.
215
216 :param str source: The template source file, relative to `$CHARM_DIR/templates`
217 :param str target: The target to write the rendered template to
218 :param str owner: The owner of the rendered file
219 :param str group: The group of the rendered file
220 :param int perms: The permissions of the rendered file
221 """
222 def __init__(self, source, target, owner='root', group='root', perms=0444):
223 self.source = source
224 self.target = target
225 self.owner = owner
226 self.group = group
227 self.perms = perms
228
229 def __call__(self, manager, service_name, event_name):
230 service = manager.get_service(service_name)
231 context = {}
232 for ctx in service.get('required_data', []):
233 context.update(ctx)
234 templating.render(self.source, self.target, context,
235 self.owner, self.group, self.perms)
236
237
238# Convenience aliases for templates
239render_template = template = TemplateCallback
0240
=== added file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/templating.py 2014-09-26 08:15:24 +0000
@@ -0,0 +1,51 @@
1import os
2
3from charmhelpers.core import host
4from charmhelpers.core import hookenv
5
6
7def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
8 """
9 Render a template.
10
11 The `source` path, if not absolute, is relative to the `templates_dir`.
12
13 The `target` path should be absolute.
14
15 The context should be a dict containing the values to be replaced in the
16 template.
17
18 The `owner`, `group`, and `perms` options will be passed to `write_file`.
19
20 If omitted, `templates_dir` defaults to the `templates` folder in the charm.
21
22 Note: Using this requires python-jinja2; if it is not installed, calling
23 this will attempt to use charmhelpers.fetch.apt_install to install it.
24 """
25 try:
26 from jinja2 import FileSystemLoader, Environment, exceptions
27 except ImportError:
28 try:
29 from charmhelpers.fetch import apt_install
30 except ImportError:
31 hookenv.log('Could not import jinja2, and could not import '
32 'charmhelpers.fetch to install it',
33 level=hookenv.ERROR)
34 raise
35 apt_install('python-jinja2', fatal=True)
36 from jinja2 import FileSystemLoader, Environment, exceptions
37
38 if templates_dir is None:
39 templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
40 loader = Environment(loader=FileSystemLoader(templates_dir))
41 try:
42 source = source
43 template = loader.get_template(source)
44 except exceptions.TemplateNotFound as e:
45 hookenv.log('Could not load template %s from %s.' %
46 (source, templates_dir),
47 level=hookenv.ERROR)
48 raise e
49 content = template.render(context)
50 host.mkdir(os.path.dirname(target))
51 host.write_file(target, content, owner, group, perms)
052
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-05-02 13:03:56 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-09-26 08:15:24 +0000
@@ -1,4 +1,6 @@
1import importlib1import importlib
2from tempfile import NamedTemporaryFile
3import time
2from yaml import safe_load4from yaml import safe_load
3from charmhelpers.core.host import (5from charmhelpers.core.host import (
4 lsb_release6 lsb_release
@@ -12,9 +14,9 @@
12 config,14 config,
13 log,15 log,
14)16)
15import apt_pkg
16import os17import os
1718
19
18CLOUD_ARCHIVE = """# Ubuntu Cloud Archive20CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
19deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main21deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
20"""22"""
@@ -54,13 +56,68 @@
54 'icehouse/proposed': 'precise-proposed/icehouse',56 'icehouse/proposed': 'precise-proposed/icehouse',
55 'precise-icehouse/proposed': 'precise-proposed/icehouse',57 'precise-icehouse/proposed': 'precise-proposed/icehouse',
56 'precise-proposed/icehouse': 'precise-proposed/icehouse',58 'precise-proposed/icehouse': 'precise-proposed/icehouse',
59 # Juno
60 'juno': 'trusty-updates/juno',
61 'trusty-juno': 'trusty-updates/juno',
62 'trusty-juno/updates': 'trusty-updates/juno',
63 'trusty-updates/juno': 'trusty-updates/juno',
64 'juno/proposed': 'trusty-proposed/juno',
65 'juno/proposed': 'trusty-proposed/juno',
66 'trusty-juno/proposed': 'trusty-proposed/juno',
67 'trusty-proposed/juno': 'trusty-proposed/juno',
57}68}
5869
70# The order of this list is very important. Handlers should be listed in from
71# least- to most-specific URL matching.
72FETCH_HANDLERS = (
73 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
74 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
75)
76
77APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
78APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
79APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
80
81
82class SourceConfigError(Exception):
83 pass
84
85
86class UnhandledSource(Exception):
87 pass
88
89
90class AptLockError(Exception):
91 pass
92
93
94class BaseFetchHandler(object):
95
96 """Base class for FetchHandler implementations in fetch plugins"""
97
98 def can_handle(self, source):
99 """Returns True if the source can be handled. Otherwise returns
100 a string explaining why it cannot"""
101 return "Wrong source type"
102
103 def install(self, source):
104 """Try to download and unpack the source. Return the path to the
105 unpacked files or raise UnhandledSource."""
106 raise UnhandledSource("Wrong source type {}".format(source))
107
108 def parse_url(self, url):
109 return urlparse(url)
110
111 def base_url(self, url):
112 """Return url without querystring or fragment"""
113 parts = list(self.parse_url(url))
114 parts[4:] = ['' for i in parts[4:]]
115 return urlunparse(parts)
116
59117
60def filter_installed_packages(packages):118def filter_installed_packages(packages):
61 """Returns a list of packages that require installation"""119 """Returns a list of packages that require installation"""
62 apt_pkg.init()120 cache = apt_cache()
63 cache = apt_pkg.Cache()
64 _pkgs = []121 _pkgs = []
65 for package in packages:122 for package in packages:
66 try:123 try:
@@ -73,6 +130,16 @@
73 return _pkgs130 return _pkgs
74131
75132
133def apt_cache(in_memory=True):
134 """Build and return an apt cache"""
135 import apt_pkg
136 apt_pkg.init()
137 if in_memory:
138 apt_pkg.config.set("Dir::Cache::pkgcache", "")
139 apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
140 return apt_pkg.Cache()
141
142
76def apt_install(packages, options=None, fatal=False):143def apt_install(packages, options=None, fatal=False):
77 """Install one or more packages"""144 """Install one or more packages"""
78 if options is None:145 if options is None:
@@ -87,14 +154,7 @@
87 cmd.extend(packages)154 cmd.extend(packages)
88 log("Installing {} with options: {}".format(packages,155 log("Installing {} with options: {}".format(packages,
89 options))156 options))
90 env = os.environ.copy()157 _run_apt_command(cmd, fatal)
91 if 'DEBIAN_FRONTEND' not in env:
92 env['DEBIAN_FRONTEND'] = 'noninteractive'
93
94 if fatal:
95 subprocess.check_call(cmd, env=env)
96 else:
97 subprocess.call(cmd, env=env)
98158
99159
100def apt_upgrade(options=None, fatal=False, dist=False):160def apt_upgrade(options=None, fatal=False, dist=False):
@@ -109,24 +169,13 @@
109 else:169 else:
110 cmd.append('upgrade')170 cmd.append('upgrade')
111 log("Upgrading with options: {}".format(options))171 log("Upgrading with options: {}".format(options))
112172 _run_apt_command(cmd, fatal)
113 env = os.environ.copy()
114 if 'DEBIAN_FRONTEND' not in env:
115 env['DEBIAN_FRONTEND'] = 'noninteractive'
116
117 if fatal:
118 subprocess.check_call(cmd, env=env)
119 else:
120 subprocess.call(cmd, env=env)
121173
122174
123def apt_update(fatal=False):175def apt_update(fatal=False):
124 """Update local apt cache"""176 """Update local apt cache"""
125 cmd = ['apt-get', 'update']177 cmd = ['apt-get', 'update']
126 if fatal:178 _run_apt_command(cmd, fatal)
127 subprocess.check_call(cmd)
128 else:
129 subprocess.call(cmd)
130179
131180
132def apt_purge(packages, fatal=False):181def apt_purge(packages, fatal=False):
@@ -137,10 +186,7 @@
137 else:186 else:
138 cmd.extend(packages)187 cmd.extend(packages)
139 log("Purging {}".format(packages))188 log("Purging {}".format(packages))
140 if fatal:189 _run_apt_command(cmd, fatal)
141 subprocess.check_call(cmd)
142 else:
143 subprocess.call(cmd)
144190
145191
146def apt_hold(packages, fatal=False):192def apt_hold(packages, fatal=False):
@@ -151,6 +197,7 @@
151 else:197 else:
152 cmd.extend(packages)198 cmd.extend(packages)
153 log("Holding {}".format(packages))199 log("Holding {}".format(packages))
200
154 if fatal:201 if fatal:
155 subprocess.check_call(cmd)202 subprocess.check_call(cmd)
156 else:203 else:
@@ -158,6 +205,28 @@
158205
159206
160def add_source(source, key=None):207def add_source(source, key=None):
208 """Add a package source to this system.
209
210 @param source: a URL or sources.list entry, as supported by
211 add-apt-repository(1). Examples::
212
213 ppa:charmers/example
214 deb https://stub:key@private.example.com/ubuntu trusty main
215
216 In addition:
217 'proposed:' may be used to enable the standard 'proposed'
218 pocket for the release.
219 'cloud:' may be used to activate official cloud archive pockets,
220 such as 'cloud:icehouse'
221
222 @param key: A key to be added to the system's APT keyring and used
223 to verify the signatures on packages. Ideally, this should be an
224 ASCII format GPG public key including the block headers. A GPG key
225 id may also be used, but be aware that only insecure protocols are
226 available to retrieve the actual public key from a public keyserver
227 placing your Juju environment at risk. ppa and cloud archive keys
228 are securely added automatically, so should not be provided.
229 """
161 if source is None:230 if source is None:
162 log('Source is not present. Skipping')231 log('Source is not present. Skipping')
163 return232 return
@@ -182,76 +251,96 @@
182 release = lsb_release()['DISTRIB_CODENAME']251 release = lsb_release()['DISTRIB_CODENAME']
183 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:252 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
184 apt.write(PROPOSED_POCKET.format(release))253 apt.write(PROPOSED_POCKET.format(release))
254 else:
255 raise SourceConfigError("Unknown source: {!r}".format(source))
256
185 if key:257 if key:
186 subprocess.check_call(['apt-key', 'adv', '--keyserver',258 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
187 'hkp://keyserver.ubuntu.com:80', '--recv',259 with NamedTemporaryFile() as key_file:
188 key])260 key_file.write(key)
189261 key_file.flush()
190262 key_file.seek(0)
191class SourceConfigError(Exception):263 subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
192 pass264 else:
265 # Note that hkp: is in no way a secure protocol. Using a
266 # GPG key id is pointless from a security POV unless you
267 # absolutely trust your network and DNS.
268 subprocess.check_call(['apt-key', 'adv', '--keyserver',
269 'hkp://keyserver.ubuntu.com:80', '--recv',
270 key])
193271
194272
195def configure_sources(update=False,273def configure_sources(update=False,
196 sources_var='install_sources',274 sources_var='install_sources',
197 keys_var='install_keys'):275 keys_var='install_keys'):
198 """276 """
199 Configure multiple sources from charm configuration277 Configure multiple sources from charm configuration.
278
279 The lists are encoded as yaml fragments in the configuration.
280 The fragment needs to be included as a string. Sources and their
281 corresponding keys are of the types supported by add_source().
200282
201 Example config:283 Example config:
202 install_sources:284 install_sources: |
203 - "ppa:foo"285 - "ppa:foo"
204 - "http://example.com/repo precise main"286 - "http://example.com/repo precise main"
205 install_keys:287 install_keys: |
206 - null288 - null
207 - "a1b2c3d4"289 - "a1b2c3d4"
208290
209 Note that 'null' (a.k.a. None) should not be quoted.291 Note that 'null' (a.k.a. None) should not be quoted.
210 """292 """
211 sources = safe_load(config(sources_var))293 sources = safe_load((config(sources_var) or '').strip()) or []
212 keys = config(keys_var)294 keys = safe_load((config(keys_var) or '').strip()) or None
213 if keys is not None:295
214 keys = safe_load(keys)296 if isinstance(sources, basestring):
215 if isinstance(sources, basestring) and (297 sources = [sources]
216 keys is None or isinstance(keys, basestring)):298
217 add_source(sources, keys)299 if keys is None:
300 for source in sources:
301 add_source(source, None)
218 else:302 else:
219 if not len(sources) == len(keys):303 if isinstance(keys, basestring):
220 msg = 'Install sources and keys lists are different lengths'304 keys = [keys]
221 raise SourceConfigError(msg)305
222 for src_num in range(len(sources)):306 if len(sources) != len(keys):
223 add_source(sources[src_num], keys[src_num])307 raise SourceConfigError(
308 'Install sources and keys lists are different lengths')
309 for source, key in zip(sources, keys):
310 add_source(source, key)
224 if update:311 if update:
225 apt_update(fatal=True)312 apt_update(fatal=True)
226313
227# The order of this list is very important. Handlers should be listed in from314
228# least- to most-specific URL matching.315def install_remote(source, *args, **kwargs):
229FETCH_HANDLERS = (
230 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
231 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
232)
233
234
235class UnhandledSource(Exception):
236 pass
237
238
239def install_remote(source):
240 """316 """
241 Install a file tree from a remote source317 Install a file tree from a remote source
242318
243 The specified source should be a url of the form:319 The specified source should be a url of the form:
244 scheme://[host]/path[#[option=value][&...]]320 scheme://[host]/path[#[option=value][&...]]
245321
246 Schemes supported are based on this modules submodules322 Schemes supported are based on this modules submodules.
247 Options supported are submodule-specific"""323 Options supported are submodule-specific.
324 Additional arguments are passed through to the submodule.
325
326 For example::
327
328 dest = install_remote('http://example.com/archive.tgz',
329 checksum='deadbeef',
330 hash_type='sha1')
331
332 This will download `archive.tgz`, validate it using SHA1 and, if
333 the file is ok, extract it and return the directory in which it
334 was extracted. If the checksum fails, it will raise
335 :class:`charmhelpers.core.host.ChecksumError`.
336 """
248 # We ONLY check for True here because can_handle may return a string337 # We ONLY check for True here because can_handle may return a string
249 # explaining why it can't handle a given source.338 # explaining why it can't handle a given source.
250 handlers = [h for h in plugins() if h.can_handle(source) is True]339 handlers = [h for h in plugins() if h.can_handle(source) is True]
251 installed_to = None340 installed_to = None
252 for handler in handlers:341 for handler in handlers:
253 try:342 try:
254 installed_to = handler.install(source)343 installed_to = handler.install(source, *args, **kwargs)
255 except UnhandledSource:344 except UnhandledSource:
256 pass345 pass
257 if not installed_to:346 if not installed_to:
@@ -265,30 +354,6 @@
265 return install_remote(source)354 return install_remote(source)
266355
267356
268class BaseFetchHandler(object):
269
270 """Base class for FetchHandler implementations in fetch plugins"""
271
272 def can_handle(self, source):
273 """Returns True if the source can be handled. Otherwise returns
274 a string explaining why it cannot"""
275 return "Wrong source type"
276
277 def install(self, source):
278 """Try to download and unpack the source. Return the path to the
279 unpacked files or raise UnhandledSource."""
280 raise UnhandledSource("Wrong source type {}".format(source))
281
282 def parse_url(self, url):
283 return urlparse(url)
284
285 def base_url(self, url):
286 """Return url without querystring or fragment"""
287 parts = list(self.parse_url(url))
288 parts[4:] = ['' for i in parts[4:]]
289 return urlunparse(parts)
290
291
292def plugins(fetch_handlers=None):357def plugins(fetch_handlers=None):
293 if not fetch_handlers:358 if not fetch_handlers:
294 fetch_handlers = FETCH_HANDLERS359 fetch_handlers = FETCH_HANDLERS
@@ -306,3 +371,40 @@
306 log("FetchHandler {} not found, skipping plugin".format(371 log("FetchHandler {} not found, skipping plugin".format(
307 handler_name))372 handler_name))
308 return plugin_list373 return plugin_list
374
375
376def _run_apt_command(cmd, fatal=False):
377 """
378 Run an APT command, checking output and retrying if the fatal flag is set
379 to True.
380
381 :param: cmd: str: The apt command to run.
382 :param: fatal: bool: Whether the command's output should be checked and
383 retried.
384 """
385 env = os.environ.copy()
386
387 if 'DEBIAN_FRONTEND' not in env:
388 env['DEBIAN_FRONTEND'] = 'noninteractive'
389
390 if fatal:
391 retry_count = 0
392 result = None
393
394 # If the command is considered "fatal", we need to retry if the apt
395 # lock was not acquired.
396
397 while result is None or result == APT_NO_LOCK:
398 try:
399 result = subprocess.check_call(cmd, env=env)
400 except subprocess.CalledProcessError, e:
401 retry_count = retry_count + 1
402 if retry_count > APT_NO_LOCK_RETRY_COUNT:
403 raise
404 result = e.returncode
405 log("Couldn't acquire DPKG lock. Will retry in {} seconds."
406 "".format(APT_NO_LOCK_RETRY_DELAY))
407 time.sleep(APT_NO_LOCK_RETRY_DELAY)
408
409 else:
410 subprocess.call(cmd, env=env)
309411
=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
--- hooks/charmhelpers/fetch/archiveurl.py 2014-03-27 12:33:12 +0000
+++ hooks/charmhelpers/fetch/archiveurl.py 2014-09-26 08:15:24 +0000
@@ -1,6 +1,8 @@
1import os1import os
2import urllib22import urllib2
3from urllib import urlretrieve
3import urlparse4import urlparse
5import hashlib
46
5from charmhelpers.fetch import (7from charmhelpers.fetch import (
6 BaseFetchHandler,8 BaseFetchHandler,
@@ -10,11 +12,19 @@
10 get_archive_handler,12 get_archive_handler,
11 extract,13 extract,
12)14)
13from charmhelpers.core.host import mkdir15from charmhelpers.core.host import mkdir, check_hash
1416
1517
16class ArchiveUrlFetchHandler(BaseFetchHandler):18class ArchiveUrlFetchHandler(BaseFetchHandler):
17 """Handler for archives via generic URLs"""19 """
20 Handler to download archive files from arbitrary URLs.
21
22 Can fetch from http, https, ftp, and file URLs.
23
24 Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
25
26 Installs the contents of the archive in $CHARM_DIR/fetched/.
27 """
18 def can_handle(self, source):28 def can_handle(self, source):
19 url_parts = self.parse_url(source)29 url_parts = self.parse_url(source)
20 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):30 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@@ -24,6 +34,12 @@
24 return False34 return False
2535
26 def download(self, source, dest):36 def download(self, source, dest):
37 """
38 Download an archive file.
39
40 :param str source: URL pointing to an archive file.
41 :param str dest: Local path location to download archive file to.
42 """
27 # propagate all exceptions43 # propagate all exceptions
28 # URLError, OSError, etc44 # URLError, OSError, etc
29 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)45 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@@ -48,7 +64,30 @@
48 os.unlink(dest)64 os.unlink(dest)
49 raise e65 raise e
5066
51 def install(self, source):67 # Mandatory file validation via Sha1 or MD5 hashing.
68 def download_and_validate(self, url, hashsum, validate="sha1"):
69 tempfile, headers = urlretrieve(url)
70 check_hash(tempfile, hashsum, validate)
71 return tempfile
72
73 def install(self, source, dest=None, checksum=None, hash_type='sha1'):
74 """
75 Download and install an archive file, with optional checksum validation.
76
77 The checksum can also be given on the `source` URL's fragment.
78 For example::
79
80 handler.install('http://example.com/file.tgz#sha1=deadbeef')
81
82 :param str source: URL pointing to an archive file.
83 :param str dest: Local destination path to install to. If not given,
84 installs to `$CHARM_DIR/archives/archive_file_name`.
85 :param str checksum: If given, validate the archive file after download.
86 :param str hash_type: Algorithm used to generate `checksum`.
87 Can be any hash algorithm supported by :mod:`hashlib`,
88 such as md5, sha1, sha256, sha512, etc.
89
90 """
52 url_parts = self.parse_url(source)91 url_parts = self.parse_url(source)
53 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')92 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
54 if not os.path.exists(dest_dir):93 if not os.path.exists(dest_dir):
@@ -60,4 +99,10 @@
60 raise UnhandledSource(e.reason)99 raise UnhandledSource(e.reason)
61 except OSError as e:100 except OSError as e:
62 raise UnhandledSource(e.strerror)101 raise UnhandledSource(e.strerror)
63 return extract(dld_file)102 options = urlparse.parse_qs(url_parts.fragment)
103 for key, value in options.items():
104 if key in hashlib.algorithms:
105 check_hash(dld_file, value, key)
106 if checksum:
107 check_hash(dld_file, checksum, hash_type)
108 return extract(dld_file, dest)
64109
=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
--- hooks/charmhelpers/fetch/bzrurl.py 2014-03-05 12:57:20 +0000
+++ hooks/charmhelpers/fetch/bzrurl.py 2014-09-26 08:15:24 +0000
@@ -39,7 +39,8 @@
39 def install(self, source):39 def install(self, source):
40 url_parts = self.parse_url(source)40 url_parts = self.parse_url(source)
41 branch_name = url_parts.path.strip("/").split("/")[-1]41 branch_name = url_parts.path.strip("/").split("/")[-1]
42 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)42 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
43 branch_name)
43 if not os.path.exists(dest_dir):44 if not os.path.exists(dest_dir):
44 mkdir(dest_dir, perms=0755)45 mkdir(dest_dir, perms=0755)
45 try:46 try:
4647
=== modified file 'hooks/rabbit_utils.py'
--- hooks/rabbit_utils.py 2014-06-11 19:58:50 +0000
+++ hooks/rabbit_utils.py 2014-09-26 08:15:24 +0000
@@ -6,7 +6,6 @@
6import subprocess6import subprocess
7import glob7import glob
8from lib.utils import render_template8from lib.utils import render_template
9import apt_pkg as apt
109
11from charmhelpers.contrib.openstack.utils import (10from charmhelpers.contrib.openstack.utils import (
12 get_hostname,11 get_hostname,
@@ -21,7 +20,12 @@
21 service_name20 service_name
22)21)
2322
24from charmhelpers.core.host import pwgen, mkdir, write_file23from charmhelpers.core.host import (
24 pwgen,
25 mkdir,
26 write_file,
27 cmp_pkgrevno,
28)
2529
26from charmhelpers.contrib.peerstorage import (30from charmhelpers.contrib.peerstorage import (
27 peer_store,31 peer_store,
@@ -103,21 +107,9 @@
103 subprocess.check_call(cmd)107 subprocess.check_call(cmd)
104108
105109
106def compare_version(base_version):
107 apt.init()
108 cache = apt.Cache()
109 pkg = cache['rabbitmq-server']
110 if pkg.current_ver:
111 return apt.version_compare(
112 apt.upstream_version(pkg.current_ver.ver_str),
113 base_version)
114 else:
115 return False
116
117
118def cluster_with():110def cluster_with():
119 log('Clustering with new node')111 log('Clustering with new node')
120 if compare_version('3.0.1') >= 0:112 if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
121 cluster_cmd = 'join_cluster'113 cluster_cmd = 'join_cluster'
122 else:114 else:
123 cluster_cmd = 'cluster'115 cluster_cmd = 'cluster'
@@ -167,7 +159,7 @@
167 cmd = [RABBITMQ_CTL, 'start_app']159 cmd = [RABBITMQ_CTL, 'start_app']
168 subprocess.check_call(cmd)160 subprocess.check_call(cmd)
169 log('Host clustered with %s.' % node)161 log('Host clustered with %s.' % node)
170 if compare_version('3.0.1') >= 0:162 if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0:
171 cmd = [RABBITMQ_CTL, 'set_policy', 'HA',163 cmd = [RABBITMQ_CTL, 'set_policy', 'HA',
172 '^(?!amq\.).*', '{"ha-mode": "all"}']164 '^(?!amq\.).*', '{"ha-mode": "all"}']
173 subprocess.check_call(cmd)165 subprocess.check_call(cmd)

Subscribers

People subscribed via source and target branches