Merge lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers into lp:charms/trusty/rabbitmq-server
- Trusty Tahr (14.04)
- resync-charm-helpers
- Merge into trunk
Status: | Merged |
---|---|
Merged at revision: | 60 |
Proposed branch: | lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers |
Merge into: | lp:charms/trusty/rabbitmq-server |
Diff against target: |
3241 lines (+2078/-264) 25 files modified
hooks/charmhelpers/contrib/charmsupport/volumes.py (+5/-2) hooks/charmhelpers/contrib/hahelpers/cluster.py (+59/-17) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0) hooks/charmhelpers/contrib/openstack/context.py (+187/-47) hooks/charmhelpers/contrib/openstack/ip.py (+79/-0) hooks/charmhelpers/contrib/openstack/neutron.py (+31/-1) hooks/charmhelpers/contrib/openstack/templating.py (+22/-23) hooks/charmhelpers/contrib/openstack/utils.py (+18/-7) hooks/charmhelpers/contrib/peerstorage/__init__.py (+77/-29) hooks/charmhelpers/contrib/ssl/service.py (+1/-1) hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1) hooks/charmhelpers/contrib/storage/linux/lvm.py (+1/-1) hooks/charmhelpers/contrib/storage/linux/utils.py (+23/-5) hooks/charmhelpers/core/fstab.py (+116/-0) hooks/charmhelpers/core/hookenv.py (+132/-7) hooks/charmhelpers/core/host.py (+100/-12) hooks/charmhelpers/core/services/__init__.py (+2/-0) hooks/charmhelpers/core/services/base.py (+313/-0) hooks/charmhelpers/core/services/helpers.py (+239/-0) hooks/charmhelpers/core/templating.py (+51/-0) hooks/charmhelpers/fetch/__init__.py (+192/-90) hooks/charmhelpers/fetch/archiveurl.py (+49/-4) hooks/charmhelpers/fetch/bzrurl.py (+2/-1) hooks/rabbit_utils.py (+8/-16) |
To merge this branch: | bzr merge lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
David Britton (community) | Approve | ||
Review via email: mp+236072@code.launchpad.net |
Commit message
Description of the change
This branch resyncs charm-helpers to make the charm benefit from the in-memory apt-cache index, so as not to run into race conditions with other charms.
It also uses the charm-helpers package version comparison instead of its own (again, to prevent grabbing the apt index lock for nothing).
Similar causes and fixes as https:/
Michael Hudson-Doyle (mwhudson) wrote : | # |
Hi, I'm afraid this broke the amqp-relation-
David Britton (dpb) wrote : | # |
Thanks @Michael, I put up a follow-on MP:
On Sun, Sep 28, 2014 at 7:54 PM, Michael Hudson-Doyle <
<email address hidden>> wrote:
> Hi, I'm afraid this broke the amqp-relation-
> https:/
> --
>
> https:/
> You are reviewing the proposed merge of
> lp:~tribaal/charms/trusty/rabbitmq-server/resync-charm-helpers into
> lp:charms/trusty/rabbitmq-server.
>
--
David Britton <email address hidden>
Preview Diff
1 | === modified file 'hooks/charmhelpers/contrib/charmsupport/volumes.py' | |||
2 | --- hooks/charmhelpers/contrib/charmsupport/volumes.py 2014-03-05 12:57:20 +0000 | |||
3 | +++ hooks/charmhelpers/contrib/charmsupport/volumes.py 2014-09-26 08:15:24 +0000 | |||
4 | @@ -2,7 +2,8 @@ | |||
5 | 2 | Functions for managing volumes in juju units. One volume is supported per unit. | 2 | Functions for managing volumes in juju units. One volume is supported per unit. |
6 | 3 | Subordinates may have their own storage, provided it is on its own partition. | 3 | Subordinates may have their own storage, provided it is on its own partition. |
7 | 4 | 4 | ||
9 | 5 | Configuration stanzas: | 5 | Configuration stanzas:: |
10 | 6 | |||
11 | 6 | volume-ephemeral: | 7 | volume-ephemeral: |
12 | 7 | type: boolean | 8 | type: boolean |
13 | 8 | default: true | 9 | default: true |
14 | @@ -20,7 +21,8 @@ | |||
15 | 20 | is 'true' and no volume-map value is set. Use 'juju set' to set a | 21 | is 'true' and no volume-map value is set. Use 'juju set' to set a |
16 | 21 | value and 'juju resolved' to complete configuration. | 22 | value and 'juju resolved' to complete configuration. |
17 | 22 | 23 | ||
19 | 23 | Usage: | 24 | Usage:: |
20 | 25 | |||
21 | 24 | from charmsupport.volumes import configure_volume, VolumeConfigurationError | 26 | from charmsupport.volumes import configure_volume, VolumeConfigurationError |
22 | 25 | from charmsupport.hookenv import log, ERROR | 27 | from charmsupport.hookenv import log, ERROR |
23 | 26 | def post_mount_hook(): | 28 | def post_mount_hook(): |
24 | @@ -34,6 +36,7 @@ | |||
25 | 34 | after_change=post_mount_hook) | 36 | after_change=post_mount_hook) |
26 | 35 | except VolumeConfigurationError: | 37 | except VolumeConfigurationError: |
27 | 36 | log('Storage could not be configured', ERROR) | 38 | log('Storage could not be configured', ERROR) |
28 | 39 | |||
29 | 37 | ''' | 40 | ''' |
30 | 38 | 41 | ||
31 | 39 | # XXX: Known limitations | 42 | # XXX: Known limitations |
32 | 40 | 43 | ||
33 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
34 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-05 12:57:20 +0000 | |||
35 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-09-26 08:15:24 +0000 | |||
36 | @@ -6,6 +6,11 @@ | |||
37 | 6 | # Adam Gandelman <adamg@ubuntu.com> | 6 | # Adam Gandelman <adamg@ubuntu.com> |
38 | 7 | # | 7 | # |
39 | 8 | 8 | ||
40 | 9 | """ | ||
41 | 10 | Helpers for clustering and determining "cluster leadership" and other | ||
42 | 11 | clustering-related helpers. | ||
43 | 12 | """ | ||
44 | 13 | |||
45 | 9 | import subprocess | 14 | import subprocess |
46 | 10 | import os | 15 | import os |
47 | 11 | 16 | ||
48 | @@ -19,6 +24,7 @@ | |||
49 | 19 | config as config_get, | 24 | config as config_get, |
50 | 20 | INFO, | 25 | INFO, |
51 | 21 | ERROR, | 26 | ERROR, |
52 | 27 | WARNING, | ||
53 | 22 | unit_get, | 28 | unit_get, |
54 | 23 | ) | 29 | ) |
55 | 24 | 30 | ||
56 | @@ -27,6 +33,29 @@ | |||
57 | 27 | pass | 33 | pass |
58 | 28 | 34 | ||
59 | 29 | 35 | ||
60 | 36 | def is_elected_leader(resource): | ||
61 | 37 | """ | ||
62 | 38 | Returns True if the charm executing this is the elected cluster leader. | ||
63 | 39 | |||
64 | 40 | It relies on two mechanisms to determine leadership: | ||
65 | 41 | 1. If the charm is part of a corosync cluster, call corosync to | ||
66 | 42 | determine leadership. | ||
67 | 43 | 2. If the charm is not part of a corosync cluster, the leader is | ||
68 | 44 | determined as being "the alive unit with the lowest unit numer". In | ||
69 | 45 | other words, the oldest surviving unit. | ||
70 | 46 | """ | ||
71 | 47 | if is_clustered(): | ||
72 | 48 | if not is_crm_leader(resource): | ||
73 | 49 | log('Deferring action to CRM leader.', level=INFO) | ||
74 | 50 | return False | ||
75 | 51 | else: | ||
76 | 52 | peers = peer_units() | ||
77 | 53 | if peers and not oldest_peer(peers): | ||
78 | 54 | log('Deferring action to oldest service unit.', level=INFO) | ||
79 | 55 | return False | ||
80 | 56 | return True | ||
81 | 57 | |||
82 | 58 | |||
83 | 30 | def is_clustered(): | 59 | def is_clustered(): |
84 | 31 | for r_id in (relation_ids('ha') or []): | 60 | for r_id in (relation_ids('ha') or []): |
85 | 32 | for unit in (relation_list(r_id) or []): | 61 | for unit in (relation_list(r_id) or []): |
86 | @@ -38,7 +67,11 @@ | |||
87 | 38 | return False | 67 | return False |
88 | 39 | 68 | ||
89 | 40 | 69 | ||
91 | 41 | def is_leader(resource): | 70 | def is_crm_leader(resource): |
92 | 71 | """ | ||
93 | 72 | Returns True if the charm calling this is the elected corosync leader, | ||
94 | 73 | as returned by calling the external "crm" command. | ||
95 | 74 | """ | ||
96 | 42 | cmd = [ | 75 | cmd = [ |
97 | 43 | "crm", "resource", | 76 | "crm", "resource", |
98 | 44 | "show", resource | 77 | "show", resource |
99 | @@ -54,15 +87,31 @@ | |||
100 | 54 | return False | 87 | return False |
101 | 55 | 88 | ||
102 | 56 | 89 | ||
104 | 57 | def peer_units(): | 90 | def is_leader(resource): |
105 | 91 | log("is_leader is deprecated. Please consider using is_crm_leader " | ||
106 | 92 | "instead.", level=WARNING) | ||
107 | 93 | return is_crm_leader(resource) | ||
108 | 94 | |||
109 | 95 | |||
110 | 96 | def peer_units(peer_relation="cluster"): | ||
111 | 58 | peers = [] | 97 | peers = [] |
113 | 59 | for r_id in (relation_ids('cluster') or []): | 98 | for r_id in (relation_ids(peer_relation) or []): |
114 | 60 | for unit in (relation_list(r_id) or []): | 99 | for unit in (relation_list(r_id) or []): |
115 | 61 | peers.append(unit) | 100 | peers.append(unit) |
116 | 62 | return peers | 101 | return peers |
117 | 63 | 102 | ||
118 | 64 | 103 | ||
119 | 104 | def peer_ips(peer_relation='cluster', addr_key='private-address'): | ||
120 | 105 | '''Return a dict of peers and their private-address''' | ||
121 | 106 | peers = {} | ||
122 | 107 | for r_id in relation_ids(peer_relation): | ||
123 | 108 | for unit in relation_list(r_id): | ||
124 | 109 | peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) | ||
125 | 110 | return peers | ||
126 | 111 | |||
127 | 112 | |||
128 | 65 | def oldest_peer(peers): | 113 | def oldest_peer(peers): |
129 | 114 | """Determines who the oldest peer is by comparing unit numbers.""" | ||
130 | 66 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) | 115 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) |
131 | 67 | for peer in peers: | 116 | for peer in peers: |
132 | 68 | remote_unit_no = int(peer.split('/')[1]) | 117 | remote_unit_no = int(peer.split('/')[1]) |
133 | @@ -72,16 +121,9 @@ | |||
134 | 72 | 121 | ||
135 | 73 | 122 | ||
136 | 74 | def eligible_leader(resource): | 123 | def eligible_leader(resource): |
147 | 75 | if is_clustered(): | 124 | log("eligible_leader is deprecated. Please consider using " |
148 | 76 | if not is_leader(resource): | 125 | "is_elected_leader instead.", level=WARNING) |
149 | 77 | log('Deferring action to CRM leader.', level=INFO) | 126 | return is_elected_leader(resource) |
140 | 78 | return False | ||
141 | 79 | else: | ||
142 | 80 | peers = peer_units() | ||
143 | 81 | if peers and not oldest_peer(peers): | ||
144 | 82 | log('Deferring action to oldest service unit.', level=INFO) | ||
145 | 83 | return False | ||
146 | 84 | return True | ||
150 | 85 | 127 | ||
151 | 86 | 128 | ||
152 | 87 | def https(): | 129 | def https(): |
153 | @@ -97,10 +139,9 @@ | |||
154 | 97 | return True | 139 | return True |
155 | 98 | for r_id in relation_ids('identity-service'): | 140 | for r_id in relation_ids('identity-service'): |
156 | 99 | for unit in relation_list(r_id): | 141 | for unit in relation_list(r_id): |
157 | 142 | # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN | ||
158 | 100 | rel_state = [ | 143 | rel_state = [ |
159 | 101 | relation_get('https_keystone', rid=r_id, unit=unit), | 144 | relation_get('https_keystone', rid=r_id, unit=unit), |
160 | 102 | relation_get('ssl_cert', rid=r_id, unit=unit), | ||
161 | 103 | relation_get('ssl_key', rid=r_id, unit=unit), | ||
162 | 104 | relation_get('ca_cert', rid=r_id, unit=unit), | 145 | relation_get('ca_cert', rid=r_id, unit=unit), |
163 | 105 | ] | 146 | ] |
164 | 106 | # NOTE: works around (LP: #1203241) | 147 | # NOTE: works around (LP: #1203241) |
165 | @@ -146,12 +187,12 @@ | |||
166 | 146 | Obtains all relevant configuration from charm configuration required | 187 | Obtains all relevant configuration from charm configuration required |
167 | 147 | for initiating a relation to hacluster: | 188 | for initiating a relation to hacluster: |
168 | 148 | 189 | ||
170 | 149 | ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr | 190 | ha-bindiface, ha-mcastport, vip |
171 | 150 | 191 | ||
172 | 151 | returns: dict: A dict containing settings keyed by setting name. | 192 | returns: dict: A dict containing settings keyed by setting name. |
173 | 152 | raises: HAIncompleteConfig if settings are missing. | 193 | raises: HAIncompleteConfig if settings are missing. |
174 | 153 | ''' | 194 | ''' |
176 | 154 | settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] | 195 | settings = ['ha-bindiface', 'ha-mcastport', 'vip'] |
177 | 155 | conf = {} | 196 | conf = {} |
178 | 156 | for setting in settings: | 197 | for setting in settings: |
179 | 157 | conf[setting] = config_get(setting) | 198 | conf[setting] = config_get(setting) |
180 | @@ -170,6 +211,7 @@ | |||
181 | 170 | 211 | ||
182 | 171 | :configs : OSTemplateRenderer: A config tempating object to inspect for | 212 | :configs : OSTemplateRenderer: A config tempating object to inspect for |
183 | 172 | a complete https context. | 213 | a complete https context. |
184 | 214 | |||
185 | 173 | :vip_setting: str: Setting in charm config that specifies | 215 | :vip_setting: str: Setting in charm config that specifies |
186 | 174 | VIP address. | 216 | VIP address. |
187 | 175 | ''' | 217 | ''' |
188 | 176 | 218 | ||
189 | === added directory 'hooks/charmhelpers/contrib/openstack/amulet' | |||
190 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
191 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
192 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
193 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-26 08:15:24 +0000 | |||
194 | @@ -0,0 +1,94 @@ | |||
195 | 1 | from bzrlib.branch import Branch | ||
196 | 2 | import os | ||
197 | 3 | import re | ||
198 | 4 | from charmhelpers.contrib.amulet.deployment import ( | ||
199 | 5 | AmuletDeployment | ||
200 | 6 | ) | ||
201 | 7 | |||
202 | 8 | |||
203 | 9 | class OpenStackAmuletDeployment(AmuletDeployment): | ||
204 | 10 | """OpenStack amulet deployment. | ||
205 | 11 | |||
206 | 12 | This class inherits from AmuletDeployment and has additional support | ||
207 | 13 | that is specifically for use by OpenStack charms. | ||
208 | 14 | """ | ||
209 | 15 | |||
210 | 16 | def __init__(self, series=None, openstack=None, source=None): | ||
211 | 17 | """Initialize the deployment environment.""" | ||
212 | 18 | super(OpenStackAmuletDeployment, self).__init__(series) | ||
213 | 19 | self.openstack = openstack | ||
214 | 20 | self.source = source | ||
215 | 21 | |||
216 | 22 | def _is_dev_branch(self): | ||
217 | 23 | """Determine if branch being tested is a dev (i.e. next) branch.""" | ||
218 | 24 | branch = Branch.open(os.getcwd()) | ||
219 | 25 | parent = branch.get_parent() | ||
220 | 26 | pattern = re.compile("^.*/next/$") | ||
221 | 27 | if (pattern.match(parent)): | ||
222 | 28 | return True | ||
223 | 29 | else: | ||
224 | 30 | return False | ||
225 | 31 | |||
226 | 32 | def _determine_branch_locations(self, other_services): | ||
227 | 33 | """Determine the branch locations for the other services. | ||
228 | 34 | |||
229 | 35 | If the branch being tested is a dev branch, then determine the | ||
230 | 36 | development branch locations for the other services. Otherwise, | ||
231 | 37 | the default charm store branches will be used.""" | ||
232 | 38 | name = 0 | ||
233 | 39 | if self._is_dev_branch(): | ||
234 | 40 | updated_services = [] | ||
235 | 41 | for svc in other_services: | ||
236 | 42 | if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: | ||
237 | 43 | location = 'lp:charms/{}'.format(svc[name]) | ||
238 | 44 | else: | ||
239 | 45 | temp = 'lp:~openstack-charmers/charms/trusty/{}/next' | ||
240 | 46 | location = temp.format(svc[name]) | ||
241 | 47 | updated_services.append(svc + (location,)) | ||
242 | 48 | other_services = updated_services | ||
243 | 49 | return other_services | ||
244 | 50 | |||
245 | 51 | def _add_services(self, this_service, other_services): | ||
246 | 52 | """Add services to the deployment and set openstack-origin/source.""" | ||
247 | 53 | name = 0 | ||
248 | 54 | other_services = self._determine_branch_locations(other_services) | ||
249 | 55 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
250 | 56 | other_services) | ||
251 | 57 | services = other_services | ||
252 | 58 | services.append(this_service) | ||
253 | 59 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | ||
254 | 60 | |||
255 | 61 | if self.openstack: | ||
256 | 62 | for svc in services: | ||
257 | 63 | if svc[name] not in use_source: | ||
258 | 64 | config = {'openstack-origin': self.openstack} | ||
259 | 65 | self.d.configure(svc[name], config) | ||
260 | 66 | |||
261 | 67 | if self.source: | ||
262 | 68 | for svc in services: | ||
263 | 69 | if svc[name] in use_source: | ||
264 | 70 | config = {'source': self.source} | ||
265 | 71 | self.d.configure(svc[name], config) | ||
266 | 72 | |||
267 | 73 | def _configure_services(self, configs): | ||
268 | 74 | """Configure all of the services.""" | ||
269 | 75 | for service, config in configs.iteritems(): | ||
270 | 76 | self.d.configure(service, config) | ||
271 | 77 | |||
272 | 78 | def _get_openstack_release(self): | ||
273 | 79 | """Get openstack release. | ||
274 | 80 | |||
275 | 81 | Return an integer representing the enum value of the openstack | ||
276 | 82 | release. | ||
277 | 83 | """ | ||
278 | 84 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | ||
279 | 85 | self.precise_havana, self.precise_icehouse, | ||
280 | 86 | self.trusty_icehouse) = range(6) | ||
281 | 87 | releases = { | ||
282 | 88 | ('precise', None): self.precise_essex, | ||
283 | 89 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | ||
284 | 90 | ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, | ||
285 | 91 | ('precise', 'cloud:precise-havana'): self.precise_havana, | ||
286 | 92 | ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, | ||
287 | 93 | ('trusty', None): self.trusty_icehouse} | ||
288 | 94 | return releases[(self.series, self.openstack)] | ||
289 | 0 | 95 | ||
290 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
291 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
292 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-26 08:15:24 +0000 | |||
293 | @@ -0,0 +1,276 @@ | |||
294 | 1 | import logging | ||
295 | 2 | import os | ||
296 | 3 | import time | ||
297 | 4 | import urllib | ||
298 | 5 | |||
299 | 6 | import glanceclient.v1.client as glance_client | ||
300 | 7 | import keystoneclient.v2_0 as keystone_client | ||
301 | 8 | import novaclient.v1_1.client as nova_client | ||
302 | 9 | |||
303 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
304 | 11 | AmuletUtils | ||
305 | 12 | ) | ||
306 | 13 | |||
307 | 14 | DEBUG = logging.DEBUG | ||
308 | 15 | ERROR = logging.ERROR | ||
309 | 16 | |||
310 | 17 | |||
311 | 18 | class OpenStackAmuletUtils(AmuletUtils): | ||
312 | 19 | """OpenStack amulet utilities. | ||
313 | 20 | |||
314 | 21 | This class inherits from AmuletUtils and has additional support | ||
315 | 22 | that is specifically for use by OpenStack charms. | ||
316 | 23 | """ | ||
317 | 24 | |||
318 | 25 | def __init__(self, log_level=ERROR): | ||
319 | 26 | """Initialize the deployment environment.""" | ||
320 | 27 | super(OpenStackAmuletUtils, self).__init__(log_level) | ||
321 | 28 | |||
322 | 29 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | ||
323 | 30 | public_port, expected): | ||
324 | 31 | """Validate endpoint data. | ||
325 | 32 | |||
326 | 33 | Validate actual endpoint data vs expected endpoint data. The ports | ||
327 | 34 | are used to find the matching endpoint. | ||
328 | 35 | """ | ||
329 | 36 | found = False | ||
330 | 37 | for ep in endpoints: | ||
331 | 38 | self.log.debug('endpoint: {}'.format(repr(ep))) | ||
332 | 39 | if (admin_port in ep.adminurl and | ||
333 | 40 | internal_port in ep.internalurl and | ||
334 | 41 | public_port in ep.publicurl): | ||
335 | 42 | found = True | ||
336 | 43 | actual = {'id': ep.id, | ||
337 | 44 | 'region': ep.region, | ||
338 | 45 | 'adminurl': ep.adminurl, | ||
339 | 46 | 'internalurl': ep.internalurl, | ||
340 | 47 | 'publicurl': ep.publicurl, | ||
341 | 48 | 'service_id': ep.service_id} | ||
342 | 49 | ret = self._validate_dict_data(expected, actual) | ||
343 | 50 | if ret: | ||
344 | 51 | return 'unexpected endpoint data - {}'.format(ret) | ||
345 | 52 | |||
346 | 53 | if not found: | ||
347 | 54 | return 'endpoint not found' | ||
348 | 55 | |||
349 | 56 | def validate_svc_catalog_endpoint_data(self, expected, actual): | ||
350 | 57 | """Validate service catalog endpoint data. | ||
351 | 58 | |||
352 | 59 | Validate a list of actual service catalog endpoints vs a list of | ||
353 | 60 | expected service catalog endpoints. | ||
354 | 61 | """ | ||
355 | 62 | self.log.debug('actual: {}'.format(repr(actual))) | ||
356 | 63 | for k, v in expected.iteritems(): | ||
357 | 64 | if k in actual: | ||
358 | 65 | ret = self._validate_dict_data(expected[k][0], actual[k][0]) | ||
359 | 66 | if ret: | ||
360 | 67 | return self.endpoint_error(k, ret) | ||
361 | 68 | else: | ||
362 | 69 | return "endpoint {} does not exist".format(k) | ||
363 | 70 | return ret | ||
364 | 71 | |||
365 | 72 | def validate_tenant_data(self, expected, actual): | ||
366 | 73 | """Validate tenant data. | ||
367 | 74 | |||
368 | 75 | Validate a list of actual tenant data vs list of expected tenant | ||
369 | 76 | data. | ||
370 | 77 | """ | ||
371 | 78 | self.log.debug('actual: {}'.format(repr(actual))) | ||
372 | 79 | for e in expected: | ||
373 | 80 | found = False | ||
374 | 81 | for act in actual: | ||
375 | 82 | a = {'enabled': act.enabled, 'description': act.description, | ||
376 | 83 | 'name': act.name, 'id': act.id} | ||
377 | 84 | if e['name'] == a['name']: | ||
378 | 85 | found = True | ||
379 | 86 | ret = self._validate_dict_data(e, a) | ||
380 | 87 | if ret: | ||
381 | 88 | return "unexpected tenant data - {}".format(ret) | ||
382 | 89 | if not found: | ||
383 | 90 | return "tenant {} does not exist".format(e['name']) | ||
384 | 91 | return ret | ||
385 | 92 | |||
386 | 93 | def validate_role_data(self, expected, actual): | ||
387 | 94 | """Validate role data. | ||
388 | 95 | |||
389 | 96 | Validate a list of actual role data vs a list of expected role | ||
390 | 97 | data. | ||
391 | 98 | """ | ||
392 | 99 | self.log.debug('actual: {}'.format(repr(actual))) | ||
393 | 100 | for e in expected: | ||
394 | 101 | found = False | ||
395 | 102 | for act in actual: | ||
396 | 103 | a = {'name': act.name, 'id': act.id} | ||
397 | 104 | if e['name'] == a['name']: | ||
398 | 105 | found = True | ||
399 | 106 | ret = self._validate_dict_data(e, a) | ||
400 | 107 | if ret: | ||
401 | 108 | return "unexpected role data - {}".format(ret) | ||
402 | 109 | if not found: | ||
403 | 110 | return "role {} does not exist".format(e['name']) | ||
404 | 111 | return ret | ||
405 | 112 | |||
406 | 113 | def validate_user_data(self, expected, actual): | ||
407 | 114 | """Validate user data. | ||
408 | 115 | |||
409 | 116 | Validate a list of actual user data vs a list of expected user | ||
410 | 117 | data. | ||
411 | 118 | """ | ||
412 | 119 | self.log.debug('actual: {}'.format(repr(actual))) | ||
413 | 120 | for e in expected: | ||
414 | 121 | found = False | ||
415 | 122 | for act in actual: | ||
416 | 123 | a = {'enabled': act.enabled, 'name': act.name, | ||
417 | 124 | 'email': act.email, 'tenantId': act.tenantId, | ||
418 | 125 | 'id': act.id} | ||
419 | 126 | if e['name'] == a['name']: | ||
420 | 127 | found = True | ||
421 | 128 | ret = self._validate_dict_data(e, a) | ||
422 | 129 | if ret: | ||
423 | 130 | return "unexpected user data - {}".format(ret) | ||
424 | 131 | if not found: | ||
425 | 132 | return "user {} does not exist".format(e['name']) | ||
426 | 133 | return ret | ||
427 | 134 | |||
428 | 135 | def validate_flavor_data(self, expected, actual): | ||
429 | 136 | """Validate flavor data. | ||
430 | 137 | |||
431 | 138 | Validate a list of actual flavors vs a list of expected flavors. | ||
432 | 139 | """ | ||
433 | 140 | self.log.debug('actual: {}'.format(repr(actual))) | ||
434 | 141 | act = [a.name for a in actual] | ||
435 | 142 | return self._validate_list_data(expected, act) | ||
436 | 143 | |||
437 | 144 | def tenant_exists(self, keystone, tenant): | ||
438 | 145 | """Return True if tenant exists.""" | ||
439 | 146 | return tenant in [t.name for t in keystone.tenants.list()] | ||
440 | 147 | |||
441 | 148 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | ||
442 | 149 | tenant): | ||
443 | 150 | """Authenticates admin user with the keystone admin endpoint.""" | ||
444 | 151 | unit = keystone_sentry | ||
445 | 152 | service_ip = unit.relation('shared-db', | ||
446 | 153 | 'mysql:shared-db')['private-address'] | ||
447 | 154 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | ||
448 | 155 | return keystone_client.Client(username=user, password=password, | ||
449 | 156 | tenant_name=tenant, auth_url=ep) | ||
450 | 157 | |||
451 | 158 | def authenticate_keystone_user(self, keystone, user, password, tenant): | ||
452 | 159 | """Authenticates a regular user with the keystone public endpoint.""" | ||
453 | 160 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
454 | 161 | endpoint_type='publicURL') | ||
455 | 162 | return keystone_client.Client(username=user, password=password, | ||
456 | 163 | tenant_name=tenant, auth_url=ep) | ||
457 | 164 | |||
458 | 165 | def authenticate_glance_admin(self, keystone): | ||
459 | 166 | """Authenticates admin user with glance.""" | ||
460 | 167 | ep = keystone.service_catalog.url_for(service_type='image', | ||
461 | 168 | endpoint_type='adminURL') | ||
462 | 169 | return glance_client.Client(ep, token=keystone.auth_token) | ||
463 | 170 | |||
464 | 171 | def authenticate_nova_user(self, keystone, user, password, tenant): | ||
465 | 172 | """Authenticates a regular user with nova-api.""" | ||
466 | 173 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
467 | 174 | endpoint_type='publicURL') | ||
468 | 175 | return nova_client.Client(username=user, api_key=password, | ||
469 | 176 | project_id=tenant, auth_url=ep) | ||
470 | 177 | |||
471 | 178 | def create_cirros_image(self, glance, image_name): | ||
472 | 179 | """Download the latest cirros image and upload it to glance.""" | ||
473 | 180 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | ||
474 | 181 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | ||
475 | 182 | if http_proxy: | ||
476 | 183 | proxies = {'http': http_proxy} | ||
477 | 184 | opener = urllib.FancyURLopener(proxies) | ||
478 | 185 | else: | ||
479 | 186 | opener = urllib.FancyURLopener() | ||
480 | 187 | |||
481 | 188 | f = opener.open("http://download.cirros-cloud.net/version/released") | ||
482 | 189 | version = f.read().strip() | ||
483 | 190 | cirros_img = "cirros-{}-x86_64-disk.img".format(version) | ||
484 | 191 | local_path = os.path.join('tests', cirros_img) | ||
485 | 192 | |||
486 | 193 | if not os.path.exists(local_path): | ||
487 | 194 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | ||
488 | 195 | version, cirros_img) | ||
489 | 196 | opener.retrieve(cirros_url, local_path) | ||
490 | 197 | f.close() | ||
491 | 198 | |||
492 | 199 | with open(local_path) as f: | ||
493 | 200 | image = glance.images.create(name=image_name, is_public=True, | ||
494 | 201 | disk_format='qcow2', | ||
495 | 202 | container_format='bare', data=f) | ||
496 | 203 | count = 1 | ||
497 | 204 | status = image.status | ||
498 | 205 | while status != 'active' and count < 10: | ||
499 | 206 | time.sleep(3) | ||
500 | 207 | image = glance.images.get(image.id) | ||
501 | 208 | status = image.status | ||
502 | 209 | self.log.debug('image status: {}'.format(status)) | ||
503 | 210 | count += 1 | ||
504 | 211 | |||
505 | 212 | if status != 'active': | ||
506 | 213 | self.log.error('image creation timed out') | ||
507 | 214 | return None | ||
508 | 215 | |||
509 | 216 | return image | ||
510 | 217 | |||
511 | 218 | def delete_image(self, glance, image): | ||
512 | 219 | """Delete the specified image.""" | ||
513 | 220 | num_before = len(list(glance.images.list())) | ||
514 | 221 | glance.images.delete(image) | ||
515 | 222 | |||
516 | 223 | count = 1 | ||
517 | 224 | num_after = len(list(glance.images.list())) | ||
518 | 225 | while num_after != (num_before - 1) and count < 10: | ||
519 | 226 | time.sleep(3) | ||
520 | 227 | num_after = len(list(glance.images.list())) | ||
521 | 228 | self.log.debug('number of images: {}'.format(num_after)) | ||
522 | 229 | count += 1 | ||
523 | 230 | |||
524 | 231 | if num_after != (num_before - 1): | ||
525 | 232 | self.log.error('image deletion timed out') | ||
526 | 233 | return False | ||
527 | 234 | |||
528 | 235 | return True | ||
529 | 236 | |||
530 | 237 | def create_instance(self, nova, image_name, instance_name, flavor): | ||
531 | 238 | """Create the specified instance.""" | ||
532 | 239 | image = nova.images.find(name=image_name) | ||
533 | 240 | flavor = nova.flavors.find(name=flavor) | ||
534 | 241 | instance = nova.servers.create(name=instance_name, image=image, | ||
535 | 242 | flavor=flavor) | ||
536 | 243 | |||
537 | 244 | count = 1 | ||
538 | 245 | status = instance.status | ||
539 | 246 | while status != 'ACTIVE' and count < 60: | ||
540 | 247 | time.sleep(3) | ||
541 | 248 | instance = nova.servers.get(instance.id) | ||
542 | 249 | status = instance.status | ||
543 | 250 | self.log.debug('instance status: {}'.format(status)) | ||
544 | 251 | count += 1 | ||
545 | 252 | |||
546 | 253 | if status != 'ACTIVE': | ||
547 | 254 | self.log.error('instance creation timed out') | ||
548 | 255 | return None | ||
549 | 256 | |||
550 | 257 | return instance | ||
551 | 258 | |||
552 | 259 | def delete_instance(self, nova, instance): | ||
553 | 260 | """Delete the specified instance.""" | ||
554 | 261 | num_before = len(list(nova.servers.list())) | ||
555 | 262 | nova.servers.delete(instance) | ||
556 | 263 | |||
557 | 264 | count = 1 | ||
558 | 265 | num_after = len(list(nova.servers.list())) | ||
559 | 266 | while num_after != (num_before - 1) and count < 10: | ||
560 | 267 | time.sleep(3) | ||
561 | 268 | num_after = len(list(nova.servers.list())) | ||
562 | 269 | self.log.debug('number of instances: {}'.format(num_after)) | ||
563 | 270 | count += 1 | ||
564 | 271 | |||
565 | 272 | if num_after != (num_before - 1): | ||
566 | 273 | self.log.error('instance deletion timed out') | ||
567 | 274 | return False | ||
568 | 275 | |||
569 | 276 | return True | ||
570 | 0 | 277 | ||
571 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
572 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-04-10 16:56:26 +0000 | |||
573 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-09-26 08:15:24 +0000 | |||
574 | @@ -8,7 +8,6 @@ | |||
575 | 8 | check_call | 8 | check_call |
576 | 9 | ) | 9 | ) |
577 | 10 | 10 | ||
578 | 11 | |||
579 | 12 | from charmhelpers.fetch import ( | 11 | from charmhelpers.fetch import ( |
580 | 13 | apt_install, | 12 | apt_install, |
581 | 14 | filter_installed_packages, | 13 | filter_installed_packages, |
582 | @@ -21,9 +20,16 @@ | |||
583 | 21 | relation_get, | 20 | relation_get, |
584 | 22 | relation_ids, | 21 | relation_ids, |
585 | 23 | related_units, | 22 | related_units, |
586 | 23 | relation_set, | ||
587 | 24 | unit_get, | 24 | unit_get, |
588 | 25 | unit_private_ip, | 25 | unit_private_ip, |
589 | 26 | ERROR, | 26 | ERROR, |
590 | 27 | INFO | ||
591 | 28 | ) | ||
592 | 29 | |||
593 | 30 | from charmhelpers.core.host import ( | ||
594 | 31 | mkdir, | ||
595 | 32 | write_file | ||
596 | 27 | ) | 33 | ) |
597 | 28 | 34 | ||
598 | 29 | from charmhelpers.contrib.hahelpers.cluster import ( | 35 | from charmhelpers.contrib.hahelpers.cluster import ( |
599 | @@ -36,12 +42,19 @@ | |||
600 | 36 | from charmhelpers.contrib.hahelpers.apache import ( | 42 | from charmhelpers.contrib.hahelpers.apache import ( |
601 | 37 | get_cert, | 43 | get_cert, |
602 | 38 | get_ca_cert, | 44 | get_ca_cert, |
603 | 45 | install_ca_cert, | ||
604 | 39 | ) | 46 | ) |
605 | 40 | 47 | ||
606 | 41 | from charmhelpers.contrib.openstack.neutron import ( | 48 | from charmhelpers.contrib.openstack.neutron import ( |
607 | 42 | neutron_plugin_attribute, | 49 | neutron_plugin_attribute, |
608 | 43 | ) | 50 | ) |
609 | 44 | 51 | ||
610 | 52 | from charmhelpers.contrib.network.ip import ( | ||
611 | 53 | get_address_in_network, | ||
612 | 54 | get_ipv6_addr, | ||
613 | 55 | is_address_in_network | ||
614 | 56 | ) | ||
615 | 57 | |||
616 | 45 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 58 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
617 | 46 | 59 | ||
618 | 47 | 60 | ||
619 | @@ -134,8 +147,26 @@ | |||
620 | 134 | 'Missing required charm config options. ' | 147 | 'Missing required charm config options. ' |
621 | 135 | '(database name and user)') | 148 | '(database name and user)') |
622 | 136 | raise OSContextError | 149 | raise OSContextError |
623 | 150 | |||
624 | 137 | ctxt = {} | 151 | ctxt = {} |
625 | 138 | 152 | ||
626 | 153 | # NOTE(jamespage) if mysql charm provides a network upon which | ||
627 | 154 | # access to the database should be made, reconfigure relation | ||
628 | 155 | # with the service units local address and defer execution | ||
629 | 156 | access_network = relation_get('access-network') | ||
630 | 157 | if access_network is not None: | ||
631 | 158 | if self.relation_prefix is not None: | ||
632 | 159 | hostname_key = "{}_hostname".format(self.relation_prefix) | ||
633 | 160 | else: | ||
634 | 161 | hostname_key = "hostname" | ||
635 | 162 | access_hostname = get_address_in_network(access_network, | ||
636 | 163 | unit_get('private-address')) | ||
637 | 164 | set_hostname = relation_get(attribute=hostname_key, | ||
638 | 165 | unit=local_unit()) | ||
639 | 166 | if set_hostname != access_hostname: | ||
640 | 167 | relation_set(relation_settings={hostname_key: access_hostname}) | ||
641 | 168 | return ctxt # Defer any further hook execution for now.... | ||
642 | 169 | |||
643 | 139 | password_setting = 'password' | 170 | password_setting = 'password' |
644 | 140 | if self.relation_prefix: | 171 | if self.relation_prefix: |
645 | 141 | password_setting = self.relation_prefix + '_password' | 172 | password_setting = self.relation_prefix + '_password' |
646 | @@ -243,23 +274,31 @@ | |||
647 | 243 | 274 | ||
648 | 244 | 275 | ||
649 | 245 | class AMQPContext(OSContextGenerator): | 276 | class AMQPContext(OSContextGenerator): |
650 | 246 | interfaces = ['amqp'] | ||
651 | 247 | 277 | ||
653 | 248 | def __init__(self, ssl_dir=None): | 278 | def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): |
654 | 249 | self.ssl_dir = ssl_dir | 279 | self.ssl_dir = ssl_dir |
655 | 280 | self.rel_name = rel_name | ||
656 | 281 | self.relation_prefix = relation_prefix | ||
657 | 282 | self.interfaces = [rel_name] | ||
658 | 250 | 283 | ||
659 | 251 | def __call__(self): | 284 | def __call__(self): |
660 | 252 | log('Generating template context for amqp') | 285 | log('Generating template context for amqp') |
661 | 253 | conf = config() | 286 | conf = config() |
662 | 287 | user_setting = 'rabbit-user' | ||
663 | 288 | vhost_setting = 'rabbit-vhost' | ||
664 | 289 | if self.relation_prefix: | ||
665 | 290 | user_setting = self.relation_prefix + '-rabbit-user' | ||
666 | 291 | vhost_setting = self.relation_prefix + '-rabbit-vhost' | ||
667 | 292 | |||
668 | 254 | try: | 293 | try: |
671 | 255 | username = conf['rabbit-user'] | 294 | username = conf[user_setting] |
672 | 256 | vhost = conf['rabbit-vhost'] | 295 | vhost = conf[vhost_setting] |
673 | 257 | except KeyError as e: | 296 | except KeyError as e: |
674 | 258 | log('Could not generate shared_db context. ' | 297 | log('Could not generate shared_db context. ' |
675 | 259 | 'Missing required charm config options: %s.' % e) | 298 | 'Missing required charm config options: %s.' % e) |
676 | 260 | raise OSContextError | 299 | raise OSContextError |
677 | 261 | ctxt = {} | 300 | ctxt = {} |
679 | 262 | for rid in relation_ids('amqp'): | 301 | for rid in relation_ids(self.rel_name): |
680 | 263 | ha_vip_only = False | 302 | ha_vip_only = False |
681 | 264 | for unit in related_units(rid): | 303 | for unit in related_units(rid): |
682 | 265 | if relation_get('clustered', rid=rid, unit=unit): | 304 | if relation_get('clustered', rid=rid, unit=unit): |
683 | @@ -332,10 +371,12 @@ | |||
684 | 332 | use_syslog = str(config('use-syslog')).lower() | 371 | use_syslog = str(config('use-syslog')).lower() |
685 | 333 | for rid in relation_ids('ceph'): | 372 | for rid in relation_ids('ceph'): |
686 | 334 | for unit in related_units(rid): | 373 | for unit in related_units(rid): |
687 | 335 | mon_hosts.append(relation_get('private-address', rid=rid, | ||
688 | 336 | unit=unit)) | ||
689 | 337 | auth = relation_get('auth', rid=rid, unit=unit) | 374 | auth = relation_get('auth', rid=rid, unit=unit) |
690 | 338 | key = relation_get('key', rid=rid, unit=unit) | 375 | key = relation_get('key', rid=rid, unit=unit) |
691 | 376 | ceph_addr = \ | ||
692 | 377 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ | ||
693 | 378 | relation_get('private-address', rid=rid, unit=unit) | ||
694 | 379 | mon_hosts.append(ceph_addr) | ||
695 | 339 | 380 | ||
696 | 340 | ctxt = { | 381 | ctxt = { |
697 | 341 | 'mon_hosts': ' '.join(mon_hosts), | 382 | 'mon_hosts': ' '.join(mon_hosts), |
698 | @@ -369,7 +410,12 @@ | |||
699 | 369 | 410 | ||
700 | 370 | cluster_hosts = {} | 411 | cluster_hosts = {} |
701 | 371 | l_unit = local_unit().replace('/', '-') | 412 | l_unit = local_unit().replace('/', '-') |
703 | 372 | cluster_hosts[l_unit] = unit_get('private-address') | 413 | if config('prefer-ipv6'): |
704 | 414 | addr = get_ipv6_addr() | ||
705 | 415 | else: | ||
706 | 416 | addr = unit_get('private-address') | ||
707 | 417 | cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), | ||
708 | 418 | addr) | ||
709 | 373 | 419 | ||
710 | 374 | for rid in relation_ids('cluster'): | 420 | for rid in relation_ids('cluster'): |
711 | 375 | for unit in related_units(rid): | 421 | for unit in related_units(rid): |
712 | @@ -380,6 +426,21 @@ | |||
713 | 380 | ctxt = { | 426 | ctxt = { |
714 | 381 | 'units': cluster_hosts, | 427 | 'units': cluster_hosts, |
715 | 382 | } | 428 | } |
716 | 429 | |||
717 | 430 | if config('haproxy-server-timeout'): | ||
718 | 431 | ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout') | ||
719 | 432 | if config('haproxy-client-timeout'): | ||
720 | 433 | ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout') | ||
721 | 434 | |||
722 | 435 | if config('prefer-ipv6'): | ||
723 | 436 | ctxt['local_host'] = 'ip6-localhost' | ||
724 | 437 | ctxt['haproxy_host'] = '::' | ||
725 | 438 | ctxt['stat_port'] = ':::8888' | ||
726 | 439 | else: | ||
727 | 440 | ctxt['local_host'] = '127.0.0.1' | ||
728 | 441 | ctxt['haproxy_host'] = '0.0.0.0' | ||
729 | 442 | ctxt['stat_port'] = ':8888' | ||
730 | 443 | |||
731 | 383 | if len(cluster_hosts.keys()) > 1: | 444 | if len(cluster_hosts.keys()) > 1: |
732 | 384 | # Enable haproxy when we have enough peers. | 445 | # Enable haproxy when we have enough peers. |
733 | 385 | log('Ensuring haproxy enabled in /etc/default/haproxy.') | 446 | log('Ensuring haproxy enabled in /etc/default/haproxy.') |
734 | @@ -418,12 +479,13 @@ | |||
735 | 418 | """ | 479 | """ |
736 | 419 | Generates a context for an apache vhost configuration that configures | 480 | Generates a context for an apache vhost configuration that configures |
737 | 420 | HTTPS reverse proxying for one or many endpoints. Generated context | 481 | HTTPS reverse proxying for one or many endpoints. Generated context |
744 | 421 | looks something like: | 482 | looks something like:: |
745 | 422 | { | 483 | |
746 | 423 | 'namespace': 'cinder', | 484 | { |
747 | 424 | 'private_address': 'iscsi.mycinderhost.com', | 485 | 'namespace': 'cinder', |
748 | 425 | 'endpoints': [(8776, 8766), (8777, 8767)] | 486 | 'private_address': 'iscsi.mycinderhost.com', |
749 | 426 | } | 487 | 'endpoints': [(8776, 8766), (8777, 8767)] |
750 | 488 | } | ||
751 | 427 | 489 | ||
752 | 428 | The endpoints list consists of a tuples mapping external ports | 490 | The endpoints list consists of a tuples mapping external ports |
753 | 429 | to internal ports. | 491 | to internal ports. |
754 | @@ -439,22 +501,36 @@ | |||
755 | 439 | cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] | 501 | cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] |
756 | 440 | check_call(cmd) | 502 | check_call(cmd) |
757 | 441 | 503 | ||
761 | 442 | def configure_cert(self): | 504 | def configure_cert(self, cn=None): |
759 | 443 | if not os.path.isdir('/etc/apache2/ssl'): | ||
760 | 444 | os.mkdir('/etc/apache2/ssl') | ||
762 | 445 | ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) | 505 | ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) |
770 | 446 | if not os.path.isdir(ssl_dir): | 506 | mkdir(path=ssl_dir) |
771 | 447 | os.mkdir(ssl_dir) | 507 | cert, key = get_cert(cn) |
772 | 448 | cert, key = get_cert() | 508 | if cn: |
773 | 449 | with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: | 509 | cert_filename = 'cert_{}'.format(cn) |
774 | 450 | cert_out.write(b64decode(cert)) | 510 | key_filename = 'key_{}'.format(cn) |
775 | 451 | with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: | 511 | else: |
776 | 452 | key_out.write(b64decode(key)) | 512 | cert_filename = 'cert' |
777 | 513 | key_filename = 'key' | ||
778 | 514 | write_file(path=os.path.join(ssl_dir, cert_filename), | ||
779 | 515 | content=b64decode(cert)) | ||
780 | 516 | write_file(path=os.path.join(ssl_dir, key_filename), | ||
781 | 517 | content=b64decode(key)) | ||
782 | 518 | |||
783 | 519 | def configure_ca(self): | ||
784 | 453 | ca_cert = get_ca_cert() | 520 | ca_cert = get_ca_cert() |
785 | 454 | if ca_cert: | 521 | if ca_cert: |
789 | 455 | with open(CA_CERT_PATH, 'w') as ca_out: | 522 | install_ca_cert(b64decode(ca_cert)) |
790 | 456 | ca_out.write(b64decode(ca_cert)) | 523 | |
791 | 457 | check_call(['update-ca-certificates']) | 524 | def canonical_names(self): |
792 | 525 | '''Figure out which canonical names clients will access this service''' | ||
793 | 526 | cns = [] | ||
794 | 527 | for r_id in relation_ids('identity-service'): | ||
795 | 528 | for unit in related_units(r_id): | ||
796 | 529 | rdata = relation_get(rid=r_id, unit=unit) | ||
797 | 530 | for k in rdata: | ||
798 | 531 | if k.startswith('ssl_key_'): | ||
799 | 532 | cns.append(k.lstrip('ssl_key_')) | ||
800 | 533 | return list(set(cns)) | ||
801 | 458 | 534 | ||
802 | 459 | def __call__(self): | 535 | def __call__(self): |
803 | 460 | if isinstance(self.external_ports, basestring): | 536 | if isinstance(self.external_ports, basestring): |
804 | @@ -462,21 +538,47 @@ | |||
805 | 462 | if (not self.external_ports or not https()): | 538 | if (not self.external_ports or not https()): |
806 | 463 | return {} | 539 | return {} |
807 | 464 | 540 | ||
809 | 465 | self.configure_cert() | 541 | self.configure_ca() |
810 | 466 | self.enable_modules() | 542 | self.enable_modules() |
811 | 467 | 543 | ||
812 | 468 | ctxt = { | 544 | ctxt = { |
813 | 469 | 'namespace': self.service_namespace, | 545 | 'namespace': self.service_namespace, |
816 | 470 | 'private_address': unit_get('private-address'), | 546 | 'endpoints': [], |
817 | 471 | 'endpoints': [] | 547 | 'ext_ports': [] |
818 | 472 | } | 548 | } |
826 | 473 | if is_clustered(): | 549 | |
827 | 474 | ctxt['private_address'] = config('vip') | 550 | for cn in self.canonical_names(): |
828 | 475 | for api_port in self.external_ports: | 551 | self.configure_cert(cn) |
829 | 476 | ext_port = determine_apache_port(api_port) | 552 | |
830 | 477 | int_port = determine_api_port(api_port) | 553 | addresses = [] |
831 | 478 | portmap = (int(ext_port), int(int_port)) | 554 | vips = [] |
832 | 479 | ctxt['endpoints'].append(portmap) | 555 | if config('vip'): |
833 | 556 | vips = config('vip').split() | ||
834 | 557 | |||
835 | 558 | for network_type in ['os-internal-network', | ||
836 | 559 | 'os-admin-network', | ||
837 | 560 | 'os-public-network']: | ||
838 | 561 | address = get_address_in_network(config(network_type), | ||
839 | 562 | unit_get('private-address')) | ||
840 | 563 | if len(vips) > 0 and is_clustered(): | ||
841 | 564 | for vip in vips: | ||
842 | 565 | if is_address_in_network(config(network_type), | ||
843 | 566 | vip): | ||
844 | 567 | addresses.append((address, vip)) | ||
845 | 568 | break | ||
846 | 569 | elif is_clustered(): | ||
847 | 570 | addresses.append((address, config('vip'))) | ||
848 | 571 | else: | ||
849 | 572 | addresses.append((address, address)) | ||
850 | 573 | |||
851 | 574 | for address, endpoint in set(addresses): | ||
852 | 575 | for api_port in self.external_ports: | ||
853 | 576 | ext_port = determine_apache_port(api_port) | ||
854 | 577 | int_port = determine_api_port(api_port) | ||
855 | 578 | portmap = (address, endpoint, int(ext_port), int(int_port)) | ||
856 | 579 | ctxt['endpoints'].append(portmap) | ||
857 | 580 | ctxt['ext_ports'].append(int(ext_port)) | ||
858 | 581 | ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) | ||
859 | 480 | return ctxt | 582 | return ctxt |
860 | 481 | 583 | ||
861 | 482 | 584 | ||
862 | @@ -541,6 +643,26 @@ | |||
863 | 541 | 643 | ||
864 | 542 | return nvp_ctxt | 644 | return nvp_ctxt |
865 | 543 | 645 | ||
866 | 646 | def n1kv_ctxt(self): | ||
867 | 647 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
868 | 648 | self.network_manager) | ||
869 | 649 | n1kv_config = neutron_plugin_attribute(self.plugin, 'config', | ||
870 | 650 | self.network_manager) | ||
871 | 651 | n1kv_ctxt = { | ||
872 | 652 | 'core_plugin': driver, | ||
873 | 653 | 'neutron_plugin': 'n1kv', | ||
874 | 654 | 'neutron_security_groups': self.neutron_security_groups, | ||
875 | 655 | 'local_ip': unit_private_ip(), | ||
876 | 656 | 'config': n1kv_config, | ||
877 | 657 | 'vsm_ip': config('n1kv-vsm-ip'), | ||
878 | 658 | 'vsm_username': config('n1kv-vsm-username'), | ||
879 | 659 | 'vsm_password': config('n1kv-vsm-password'), | ||
880 | 660 | 'restrict_policy_profiles': config( | ||
881 | 661 | 'n1kv_restrict_policy_profiles'), | ||
882 | 662 | } | ||
883 | 663 | |||
884 | 664 | return n1kv_ctxt | ||
885 | 665 | |||
886 | 544 | def neutron_ctxt(self): | 666 | def neutron_ctxt(self): |
887 | 545 | if https(): | 667 | if https(): |
888 | 546 | proto = 'https' | 668 | proto = 'https' |
889 | @@ -570,8 +692,10 @@ | |||
890 | 570 | 692 | ||
891 | 571 | if self.plugin == 'ovs': | 693 | if self.plugin == 'ovs': |
892 | 572 | ctxt.update(self.ovs_ctxt()) | 694 | ctxt.update(self.ovs_ctxt()) |
894 | 573 | elif self.plugin == 'nvp': | 695 | elif self.plugin in ['nvp', 'nsx']: |
895 | 574 | ctxt.update(self.nvp_ctxt()) | 696 | ctxt.update(self.nvp_ctxt()) |
896 | 697 | elif self.plugin == 'n1kv': | ||
897 | 698 | ctxt.update(self.n1kv_ctxt()) | ||
898 | 575 | 699 | ||
899 | 576 | alchemy_flags = config('neutron-alchemy-flags') | 700 | alchemy_flags = config('neutron-alchemy-flags') |
900 | 577 | if alchemy_flags: | 701 | if alchemy_flags: |
901 | @@ -611,7 +735,7 @@ | |||
902 | 611 | The subordinate interface allows subordinates to export their | 735 | The subordinate interface allows subordinates to export their |
903 | 612 | configuration requirements to the principle for multiple config | 736 | configuration requirements to the principle for multiple config |
904 | 613 | files and multiple serivces. Ie, a subordinate that has interfaces | 737 | files and multiple serivces. Ie, a subordinate that has interfaces |
906 | 614 | to both glance and nova may export to following yaml blob as json: | 738 | to both glance and nova may export to following yaml blob as json:: |
907 | 615 | 739 | ||
908 | 616 | glance: | 740 | glance: |
909 | 617 | /etc/glance/glance-api.conf: | 741 | /etc/glance/glance-api.conf: |
910 | @@ -630,7 +754,8 @@ | |||
911 | 630 | 754 | ||
912 | 631 | It is then up to the principle charms to subscribe this context to | 755 | It is then up to the principle charms to subscribe this context to |
913 | 632 | the service+config file it is interestd in. Configuration data will | 756 | the service+config file it is interestd in. Configuration data will |
915 | 633 | be available in the template context, in glance's case, as: | 757 | be available in the template context, in glance's case, as:: |
916 | 758 | |||
917 | 634 | ctxt = { | 759 | ctxt = { |
918 | 635 | ... other context ... | 760 | ... other context ... |
919 | 636 | 'subordinate_config': { | 761 | 'subordinate_config': { |
920 | @@ -657,7 +782,7 @@ | |||
921 | 657 | self.interface = interface | 782 | self.interface = interface |
922 | 658 | 783 | ||
923 | 659 | def __call__(self): | 784 | def __call__(self): |
925 | 660 | ctxt = {} | 785 | ctxt = {'sections': {}} |
926 | 661 | for rid in relation_ids(self.interface): | 786 | for rid in relation_ids(self.interface): |
927 | 662 | for unit in related_units(rid): | 787 | for unit in related_units(rid): |
928 | 663 | sub_config = relation_get('subordinate_configuration', | 788 | sub_config = relation_get('subordinate_configuration', |
929 | @@ -683,11 +808,26 @@ | |||
930 | 683 | 808 | ||
931 | 684 | sub_config = sub_config[self.config_file] | 809 | sub_config = sub_config[self.config_file] |
932 | 685 | for k, v in sub_config.iteritems(): | 810 | for k, v in sub_config.iteritems(): |
938 | 686 | ctxt[k] = v | 811 | if k == 'sections': |
939 | 687 | 812 | for section, config_dict in v.iteritems(): | |
940 | 688 | if not ctxt: | 813 | log("adding section '%s'" % (section)) |
941 | 689 | ctxt['sections'] = {} | 814 | ctxt[k][section] = config_dict |
942 | 690 | 815 | else: | |
943 | 816 | ctxt[k] = v | ||
944 | 817 | |||
945 | 818 | log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) | ||
946 | 819 | |||
947 | 820 | return ctxt | ||
948 | 821 | |||
949 | 822 | |||
950 | 823 | class LogLevelContext(OSContextGenerator): | ||
951 | 824 | |||
952 | 825 | def __call__(self): | ||
953 | 826 | ctxt = {} | ||
954 | 827 | ctxt['debug'] = \ | ||
955 | 828 | False if config('debug') is None else config('debug') | ||
956 | 829 | ctxt['verbose'] = \ | ||
957 | 830 | False if config('verbose') is None else config('verbose') | ||
958 | 691 | return ctxt | 831 | return ctxt |
959 | 692 | 832 | ||
960 | 693 | 833 | ||
961 | 694 | 834 | ||
962 | === added file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
963 | --- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000 | |||
964 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-09-26 08:15:24 +0000 | |||
965 | @@ -0,0 +1,79 @@ | |||
966 | 1 | from charmhelpers.core.hookenv import ( | ||
967 | 2 | config, | ||
968 | 3 | unit_get, | ||
969 | 4 | ) | ||
970 | 5 | |||
971 | 6 | from charmhelpers.contrib.network.ip import ( | ||
972 | 7 | get_address_in_network, | ||
973 | 8 | is_address_in_network, | ||
974 | 9 | is_ipv6, | ||
975 | 10 | get_ipv6_addr, | ||
976 | 11 | ) | ||
977 | 12 | |||
978 | 13 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | ||
979 | 14 | |||
980 | 15 | PUBLIC = 'public' | ||
981 | 16 | INTERNAL = 'int' | ||
982 | 17 | ADMIN = 'admin' | ||
983 | 18 | |||
984 | 19 | _address_map = { | ||
985 | 20 | PUBLIC: { | ||
986 | 21 | 'config': 'os-public-network', | ||
987 | 22 | 'fallback': 'public-address' | ||
988 | 23 | }, | ||
989 | 24 | INTERNAL: { | ||
990 | 25 | 'config': 'os-internal-network', | ||
991 | 26 | 'fallback': 'private-address' | ||
992 | 27 | }, | ||
993 | 28 | ADMIN: { | ||
994 | 29 | 'config': 'os-admin-network', | ||
995 | 30 | 'fallback': 'private-address' | ||
996 | 31 | } | ||
997 | 32 | } | ||
998 | 33 | |||
999 | 34 | |||
1000 | 35 | def canonical_url(configs, endpoint_type=PUBLIC): | ||
1001 | 36 | ''' | ||
1002 | 37 | Returns the correct HTTP URL to this host given the state of HTTPS | ||
1003 | 38 | configuration, hacluster and charm configuration. | ||
1004 | 39 | |||
1005 | 40 | :configs OSTemplateRenderer: A config tempating object to inspect for | ||
1006 | 41 | a complete https context. | ||
1007 | 42 | :endpoint_type str: The endpoint type to resolve. | ||
1008 | 43 | |||
1009 | 44 | :returns str: Base URL for services on the current service unit. | ||
1010 | 45 | ''' | ||
1011 | 46 | scheme = 'http' | ||
1012 | 47 | if 'https' in configs.complete_contexts(): | ||
1013 | 48 | scheme = 'https' | ||
1014 | 49 | address = resolve_address(endpoint_type) | ||
1015 | 50 | if is_ipv6(address): | ||
1016 | 51 | address = "[{}]".format(address) | ||
1017 | 52 | return '%s://%s' % (scheme, address) | ||
1018 | 53 | |||
1019 | 54 | |||
1020 | 55 | def resolve_address(endpoint_type=PUBLIC): | ||
1021 | 56 | resolved_address = None | ||
1022 | 57 | if is_clustered(): | ||
1023 | 58 | if config(_address_map[endpoint_type]['config']) is None: | ||
1024 | 59 | # Assume vip is simple and pass back directly | ||
1025 | 60 | resolved_address = config('vip') | ||
1026 | 61 | else: | ||
1027 | 62 | for vip in config('vip').split(): | ||
1028 | 63 | if is_address_in_network( | ||
1029 | 64 | config(_address_map[endpoint_type]['config']), | ||
1030 | 65 | vip): | ||
1031 | 66 | resolved_address = vip | ||
1032 | 67 | else: | ||
1033 | 68 | if config('prefer-ipv6'): | ||
1034 | 69 | fallback_addr = get_ipv6_addr() | ||
1035 | 70 | else: | ||
1036 | 71 | fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) | ||
1037 | 72 | resolved_address = get_address_in_network( | ||
1038 | 73 | config(_address_map[endpoint_type]['config']), fallback_addr) | ||
1039 | 74 | |||
1040 | 75 | if resolved_address is None: | ||
1041 | 76 | raise ValueError('Unable to resolve a suitable IP address' | ||
1042 | 77 | ' based on charm state and configuration') | ||
1043 | 78 | else: | ||
1044 | 79 | return resolved_address | ||
1045 | 0 | 80 | ||
1046 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
1047 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2014-03-27 12:33:12 +0000 | |||
1048 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-09-26 08:15:24 +0000 | |||
1049 | @@ -114,14 +114,44 @@ | |||
1050 | 114 | 'server_packages': ['neutron-server', | 114 | 'server_packages': ['neutron-server', |
1051 | 115 | 'neutron-plugin-nicira'], | 115 | 'neutron-plugin-nicira'], |
1052 | 116 | 'server_services': ['neutron-server'] | 116 | 'server_services': ['neutron-server'] |
1053 | 117 | }, | ||
1054 | 118 | 'nsx': { | ||
1055 | 119 | 'config': '/etc/neutron/plugins/vmware/nsx.ini', | ||
1056 | 120 | 'driver': 'vmware', | ||
1057 | 121 | 'contexts': [ | ||
1058 | 122 | context.SharedDBContext(user=config('neutron-database-user'), | ||
1059 | 123 | database=config('neutron-database'), | ||
1060 | 124 | relation_prefix='neutron', | ||
1061 | 125 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1062 | 126 | 'services': [], | ||
1063 | 127 | 'packages': [], | ||
1064 | 128 | 'server_packages': ['neutron-server', | ||
1065 | 129 | 'neutron-plugin-vmware'], | ||
1066 | 130 | 'server_services': ['neutron-server'] | ||
1067 | 131 | }, | ||
1068 | 132 | 'n1kv': { | ||
1069 | 133 | 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', | ||
1070 | 134 | 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', | ||
1071 | 135 | 'contexts': [ | ||
1072 | 136 | context.SharedDBContext(user=config('neutron-database-user'), | ||
1073 | 137 | database=config('neutron-database'), | ||
1074 | 138 | relation_prefix='neutron', | ||
1075 | 139 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1076 | 140 | 'services': [], | ||
1077 | 141 | 'packages': [['neutron-plugin-cisco']], | ||
1078 | 142 | 'server_packages': ['neutron-server', | ||
1079 | 143 | 'neutron-plugin-cisco'], | ||
1080 | 144 | 'server_services': ['neutron-server'] | ||
1081 | 117 | } | 145 | } |
1082 | 118 | } | 146 | } |
1083 | 119 | # NOTE: patch in ml2 plugin for icehouse onwards | ||
1084 | 120 | if release >= 'icehouse': | 147 | if release >= 'icehouse': |
1085 | 148 | # NOTE: patch in ml2 plugin for icehouse onwards | ||
1086 | 121 | plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' | 149 | plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' |
1087 | 122 | plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' | 150 | plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' |
1088 | 123 | plugins['ovs']['server_packages'] = ['neutron-server', | 151 | plugins['ovs']['server_packages'] = ['neutron-server', |
1089 | 124 | 'neutron-plugin-ml2'] | 152 | 'neutron-plugin-ml2'] |
1090 | 153 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards | ||
1091 | 154 | plugins['nvp'] = plugins['nsx'] | ||
1092 | 125 | return plugins | 155 | return plugins |
1093 | 126 | 156 | ||
1094 | 127 | 157 | ||
1095 | 128 | 158 | ||
1096 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
1097 | --- hooks/charmhelpers/contrib/openstack/templating.py 2014-03-05 12:57:20 +0000 | |||
1098 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2014-09-26 08:15:24 +0000 | |||
1099 | @@ -30,17 +30,17 @@ | |||
1100 | 30 | loading dir. | 30 | loading dir. |
1101 | 31 | 31 | ||
1102 | 32 | A charm may also ship a templates dir with this module | 32 | A charm may also ship a templates dir with this module |
1114 | 33 | and it will be appended to the bottom of the search list, eg: | 33 | and it will be appended to the bottom of the search list, eg:: |
1115 | 34 | hooks/charmhelpers/contrib/openstack/templates. | 34 | |
1116 | 35 | 35 | hooks/charmhelpers/contrib/openstack/templates | |
1117 | 36 | :param templates_dir: str: Base template directory containing release | 36 | |
1118 | 37 | sub-directories. | 37 | :param templates_dir (str): Base template directory containing release |
1119 | 38 | :param os_release : str: OpenStack release codename to construct template | 38 | sub-directories. |
1120 | 39 | loader. | 39 | :param os_release (str): OpenStack release codename to construct template |
1121 | 40 | 40 | loader. | |
1122 | 41 | :returns : jinja2.ChoiceLoader constructed with a list of | 41 | :returns: jinja2.ChoiceLoader constructed with a list of |
1123 | 42 | jinja2.FilesystemLoaders, ordered in descending | 42 | jinja2.FilesystemLoaders, ordered in descending |
1124 | 43 | order by OpenStack release. | 43 | order by OpenStack release. |
1125 | 44 | """ | 44 | """ |
1126 | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
1127 | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] |
1128 | @@ -111,7 +111,8 @@ | |||
1129 | 111 | and ease the burden of managing config templates across multiple OpenStack | 111 | and ease the burden of managing config templates across multiple OpenStack |
1130 | 112 | releases. | 112 | releases. |
1131 | 113 | 113 | ||
1133 | 114 | Basic usage: | 114 | Basic usage:: |
1134 | 115 | |||
1135 | 115 | # import some common context generates from charmhelpers | 116 | # import some common context generates from charmhelpers |
1136 | 116 | from charmhelpers.contrib.openstack import context | 117 | from charmhelpers.contrib.openstack import context |
1137 | 117 | 118 | ||
1138 | @@ -131,21 +132,19 @@ | |||
1139 | 131 | # write out all registered configs | 132 | # write out all registered configs |
1140 | 132 | configs.write_all() | 133 | configs.write_all() |
1141 | 133 | 134 | ||
1143 | 134 | Details: | 135 | **OpenStack Releases and template loading** |
1144 | 135 | 136 | ||
1145 | 136 | OpenStack Releases and template loading | ||
1146 | 137 | --------------------------------------- | ||
1147 | 138 | When the object is instantiated, it is associated with a specific OS | 137 | When the object is instantiated, it is associated with a specific OS |
1148 | 139 | release. This dictates how the template loader will be constructed. | 138 | release. This dictates how the template loader will be constructed. |
1149 | 140 | 139 | ||
1150 | 141 | The constructed loader attempts to load the template from several places | 140 | The constructed loader attempts to load the template from several places |
1151 | 142 | in the following order: | 141 | in the following order: |
1158 | 143 | - from the most recent OS release-specific template dir (if one exists) | 142 | - from the most recent OS release-specific template dir (if one exists) |
1159 | 144 | - the base templates_dir | 143 | - the base templates_dir |
1160 | 145 | - a template directory shipped in the charm with this helper file. | 144 | - a template directory shipped in the charm with this helper file. |
1161 | 146 | 145 | ||
1162 | 147 | 146 | For the example above, '/tmp/templates' contains the following structure:: | |
1163 | 148 | For the example above, '/tmp/templates' contains the following structure: | 147 | |
1164 | 149 | /tmp/templates/nova.conf | 148 | /tmp/templates/nova.conf |
1165 | 150 | /tmp/templates/api-paste.ini | 149 | /tmp/templates/api-paste.ini |
1166 | 151 | /tmp/templates/grizzly/api-paste.ini | 150 | /tmp/templates/grizzly/api-paste.ini |
1167 | @@ -169,8 +168,8 @@ | |||
1168 | 169 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows | 168 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows |
1169 | 170 | us to ship common templates (haproxy, apache) with the helpers. | 169 | us to ship common templates (haproxy, apache) with the helpers. |
1170 | 171 | 170 | ||
1173 | 172 | Context generators | 171 | **Context generators** |
1174 | 173 | --------------------------------------- | 172 | |
1175 | 174 | Context generators are used to generate template contexts during hook | 173 | Context generators are used to generate template contexts during hook |
1176 | 175 | execution. Doing so may require inspecting service relations, charm | 174 | execution. Doing so may require inspecting service relations, charm |
1177 | 176 | config, etc. When registered, a config file is associated with a list | 175 | config, etc. When registered, a config file is associated with a list |
1178 | 177 | 176 | ||
1179 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
1180 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-04-10 16:56:26 +0000 | |||
1181 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-09-26 08:15:24 +0000 | |||
1182 | @@ -3,7 +3,6 @@ | |||
1183 | 3 | # Common python helper functions used for OpenStack charms. | 3 | # Common python helper functions used for OpenStack charms. |
1184 | 4 | from collections import OrderedDict | 4 | from collections import OrderedDict |
1185 | 5 | 5 | ||
1186 | 6 | import apt_pkg as apt | ||
1187 | 7 | import subprocess | 6 | import subprocess |
1188 | 8 | import os | 7 | import os |
1189 | 9 | import socket | 8 | import socket |
1190 | @@ -24,7 +23,7 @@ | |||
1191 | 24 | ) | 23 | ) |
1192 | 25 | 24 | ||
1193 | 26 | from charmhelpers.core.host import lsb_release, mounts, umount | 25 | from charmhelpers.core.host import lsb_release, mounts, umount |
1195 | 27 | from charmhelpers.fetch import apt_install | 26 | from charmhelpers.fetch import apt_install, apt_cache |
1196 | 28 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | 27 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
1197 | 29 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device | 28 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device |
1198 | 30 | 29 | ||
1199 | @@ -41,7 +40,8 @@ | |||
1200 | 41 | ('quantal', 'folsom'), | 40 | ('quantal', 'folsom'), |
1201 | 42 | ('raring', 'grizzly'), | 41 | ('raring', 'grizzly'), |
1202 | 43 | ('saucy', 'havana'), | 42 | ('saucy', 'havana'), |
1204 | 44 | ('trusty', 'icehouse') | 43 | ('trusty', 'icehouse'), |
1205 | 44 | ('utopic', 'juno'), | ||
1206 | 45 | ]) | 45 | ]) |
1207 | 46 | 46 | ||
1208 | 47 | 47 | ||
1209 | @@ -52,6 +52,7 @@ | |||
1210 | 52 | ('2013.1', 'grizzly'), | 52 | ('2013.1', 'grizzly'), |
1211 | 53 | ('2013.2', 'havana'), | 53 | ('2013.2', 'havana'), |
1212 | 54 | ('2014.1', 'icehouse'), | 54 | ('2014.1', 'icehouse'), |
1213 | 55 | ('2014.2', 'juno'), | ||
1214 | 55 | ]) | 56 | ]) |
1215 | 56 | 57 | ||
1216 | 57 | # The ugly duckling | 58 | # The ugly duckling |
1217 | @@ -69,6 +70,7 @@ | |||
1218 | 69 | ('1.13.0', 'icehouse'), | 70 | ('1.13.0', 'icehouse'), |
1219 | 70 | ('1.12.0', 'icehouse'), | 71 | ('1.12.0', 'icehouse'), |
1220 | 71 | ('1.11.0', 'icehouse'), | 72 | ('1.11.0', 'icehouse'), |
1221 | 73 | ('2.0.0', 'juno'), | ||
1222 | 72 | ]) | 74 | ]) |
1223 | 73 | 75 | ||
1224 | 74 | DEFAULT_LOOPBACK_SIZE = '5G' | 76 | DEFAULT_LOOPBACK_SIZE = '5G' |
1225 | @@ -83,6 +85,8 @@ | |||
1226 | 83 | '''Derive OpenStack release codename from a given installation source.''' | 85 | '''Derive OpenStack release codename from a given installation source.''' |
1227 | 84 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | 86 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
1228 | 85 | rel = '' | 87 | rel = '' |
1229 | 88 | if src is None: | ||
1230 | 89 | return rel | ||
1231 | 86 | if src in ['distro', 'distro-proposed']: | 90 | if src in ['distro', 'distro-proposed']: |
1232 | 87 | try: | 91 | try: |
1233 | 88 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] | 92 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] |
1234 | @@ -130,8 +134,9 @@ | |||
1235 | 130 | 134 | ||
1236 | 131 | def get_os_codename_package(package, fatal=True): | 135 | def get_os_codename_package(package, fatal=True): |
1237 | 132 | '''Derive OpenStack release codename from an installed package.''' | 136 | '''Derive OpenStack release codename from an installed package.''' |
1240 | 133 | apt.init() | 137 | import apt_pkg as apt |
1241 | 134 | cache = apt.Cache() | 138 | |
1242 | 139 | cache = apt_cache() | ||
1243 | 135 | 140 | ||
1244 | 136 | try: | 141 | try: |
1245 | 137 | pkg = cache[package] | 142 | pkg = cache[package] |
1246 | @@ -182,8 +187,8 @@ | |||
1247 | 182 | for version, cname in vers_map.iteritems(): | 187 | for version, cname in vers_map.iteritems(): |
1248 | 183 | if cname == codename: | 188 | if cname == codename: |
1249 | 184 | return version | 189 | return version |
1252 | 185 | #e = "Could not determine OpenStack version for package: %s" % pkg | 190 | # e = "Could not determine OpenStack version for package: %s" % pkg |
1253 | 186 | #error_out(e) | 191 | # error_out(e) |
1254 | 187 | 192 | ||
1255 | 188 | 193 | ||
1256 | 189 | os_rel = None | 194 | os_rel = None |
1257 | @@ -268,6 +273,9 @@ | |||
1258 | 268 | 'icehouse': 'precise-updates/icehouse', | 273 | 'icehouse': 'precise-updates/icehouse', |
1259 | 269 | 'icehouse/updates': 'precise-updates/icehouse', | 274 | 'icehouse/updates': 'precise-updates/icehouse', |
1260 | 270 | 'icehouse/proposed': 'precise-proposed/icehouse', | 275 | 'icehouse/proposed': 'precise-proposed/icehouse', |
1261 | 276 | 'juno': 'trusty-updates/juno', | ||
1262 | 277 | 'juno/updates': 'trusty-updates/juno', | ||
1263 | 278 | 'juno/proposed': 'trusty-proposed/juno', | ||
1264 | 271 | } | 279 | } |
1265 | 272 | 280 | ||
1266 | 273 | try: | 281 | try: |
1267 | @@ -315,6 +323,7 @@ | |||
1268 | 315 | 323 | ||
1269 | 316 | """ | 324 | """ |
1270 | 317 | 325 | ||
1271 | 326 | import apt_pkg as apt | ||
1272 | 318 | src = config('openstack-origin') | 327 | src = config('openstack-origin') |
1273 | 319 | cur_vers = get_os_version_package(package) | 328 | cur_vers = get_os_version_package(package) |
1274 | 320 | available_vers = get_os_version_install_source(src) | 329 | available_vers = get_os_version_install_source(src) |
1275 | @@ -401,6 +410,8 @@ | |||
1276 | 401 | rtype = 'PTR' | 410 | rtype = 'PTR' |
1277 | 402 | elif isinstance(address, basestring): | 411 | elif isinstance(address, basestring): |
1278 | 403 | rtype = 'A' | 412 | rtype = 'A' |
1279 | 413 | else: | ||
1280 | 414 | return None | ||
1281 | 404 | 415 | ||
1282 | 405 | answers = dns.resolver.query(address, rtype) | 416 | answers = dns.resolver.query(address, rtype) |
1283 | 406 | if answers: | 417 | if answers: |
1284 | 407 | 418 | ||
1285 | === modified file 'hooks/charmhelpers/contrib/peerstorage/__init__.py' | |||
1286 | --- hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-03-10 11:38:19 +0000 | |||
1287 | +++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-09-26 08:15:24 +0000 | |||
1288 | @@ -1,44 +1,44 @@ | |||
1289 | 1 | from charmhelpers.core.hookenv import relation_id as current_relation_id | ||
1290 | 1 | from charmhelpers.core.hookenv import ( | 2 | from charmhelpers.core.hookenv import ( |
1291 | 3 | is_relation_made, | ||
1292 | 2 | relation_ids, | 4 | relation_ids, |
1293 | 3 | relation_get, | 5 | relation_get, |
1294 | 4 | local_unit, | 6 | local_unit, |
1295 | 5 | relation_set, | 7 | relation_set, |
1296 | 6 | ) | 8 | ) |
1297 | 7 | 9 | ||
1298 | 10 | |||
1299 | 8 | """ | 11 | """ |
1300 | 9 | This helper provides functions to support use of a peer relation | 12 | This helper provides functions to support use of a peer relation |
1301 | 10 | for basic key/value storage, with the added benefit that all storage | 13 | for basic key/value storage, with the added benefit that all storage |
1326 | 11 | can be replicated across peer units, so this is really useful for | 14 | can be replicated across peer units. |
1327 | 12 | services that issue usernames/passwords to remote services. | 15 | |
1328 | 13 | 16 | Requirement to use: | |
1329 | 14 | def shared_db_changed() | 17 | |
1330 | 15 | # Only the lead unit should create passwords | 18 | To use this, the "peer_echo()" method has to be called from the peer |
1331 | 16 | if not is_leader(): | 19 | relation's relation-changed hook: |
1332 | 17 | return | 20 | |
1333 | 18 | username = relation_get('username') | 21 | @hooks.hook("cluster-relation-changed") # Adapt this to your peer relation name |
1334 | 19 | key = '{}.password'.format(username) | 22 | def cluster_relation_changed(): |
1311 | 20 | # Attempt to retrieve any existing password for this user | ||
1312 | 21 | password = peer_retrieve(key) | ||
1313 | 22 | if password is None: | ||
1314 | 23 | # New user, create password and store | ||
1315 | 24 | password = pwgen(length=64) | ||
1316 | 25 | peer_store(key, password) | ||
1317 | 26 | create_access(username, password) | ||
1318 | 27 | relation_set(password=password) | ||
1319 | 28 | |||
1320 | 29 | |||
1321 | 30 | def cluster_changed() | ||
1322 | 31 | # Echo any relation data other that *-address | ||
1323 | 32 | # back onto the peer relation so all units have | ||
1324 | 33 | # all *.password keys stored on their local relation | ||
1325 | 34 | # for later retrieval. | ||
1335 | 35 | peer_echo() | 23 | peer_echo() |
1336 | 36 | 24 | ||
1337 | 25 | Once this is done, you can use peer storage from anywhere: | ||
1338 | 26 | |||
1339 | 27 | @hooks.hook("some-hook") | ||
1340 | 28 | def some_hook(): | ||
1341 | 29 | # You can store and retrieve key/values this way: | ||
1342 | 30 | if is_relation_made("cluster"): # from charmhelpers.core.hookenv | ||
1343 | 31 | # There are peers available so we can work with peer storage | ||
1344 | 32 | peer_store("mykey", "myvalue") | ||
1345 | 33 | value = peer_retrieve("mykey") | ||
1346 | 34 | print value | ||
1347 | 35 | else: | ||
1348 | 36 | print "No peers joined the relation, cannot share key/values :( ||
1349 | 37 | """ | 37 | """ |
1350 | 38 | 38 | ||
1351 | 39 | 39 | ||
1352 | 40 | def peer_retrieve(key, relation_name='cluster'): | 40 | def peer_retrieve(key, relation_name='cluster'): |
1354 | 41 | """ Retrieve a named key from peer relation relation_name """ | 41 | """Retrieve a named key from peer relation `relation_name`.""" |
1355 | 42 | cluster_rels = relation_ids(relation_name) | 42 | cluster_rels = relation_ids(relation_name) |
1356 | 43 | if len(cluster_rels) > 0: | 43 | if len(cluster_rels) > 0: |
1357 | 44 | cluster_rid = cluster_rels[0] | 44 | cluster_rid = cluster_rels[0] |
1358 | @@ -49,8 +49,26 @@ | |||
1359 | 49 | 'peer relation {}'.format(relation_name)) | 49 | 'peer relation {}'.format(relation_name)) |
1360 | 50 | 50 | ||
1361 | 51 | 51 | ||
1362 | 52 | def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_', | ||
1363 | 53 | inc_list=None, exc_list=None): | ||
1364 | 54 | """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """ | ||
1365 | 55 | inc_list = inc_list if inc_list else [] | ||
1366 | 56 | exc_list = exc_list if exc_list else [] | ||
1367 | 57 | peerdb_settings = peer_retrieve('-', relation_name=relation_name) | ||
1368 | 58 | matched = {} | ||
1369 | 59 | for k, v in peerdb_settings.items(): | ||
1370 | 60 | full_prefix = prefix + delimiter | ||
1371 | 61 | if k.startswith(full_prefix): | ||
1372 | 62 | new_key = k.replace(full_prefix, '') | ||
1373 | 63 | if new_key in exc_list: | ||
1374 | 64 | continue | ||
1375 | 65 | if new_key in inc_list or len(inc_list) == 0: | ||
1376 | 66 | matched[new_key] = v | ||
1377 | 67 | return matched | ||
1378 | 68 | |||
1379 | 69 | |||
1380 | 52 | def peer_store(key, value, relation_name='cluster'): | 70 | def peer_store(key, value, relation_name='cluster'): |
1382 | 53 | """ Store the key/value pair on the named peer relation relation_name """ | 71 | """Store the key/value pair on the named peer relation `relation_name`.""" |
1383 | 54 | cluster_rels = relation_ids(relation_name) | 72 | cluster_rels = relation_ids(relation_name) |
1384 | 55 | if len(cluster_rels) > 0: | 73 | if len(cluster_rels) > 0: |
1385 | 56 | cluster_rid = cluster_rels[0] | 74 | cluster_rid = cluster_rels[0] |
1386 | @@ -62,10 +80,10 @@ | |||
1387 | 62 | 80 | ||
1388 | 63 | 81 | ||
1389 | 64 | def peer_echo(includes=None): | 82 | def peer_echo(includes=None): |
1391 | 65 | """Echo filtered attributes back onto the same relation for storage | 83 | """Echo filtered attributes back onto the same relation for storage. |
1392 | 66 | 84 | ||
1395 | 67 | Note that this helper must only be called within a peer relation | 85 | This is a requirement to use the peerstorage module - it needs to be called |
1396 | 68 | changed hook | 86 | from the peer relation's changed hook. |
1397 | 69 | """ | 87 | """ |
1398 | 70 | rdata = relation_get() | 88 | rdata = relation_get() |
1399 | 71 | echo_data = {} | 89 | echo_data = {} |
1400 | @@ -81,3 +99,33 @@ | |||
1401 | 81 | echo_data[attribute] = value | 99 | echo_data[attribute] = value |
1402 | 82 | if len(echo_data) > 0: | 100 | if len(echo_data) > 0: |
1403 | 83 | relation_set(relation_settings=echo_data) | 101 | relation_set(relation_settings=echo_data) |
1404 | 102 | |||
1405 | 103 | |||
1406 | 104 | def peer_store_and_set(relation_id=None, peer_relation_name='cluster', | ||
1407 | 105 | peer_store_fatal=False, relation_settings=None, | ||
1408 | 106 | delimiter='_', **kwargs): | ||
1409 | 107 | """Store passed-in arguments both in argument relation and in peer storage. | ||
1410 | 108 | |||
1411 | 109 | It functions like doing relation_set() and peer_store() at the same time, | ||
1412 | 110 | with the same data. | ||
1413 | 111 | |||
1414 | 112 | @param relation_id: the id of the relation to store the data on. Defaults | ||
1415 | 113 | to the current relation. | ||
1416 | 114 | @param peer_store_fatal: Set to True, the function will raise an exception | ||
1417 | 115 | should the peer storage not be available.""" ||
1418 | 116 | |||
1419 | 117 | relation_settings = relation_settings if relation_settings else {} | ||
1420 | 118 | relation_set(relation_id=relation_id, | ||
1421 | 119 | relation_settings=relation_settings, | ||
1422 | 120 | **kwargs) | ||
1423 | 121 | if is_relation_made(peer_relation_name): | ||
1424 | 122 | for key, value in dict(kwargs.items() + | ||
1425 | 123 | relation_settings.items()).iteritems(): | ||
1426 | 124 | key_prefix = relation_id or current_relation_id() | ||
1427 | 125 | peer_store(key_prefix + delimiter + key, | ||
1428 | 126 | value, | ||
1429 | 127 | relation_name=peer_relation_name) | ||
1430 | 128 | else: | ||
1431 | 129 | if peer_store_fatal: | ||
1432 | 130 | raise ValueError('Unable to detect ' | ||
1433 | 131 | 'peer relation {}'.format(peer_relation_name)) | ||
1434 | 84 | 132 | ||
1435 | === modified file 'hooks/charmhelpers/contrib/ssl/service.py' | |||
1436 | --- hooks/charmhelpers/contrib/ssl/service.py 2014-03-05 12:57:20 +0000 | |||
1437 | +++ hooks/charmhelpers/contrib/ssl/service.py 2014-09-26 08:15:24 +0000 | |||
1438 | @@ -127,7 +127,7 @@ | |||
1439 | 127 | return self.get_certificate(common_name) | 127 | return self.get_certificate(common_name) |
1440 | 128 | 128 | ||
1441 | 129 | def get_certificate(self, common_name): | 129 | def get_certificate(self, common_name): |
1443 | 130 | if not common_name in self: | 130 | if common_name not in self: |
1444 | 131 | raise ValueError("No certificate for %s" % common_name) | 131 | raise ValueError("No certificate for %s" % common_name) |
1445 | 132 | key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) | 132 | key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) |
1446 | 133 | crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) | 133 | crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) |
1447 | 134 | 134 | ||
1448 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
1449 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-05 12:57:20 +0000 | |||
1450 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-09-26 08:15:24 +0000 | |||
1451 | @@ -303,7 +303,7 @@ | |||
1452 | 303 | blk_device, fstype, system_services=[]): | 303 | blk_device, fstype, system_services=[]): |
1453 | 304 | """ | 304 | """ |
1454 | 305 | NOTE: This function must only be called from a single service unit for | 305 | NOTE: This function must only be called from a single service unit for |
1456 | 306 | the same rbd_img otherwise data loss will occur. | 306 | the same rbd_img otherwise data loss will occur. |
1457 | 307 | 307 | ||
1458 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | 308 | Ensures given pool and RBD image exists, is mapped to a block device, |
1459 | 309 | and the device is formatted and mounted at the given mount_point. | 309 | and the device is formatted and mounted at the given mount_point. |
1460 | 310 | 310 | ||
1461 | === modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py' | |||
1462 | --- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-03-05 12:57:20 +0000 | |||
1463 | +++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-09-26 08:15:24 +0000 | |||
1464 | @@ -62,7 +62,7 @@ | |||
1465 | 62 | pvd = check_output(['pvdisplay', block_device]).splitlines() | 62 | pvd = check_output(['pvdisplay', block_device]).splitlines() |
1466 | 63 | for l in pvd: | 63 | for l in pvd: |
1467 | 64 | if l.strip().startswith('VG Name'): | 64 | if l.strip().startswith('VG Name'): |
1469 | 65 | vg = ' '.join(l.split()).split(' ').pop() | 65 | vg = ' '.join(l.strip().split()[2:]) |
1470 | 66 | return vg | 66 | return vg |
1471 | 67 | 67 | ||
1472 | 68 | 68 | ||
1473 | 69 | 69 | ||
1474 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
1475 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-02 13:03:56 +0000 | |||
1476 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-09-26 08:15:24 +0000 | |||
1477 | @@ -1,4 +1,5 @@ | |||
1479 | 1 | from os import stat | 1 | import os |
1480 | 2 | import re | ||
1481 | 2 | from stat import S_ISBLK | 3 | from stat import S_ISBLK |
1482 | 3 | 4 | ||
1483 | 4 | from subprocess import ( | 5 | from subprocess import ( |
1484 | @@ -14,7 +15,9 @@ | |||
1485 | 14 | 15 | ||
1486 | 15 | :returns: boolean: True if path is a block device, False if not. | 16 | :returns: boolean: True if path is a block device, False if not. |
1487 | 16 | ''' | 17 | ''' |
1489 | 17 | return S_ISBLK(stat(path).st_mode) | 18 | if not os.path.exists(path): |
1490 | 19 | return False | ||
1491 | 20 | return S_ISBLK(os.stat(path).st_mode) | ||
1492 | 18 | 21 | ||
1493 | 19 | 22 | ||
1494 | 20 | def zap_disk(block_device): | 23 | def zap_disk(block_device): |
1495 | @@ -29,7 +32,22 @@ | |||
1496 | 29 | '--clear', block_device]) | 32 | '--clear', block_device]) |
1497 | 30 | dev_end = check_output(['blockdev', '--getsz', block_device]) | 33 | dev_end = check_output(['blockdev', '--getsz', block_device]) |
1498 | 31 | gpt_end = int(dev_end.split()[0]) - 100 | 34 | gpt_end = int(dev_end.split()[0]) - 100 |
1500 | 32 | check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), | 35 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), |
1501 | 33 | 'bs=1M', 'count=1']) | 36 | 'bs=1M', 'count=1']) |
1504 | 34 | check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device), | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), |
1505 | 35 | 'bs=512', 'count=100', 'seek=%s'%(gpt_end)]) | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) |
1506 | 39 | |||
1507 | 40 | |||
1508 | 41 | def is_device_mounted(device): | ||
1509 | 42 | '''Given a device path, return True if that device is mounted, and False | ||
1510 | 43 | if it isn't. | ||
1511 | 44 | |||
1512 | 45 | :param device: str: Full path of the device to check. | ||
1513 | 46 | :returns: boolean: True if the path represents a mounted device, False if | ||
1514 | 47 | it doesn't. | ||
1515 | 48 | ''' | ||
1516 | 49 | is_partition = bool(re.search(r".*[0-9]+\b", device)) | ||
1517 | 50 | out = check_output(['mount']) | ||
1518 | 51 | if is_partition: | ||
1519 | 52 | return bool(re.search(device + r"\b", out)) | ||
1520 | 53 | return bool(re.search(device + r"[0-9]+\b", out)) | ||
1521 | 36 | 54 | ||
1522 | === added file 'hooks/charmhelpers/core/fstab.py' | |||
1523 | --- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000 | |||
1524 | +++ hooks/charmhelpers/core/fstab.py 2014-09-26 08:15:24 +0000 | |||
1525 | @@ -0,0 +1,116 @@ | |||
1526 | 1 | #!/usr/bin/env python | ||
1527 | 2 | # -*- coding: utf-8 -*- | ||
1528 | 3 | |||
1529 | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
1530 | 5 | |||
1531 | 6 | import os | ||
1532 | 7 | |||
1533 | 8 | |||
1534 | 9 | class Fstab(file): | ||
1535 | 10 | """This class extends file in order to implement a file reader/writer | ||
1536 | 11 | for file `/etc/fstab` | ||
1537 | 12 | """ | ||
1538 | 13 | |||
1539 | 14 | class Entry(object): | ||
1540 | 15 | """Entry class represents a non-comment line on the `/etc/fstab` file | ||
1541 | 16 | """ | ||
1542 | 17 | def __init__(self, device, mountpoint, filesystem, | ||
1543 | 18 | options, d=0, p=0): | ||
1544 | 19 | self.device = device | ||
1545 | 20 | self.mountpoint = mountpoint | ||
1546 | 21 | self.filesystem = filesystem | ||
1547 | 22 | |||
1548 | 23 | if not options: | ||
1549 | 24 | options = "defaults" | ||
1550 | 25 | |||
1551 | 26 | self.options = options | ||
1552 | 27 | self.d = d | ||
1553 | 28 | self.p = p | ||
1554 | 29 | |||
1555 | 30 | def __eq__(self, o): | ||
1556 | 31 | return str(self) == str(o) | ||
1557 | 32 | |||
1558 | 33 | def __str__(self): | ||
1559 | 34 | return "{} {} {} {} {} {}".format(self.device, | ||
1560 | 35 | self.mountpoint, | ||
1561 | 36 | self.filesystem, | ||
1562 | 37 | self.options, | ||
1563 | 38 | self.d, | ||
1564 | 39 | self.p) | ||
1565 | 40 | |||
1566 | 41 | DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') | ||
1567 | 42 | |||
1568 | 43 | def __init__(self, path=None): | ||
1569 | 44 | if path: | ||
1570 | 45 | self._path = path | ||
1571 | 46 | else: | ||
1572 | 47 | self._path = self.DEFAULT_PATH | ||
1573 | 48 | file.__init__(self, self._path, 'r+') | ||
1574 | 49 | |||
1575 | 50 | def _hydrate_entry(self, line): | ||
1576 | 51 | # NOTE: use split with no arguments to split on any | ||
1577 | 52 | # whitespace including tabs | ||
1578 | 53 | return Fstab.Entry(*filter( | ||
1579 | 54 | lambda x: x not in ('', None), | ||
1580 | 55 | line.strip("\n").split())) | ||
1581 | 56 | |||
1582 | 57 | @property | ||
1583 | 58 | def entries(self): | ||
1584 | 59 | self.seek(0) | ||
1585 | 60 | for line in self.readlines(): | ||
1586 | 61 | try: | ||
1587 | 62 | if not line.startswith("#"): | ||
1588 | 63 | yield self._hydrate_entry(line) | ||
1589 | 64 | except ValueError: | ||
1590 | 65 | pass | ||
1591 | 66 | |||
1592 | 67 | def get_entry_by_attr(self, attr, value): | ||
1593 | 68 | for entry in self.entries: | ||
1594 | 69 | e_attr = getattr(entry, attr) | ||
1595 | 70 | if e_attr == value: | ||
1596 | 71 | return entry | ||
1597 | 72 | return None | ||
1598 | 73 | |||
1599 | 74 | def add_entry(self, entry): | ||
1600 | 75 | if self.get_entry_by_attr('device', entry.device): | ||
1601 | 76 | return False | ||
1602 | 77 | |||
1603 | 78 | self.write(str(entry) + '\n') | ||
1604 | 79 | self.truncate() | ||
1605 | 80 | return entry | ||
1606 | 81 | |||
1607 | 82 | def remove_entry(self, entry): | ||
1608 | 83 | self.seek(0) | ||
1609 | 84 | |||
1610 | 85 | lines = self.readlines() | ||
1611 | 86 | |||
1612 | 87 | found = False | ||
1613 | 88 | for index, line in enumerate(lines): | ||
1614 | 89 | if not line.startswith("#"): | ||
1615 | 90 | if self._hydrate_entry(line) == entry: | ||
1616 | 91 | found = True | ||
1617 | 92 | break | ||
1618 | 93 | |||
1619 | 94 | if not found: | ||
1620 | 95 | return False | ||
1621 | 96 | |||
1622 | 97 | lines.remove(line) | ||
1623 | 98 | |||
1624 | 99 | self.seek(0) | ||
1625 | 100 | self.write(''.join(lines)) | ||
1626 | 101 | self.truncate() | ||
1627 | 102 | return True | ||
1628 | 103 | |||
1629 | 104 | @classmethod | ||
1630 | 105 | def remove_by_mountpoint(cls, mountpoint, path=None): | ||
1631 | 106 | fstab = cls(path=path) | ||
1632 | 107 | entry = fstab.get_entry_by_attr('mountpoint', mountpoint) | ||
1633 | 108 | if entry: | ||
1634 | 109 | return fstab.remove_entry(entry) | ||
1635 | 110 | return False | ||
1636 | 111 | |||
1637 | 112 | @classmethod | ||
1638 | 113 | def add(cls, device, mountpoint, filesystem, options=None, path=None): | ||
1639 | 114 | return cls(path=path).add_entry(Fstab.Entry(device, | ||
1640 | 115 | mountpoint, filesystem, | ||
1641 | 116 | options=options)) | ||
1642 | 0 | 117 | ||
1643 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
1644 | --- hooks/charmhelpers/core/hookenv.py 2014-03-05 12:57:20 +0000 | |||
1645 | +++ hooks/charmhelpers/core/hookenv.py 2014-09-26 08:15:24 +0000 | |||
1646 | @@ -25,7 +25,7 @@ | |||
1647 | 25 | def cached(func): | 25 | def cached(func): |
1648 | 26 | """Cache return values for multiple executions of func + args | 26 | """Cache return values for multiple executions of func + args |
1649 | 27 | 27 | ||
1651 | 28 | For example: | 28 | For example:: |
1652 | 29 | 29 | ||
1653 | 30 | @cached | 30 | @cached |
1654 | 31 | def unit_get(attribute): | 31 | def unit_get(attribute): |
1655 | @@ -155,6 +155,121 @@ | |||
1656 | 155 | return os.path.basename(sys.argv[0]) | 155 | return os.path.basename(sys.argv[0]) |
1657 | 156 | 156 | ||
1658 | 157 | 157 | ||
1659 | 158 | class Config(dict): | ||
1660 | 159 | """A dictionary representation of the charm's config.yaml, with some | ||
1661 | 160 | extra features: | ||
1662 | 161 | |||
1663 | 162 | - See which values in the dictionary have changed since the previous hook. | ||
1664 | 163 | - For values that have changed, see what the previous value was. | ||
1665 | 164 | - Store arbitrary data for use in a later hook. | ||
1666 | 165 | |||
1667 | 166 | NOTE: Do not instantiate this object directly - instead call | ||
1668 | 167 | ``hookenv.config()``, which will return an instance of :class:`Config`. | ||
1669 | 168 | |||
1670 | 169 | Example usage:: | ||
1671 | 170 | |||
1672 | 171 | >>> # inside a hook | ||
1673 | 172 | >>> from charmhelpers.core import hookenv | ||
1674 | 173 | >>> config = hookenv.config() | ||
1675 | 174 | >>> config['foo'] | ||
1676 | 175 | 'bar' | ||
1677 | 176 | >>> # store a new key/value for later use | ||
1678 | 177 | >>> config['mykey'] = 'myval' | ||
1679 | 178 | |||
1680 | 179 | |||
1681 | 180 | >>> # user runs `juju set mycharm foo=baz` | ||
1682 | 181 | >>> # now we're inside subsequent config-changed hook | ||
1683 | 182 | >>> config = hookenv.config() | ||
1684 | 183 | >>> config['foo'] | ||
1685 | 184 | 'baz' | ||
1686 | 185 | >>> # test to see if this val has changed since last hook | ||
1687 | 186 | >>> config.changed('foo') | ||
1688 | 187 | True | ||
1689 | 188 | >>> # what was the previous value? | ||
1690 | 189 | >>> config.previous('foo') | ||
1691 | 190 | 'bar' | ||
1692 | 191 | >>> # keys/values that we add are preserved across hooks | ||
1693 | 192 | >>> config['mykey'] | ||
1694 | 193 | 'myval' | ||
1695 | 194 | |||
1696 | 195 | """ | ||
1697 | 196 | CONFIG_FILE_NAME = '.juju-persistent-config' | ||
1698 | 197 | |||
1699 | 198 | def __init__(self, *args, **kw): | ||
1700 | 199 | super(Config, self).__init__(*args, **kw) | ||
1701 | 200 | self.implicit_save = True | ||
1702 | 201 | self._prev_dict = None | ||
1703 | 202 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) | ||
1704 | 203 | if os.path.exists(self.path): | ||
1705 | 204 | self.load_previous() | ||
1706 | 205 | |||
1707 | 206 | def __getitem__(self, key): | ||
1708 | 207 | """For regular dict lookups, check the current juju config first, | ||
1709 | 208 | then the previous (saved) copy. This ensures that user-saved values | ||
1710 | 209 | will be returned by a dict lookup. | ||
1711 | 210 | |||
1712 | 211 | """ | ||
1713 | 212 | try: | ||
1714 | 213 | return dict.__getitem__(self, key) | ||
1715 | 214 | except KeyError: | ||
1716 | 215 | return (self._prev_dict or {})[key] | ||
1717 | 216 | |||
1718 | 217 | def load_previous(self, path=None): | ||
1719 | 218 | """Load previous copy of config from disk. | ||
1720 | 219 | |||
1721 | 220 | In normal usage you don't need to call this method directly - it | ||
1722 | 221 | is called automatically at object initialization. | ||
1723 | 222 | |||
1724 | 223 | :param path: | ||
1725 | 224 | |||
1726 | 225 | File path from which to load the previous config. If `None`, | ||
1727 | 226 | config is loaded from the default location. If `path` is | ||
1728 | 227 | specified, subsequent `save()` calls will write to the same | ||
1729 | 228 | path. | ||
1730 | 229 | |||
1731 | 230 | """ | ||
1732 | 231 | self.path = path or self.path | ||
1733 | 232 | with open(self.path) as f: | ||
1734 | 233 | self._prev_dict = json.load(f) | ||
1735 | 234 | |||
1736 | 235 | def changed(self, key): | ||
1737 | 236 | """Return True if the current value for this key is different from | ||
1738 | 237 | the previous value. | ||
1739 | 238 | |||
1740 | 239 | """ | ||
1741 | 240 | if self._prev_dict is None: | ||
1742 | 241 | return True | ||
1743 | 242 | return self.previous(key) != self.get(key) | ||
1744 | 243 | |||
1745 | 244 | def previous(self, key): | ||
1746 | 245 | """Return previous value for this key, or None if there | ||
1747 | 246 | is no previous value. | ||
1748 | 247 | |||
1749 | 248 | """ | ||
1750 | 249 | if self._prev_dict: | ||
1751 | 250 | return self._prev_dict.get(key) | ||
1752 | 251 | return None | ||
1753 | 252 | |||
1754 | 253 | def save(self): | ||
1755 | 254 | """Save this config to disk. | ||
1756 | 255 | |||
1757 | 256 | If the charm is using the :mod:`Services Framework <services.base>` | ||
1758 | 257 | or :meth:'@hook <Hooks.hook>' decorator, this | ||
1759 | 258 | is called automatically at the end of successful hook execution. | ||
1760 | 259 | Otherwise, it should be called directly by user code. | ||
1761 | 260 | |||
1762 | 261 | To disable automatic saves, set ``implicit_save=False`` on this | ||
1763 | 262 | instance. | ||
1764 | 263 | |||
1765 | 264 | """ | ||
1766 | 265 | if self._prev_dict: | ||
1767 | 266 | for k, v in self._prev_dict.iteritems(): | ||
1768 | 267 | if k not in self: | ||
1769 | 268 | self[k] = v | ||
1770 | 269 | with open(self.path, 'w') as f: | ||
1771 | 270 | json.dump(self, f) | ||
1772 | 271 | |||
1773 | 272 | |||
1774 | 158 | @cached | 273 | @cached |
1775 | 159 | def config(scope=None): | 274 | def config(scope=None): |
1776 | 160 | """Juju charm configuration""" | 275 | """Juju charm configuration""" |
1777 | @@ -163,7 +278,10 @@ | |||
1778 | 163 | config_cmd_line.append(scope) | 278 | config_cmd_line.append(scope) |
1779 | 164 | config_cmd_line.append('--format=json') | 279 | config_cmd_line.append('--format=json') |
1780 | 165 | try: | 280 | try: |
1782 | 166 | return json.loads(subprocess.check_output(config_cmd_line)) | 281 | config_data = json.loads(subprocess.check_output(config_cmd_line)) |
1783 | 282 | if scope is not None: | ||
1784 | 283 | return config_data | ||
1785 | 284 | return Config(config_data) | ||
1786 | 167 | except ValueError: | 285 | except ValueError: |
1787 | 168 | return None | 286 | return None |
1788 | 169 | 287 | ||
1789 | @@ -188,8 +306,9 @@ | |||
1790 | 188 | raise | 306 | raise |
1791 | 189 | 307 | ||
1792 | 190 | 308 | ||
1794 | 191 | def relation_set(relation_id=None, relation_settings={}, **kwargs): | 309 | def relation_set(relation_id=None, relation_settings=None, **kwargs): |
1795 | 192 | """Set relation information for the current unit""" | 310 | """Set relation information for the current unit""" |
1796 | 311 | relation_settings = relation_settings if relation_settings else {} | ||
1797 | 193 | relation_cmd_line = ['relation-set'] | 312 | relation_cmd_line = ['relation-set'] |
1798 | 194 | if relation_id is not None: | 313 | if relation_id is not None: |
1799 | 195 | relation_cmd_line.extend(('-r', relation_id)) | 314 | relation_cmd_line.extend(('-r', relation_id)) |
1800 | @@ -348,27 +467,29 @@ | |||
1801 | 348 | class Hooks(object): | 467 | class Hooks(object): |
1802 | 349 | """A convenient handler for hook functions. | 468 | """A convenient handler for hook functions. |
1803 | 350 | 469 | ||
1805 | 351 | Example: | 470 | Example:: |
1806 | 471 | |||
1807 | 352 | hooks = Hooks() | 472 | hooks = Hooks() |
1808 | 353 | 473 | ||
1809 | 354 | # register a hook, taking its name from the function name | 474 | # register a hook, taking its name from the function name |
1810 | 355 | @hooks.hook() | 475 | @hooks.hook() |
1811 | 356 | def install(): | 476 | def install(): |
1813 | 357 | ... | 477 | pass # your code here |
1814 | 358 | 478 | ||
1815 | 359 | # register a hook, providing a custom hook name | 479 | # register a hook, providing a custom hook name |
1816 | 360 | @hooks.hook("config-changed") | 480 | @hooks.hook("config-changed") |
1817 | 361 | def config_changed(): | 481 | def config_changed(): |
1819 | 362 | ... | 482 | pass # your code here |
1820 | 363 | 483 | ||
1821 | 364 | if __name__ == "__main__": | 484 | if __name__ == "__main__": |
1822 | 365 | # execute a hook based on the name the program is called by | 485 | # execute a hook based on the name the program is called by |
1823 | 366 | hooks.execute(sys.argv) | 486 | hooks.execute(sys.argv) |
1824 | 367 | """ | 487 | """ |
1825 | 368 | 488 | ||
1827 | 369 | def __init__(self): | 489 | def __init__(self, config_save=True): |
1828 | 370 | super(Hooks, self).__init__() | 490 | super(Hooks, self).__init__() |
1829 | 371 | self._hooks = {} | 491 | self._hooks = {} |
1830 | 492 | self._config_save = config_save | ||
1831 | 372 | 493 | ||
1832 | 373 | def register(self, name, function): | 494 | def register(self, name, function): |
1833 | 374 | """Register a hook""" | 495 | """Register a hook""" |
1834 | @@ -379,6 +500,10 @@ | |||
1835 | 379 | hook_name = os.path.basename(args[0]) | 500 | hook_name = os.path.basename(args[0]) |
1836 | 380 | if hook_name in self._hooks: | 501 | if hook_name in self._hooks: |
1837 | 381 | self._hooks[hook_name]() | 502 | self._hooks[hook_name]() |
1838 | 503 | if self._config_save: | ||
1839 | 504 | cfg = config() | ||
1840 | 505 | if cfg.implicit_save: | ||
1841 | 506 | cfg.save() | ||
1842 | 382 | else: | 507 | else: |
1843 | 383 | raise UnregisteredHookError(hook_name) | 508 | raise UnregisteredHookError(hook_name) |
1844 | 384 | 509 | ||
1845 | 385 | 510 | ||
1846 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1847 | --- hooks/charmhelpers/core/host.py 2014-03-05 12:57:20 +0000 | |||
1848 | +++ hooks/charmhelpers/core/host.py 2014-09-26 08:15:24 +0000 | |||
1849 | @@ -12,10 +12,13 @@ | |||
1850 | 12 | import string | 12 | import string |
1851 | 13 | import subprocess | 13 | import subprocess |
1852 | 14 | import hashlib | 14 | import hashlib |
1853 | 15 | import shutil | ||
1854 | 16 | from contextlib import contextmanager | ||
1855 | 15 | 17 | ||
1856 | 16 | from collections import OrderedDict | 18 | from collections import OrderedDict |
1857 | 17 | 19 | ||
1858 | 18 | from hookenv import log | 20 | from hookenv import log |
1859 | 21 | from fstab import Fstab | ||
1860 | 19 | 22 | ||
1861 | 20 | 23 | ||
1862 | 21 | def service_start(service_name): | 24 | def service_start(service_name): |
1863 | @@ -34,7 +37,8 @@ | |||
1864 | 34 | 37 | ||
1865 | 35 | 38 | ||
1866 | 36 | def service_reload(service_name, restart_on_failure=False): | 39 | def service_reload(service_name, restart_on_failure=False): |
1868 | 37 | """Reload a system service, optionally falling back to restart if reload fails""" | 40 | """Reload a system service, optionally falling back to restart if |
1869 | 41 | reload fails""" | ||
1870 | 38 | service_result = service('reload', service_name) | 42 | service_result = service('reload', service_name) |
1871 | 39 | if not service_result and restart_on_failure: | 43 | if not service_result and restart_on_failure: |
1872 | 40 | service_result = service('restart', service_name) | 44 | service_result = service('restart', service_name) |
1873 | @@ -50,7 +54,7 @@ | |||
1874 | 50 | def service_running(service): | 54 | def service_running(service): |
1875 | 51 | """Determine whether a system service is running""" | 55 | """Determine whether a system service is running""" |
1876 | 52 | try: | 56 | try: |
1878 | 53 | output = subprocess.check_output(['service', service, 'status']) | 57 | output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) |
1879 | 54 | except subprocess.CalledProcessError: | 58 | except subprocess.CalledProcessError: |
1880 | 55 | return False | 59 | return False |
1881 | 56 | else: | 60 | else: |
1882 | @@ -60,6 +64,16 @@ | |||
1883 | 60 | return False | 64 | return False |
1884 | 61 | 65 | ||
1885 | 62 | 66 | ||
1886 | 67 | def service_available(service_name): | ||
1887 | 68 | """Determine whether a system service is available""" | ||
1888 | 69 | try: | ||
1889 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | ||
1890 | 71 | except subprocess.CalledProcessError as e: | ||
1891 | 72 | return 'unrecognized service' not in e.output | ||
1892 | 73 | else: | ||
1893 | 74 | return True | ||
1894 | 75 | |||
1895 | 76 | |||
1896 | 63 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 77 | def adduser(username, password=None, shell='/bin/bash', system_user=False): |
1897 | 64 | """Add a user to the system""" | 78 | """Add a user to the system""" |
1898 | 65 | try: | 79 | try: |
1899 | @@ -143,7 +157,19 @@ | |||
1900 | 143 | target.write(content) | 157 | target.write(content) |
1901 | 144 | 158 | ||
1902 | 145 | 159 | ||
1904 | 146 | def mount(device, mountpoint, options=None, persist=False): | 160 | def fstab_remove(mp): |
1905 | 161 | """Remove the given mountpoint entry from /etc/fstab | ||
1906 | 162 | """ | ||
1907 | 163 | return Fstab.remove_by_mountpoint(mp) | ||
1908 | 164 | |||
1909 | 165 | |||
1910 | 166 | def fstab_add(dev, mp, fs, options=None): | ||
1911 | 167 | """Adds the given device entry to the /etc/fstab file | ||
1912 | 168 | """ | ||
1913 | 169 | return Fstab.add(dev, mp, fs, options=options) | ||
1914 | 170 | |||
1915 | 171 | |||
1916 | 172 | def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): | ||
1917 | 147 | """Mount a filesystem at a particular mountpoint""" | 173 | """Mount a filesystem at a particular mountpoint""" |
1918 | 148 | cmd_args = ['mount'] | 174 | cmd_args = ['mount'] |
1919 | 149 | if options is not None: | 175 | if options is not None: |
1920 | @@ -154,9 +180,9 @@ | |||
1921 | 154 | except subprocess.CalledProcessError, e: | 180 | except subprocess.CalledProcessError, e: |
1922 | 155 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) | 181 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) |
1923 | 156 | return False | 182 | return False |
1924 | 183 | |||
1925 | 157 | if persist: | 184 | if persist: |
1928 | 158 | # TODO: update fstab | 185 | return fstab_add(device, mountpoint, filesystem, options=options) |
1927 | 159 | pass | ||
1929 | 160 | return True | 186 | return True |
1930 | 161 | 187 | ||
1931 | 162 | 188 | ||
1932 | @@ -168,9 +194,9 @@ | |||
1933 | 168 | except subprocess.CalledProcessError, e: | 194 | except subprocess.CalledProcessError, e: |
1934 | 169 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) | 195 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) |
1935 | 170 | return False | 196 | return False |
1936 | 197 | |||
1937 | 171 | if persist: | 198 | if persist: |
1940 | 172 | # TODO: update fstab | 199 | return fstab_remove(mountpoint) |
1939 | 173 | pass | ||
1941 | 174 | return True | 200 | return True |
1942 | 175 | 201 | ||
1943 | 176 | 202 | ||
1944 | @@ -183,10 +209,15 @@ | |||
1945 | 183 | return system_mounts | 209 | return system_mounts |
1946 | 184 | 210 | ||
1947 | 185 | 211 | ||
1950 | 186 | def file_hash(path): | 212 | def file_hash(path, hash_type='md5'): |
1951 | 187 | """Generate a md5 hash of the contents of 'path' or None if not found """ | 213 | """ |
1952 | 214 | Generate a hash checksum of the contents of 'path' or None if not found. | ||
1953 | 215 | |||
1954 | 216 | :param str hash_type: Any hash algorithm supported by :mod:`hashlib`, | ||
1955 | 217 | such as md5, sha1, sha256, sha512, etc. | ||
1956 | 218 | """ | ||
1957 | 188 | if os.path.exists(path): | 219 | if os.path.exists(path): |
1959 | 189 | h = hashlib.md5() | 220 | h = getattr(hashlib, hash_type)() |
1960 | 190 | with open(path, 'r') as source: | 221 | with open(path, 'r') as source: |
1961 | 191 | h.update(source.read()) # IGNORE:E1101 - it does have update | 222 | h.update(source.read()) # IGNORE:E1101 - it does have update |
1962 | 192 | return h.hexdigest() | 223 | return h.hexdigest() |
1963 | @@ -194,16 +225,36 @@ | |||
1964 | 194 | return None | 225 | return None |
1965 | 195 | 226 | ||
1966 | 196 | 227 | ||
1967 | 228 | def check_hash(path, checksum, hash_type='md5'): | ||
1968 | 229 | """ | ||
1969 | 230 | Validate a file using a cryptographic checksum. | ||
1970 | 231 | |||
1971 | 232 | :param str checksum: Value of the checksum used to validate the file. | ||
1972 | 233 | :param str hash_type: Hash algorithm used to generate `checksum`. | ||
1973 | 234 | Can be any hash algorithm supported by :mod:`hashlib`, | ||
1974 | 235 | such as md5, sha1, sha256, sha512, etc. | ||
1975 | 236 | :raises ChecksumError: If the file fails the checksum | ||
1976 | 237 | |||
1977 | 238 | """ | ||
1978 | 239 | actual_checksum = file_hash(path, hash_type) | ||
1979 | 240 | if checksum != actual_checksum: | ||
1980 | 241 | raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) | ||
1981 | 242 | |||
1982 | 243 | |||
1983 | 244 | class ChecksumError(ValueError): | ||
1984 | 245 | pass | ||
1985 | 246 | |||
1986 | 247 | |||
1987 | 197 | def restart_on_change(restart_map, stopstart=False): | 248 | def restart_on_change(restart_map, stopstart=False): |
1988 | 198 | """Restart services based on configuration files changing | 249 | """Restart services based on configuration files changing |
1989 | 199 | 250 | ||
1991 | 200 | This function is used as a decorator, for example | 251 | This function is used as a decorator, for example:: |
1992 | 201 | 252 | ||
1993 | 202 | @restart_on_change({ | 253 | @restart_on_change({ |
1994 | 203 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 254 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
1995 | 204 | }) | 255 | }) |
1996 | 205 | def ceph_client_changed(): | 256 | def ceph_client_changed(): |
1998 | 206 | ... | 257 | pass # your code here |
1999 | 207 | 258 | ||
2000 | 208 | In this example, the cinder-api and cinder-volume services | 259 | In this example, the cinder-api and cinder-volume services |
2001 | 209 | would be restarted if /etc/ceph/ceph.conf is changed by the | 260 | would be restarted if /etc/ceph/ceph.conf is changed by the |
2002 | @@ -295,3 +346,40 @@ | |||
2003 | 295 | if 'link/ether' in words: | 346 | if 'link/ether' in words: |
2004 | 296 | hwaddr = words[words.index('link/ether') + 1] | 347 | hwaddr = words[words.index('link/ether') + 1] |
2005 | 297 | return hwaddr | 348 | return hwaddr |
2006 | 349 | |||
2007 | 350 | |||
2008 | 351 | def cmp_pkgrevno(package, revno, pkgcache=None): | ||
2009 | 352 | '''Compare supplied revno with the revno of the installed package | ||
2010 | 353 | |||
2011 | 354 | * 1 => Installed revno is greater than supplied arg | ||
2012 | 355 | * 0 => Installed revno is the same as supplied arg | ||
2013 | 356 | * -1 => Installed revno is less than supplied arg | ||
2014 | 357 | |||
2015 | 358 | ''' | ||
2016 | 359 | import apt_pkg | ||
2017 | 360 | from charmhelpers.fetch import apt_cache | ||
2018 | 361 | if not pkgcache: | ||
2019 | 362 | pkgcache = apt_cache() | ||
2020 | 363 | pkg = pkgcache[package] | ||
2021 | 364 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | ||
2022 | 365 | |||
2023 | 366 | |||
2024 | 367 | @contextmanager | ||
2025 | 368 | def chdir(d): | ||
2026 | 369 | cur = os.getcwd() | ||
2027 | 370 | try: | ||
2028 | 371 | yield os.chdir(d) | ||
2029 | 372 | finally: | ||
2030 | 373 | os.chdir(cur) | ||
2031 | 374 | |||
2032 | 375 | |||
2033 | 376 | def chownr(path, owner, group): | ||
2034 | 377 | uid = pwd.getpwnam(owner).pw_uid | ||
2035 | 378 | gid = grp.getgrnam(group).gr_gid | ||
2036 | 379 | |||
2037 | 380 | for root, dirs, files in os.walk(path): | ||
2038 | 381 | for name in dirs + files: | ||
2039 | 382 | full = os.path.join(root, name) | ||
2040 | 383 | broken_symlink = os.path.lexists(full) and not os.path.exists(full) | ||
2041 | 384 | if not broken_symlink: | ||
2042 | 385 | os.chown(full, uid, gid) | ||
2043 | 298 | 386 | ||
2044 | === added directory 'hooks/charmhelpers/core/services' | |||
2045 | === added file 'hooks/charmhelpers/core/services/__init__.py' | |||
2046 | --- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000 | |||
2047 | +++ hooks/charmhelpers/core/services/__init__.py 2014-09-26 08:15:24 +0000 | |||
2048 | @@ -0,0 +1,2 @@ | |||
2049 | 1 | from .base import * | ||
2050 | 2 | from .helpers import * | ||
2051 | 0 | 3 | ||
2052 | === added file 'hooks/charmhelpers/core/services/base.py' | |||
2053 | --- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000 | |||
2054 | +++ hooks/charmhelpers/core/services/base.py 2014-09-26 08:15:24 +0000 | |||
2055 | @@ -0,0 +1,313 @@ | |||
2056 | 1 | import os | ||
2057 | 2 | import re | ||
2058 | 3 | import json | ||
2059 | 4 | from collections import Iterable | ||
2060 | 5 | |||
2061 | 6 | from charmhelpers.core import host | ||
2062 | 7 | from charmhelpers.core import hookenv | ||
2063 | 8 | |||
2064 | 9 | |||
2065 | 10 | __all__ = ['ServiceManager', 'ManagerCallback', | ||
2066 | 11 | 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', | ||
2067 | 12 | 'service_restart', 'service_stop'] | ||
2068 | 13 | |||
2069 | 14 | |||
2070 | 15 | class ServiceManager(object): | ||
2071 | 16 | def __init__(self, services=None): | ||
2072 | 17 | """ | ||
2073 | 18 | Register a list of services, given their definitions. | ||
2074 | 19 | |||
2075 | 20 | Service definitions are dicts in the following formats (all keys except | ||
2076 | 21 | 'service' are optional):: | ||
2077 | 22 | |||
2078 | 23 | { | ||
2079 | 24 | "service": <service name>, | ||
2080 | 25 | "required_data": <list of required data contexts>, | ||
2081 | 26 | "provided_data": <list of provided data contexts>, | ||
2082 | 27 | "data_ready": <one or more callbacks>, | ||
2083 | 28 | "data_lost": <one or more callbacks>, | ||
2084 | 29 | "start": <one or more callbacks>, | ||
2085 | 30 | "stop": <one or more callbacks>, | ||
2086 | 31 | "ports": <list of ports to manage>, | ||
2087 | 32 | } | ||
2088 | 33 | |||
2089 | 34 | The 'required_data' list should contain dicts of required data (or | ||
2090 | 35 | dependency managers that act like dicts and know how to collect the data). | ||
2091 | 36 | Only when all items in the 'required_data' list are populated are the list | ||
2092 | 37 | of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more | ||
2093 | 38 | information. | ||
2094 | 39 | |||
2095 | 40 | The 'provided_data' list should contain relation data providers, most likely | ||
2096 | 41 | a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, | ||
2097 | 42 | that will indicate a set of data to set on a given relation. | ||
2098 | 43 | |||
2099 | 44 | The 'data_ready' value should be either a single callback, or a list of | ||
2100 | 45 | callbacks, to be called when all items in 'required_data' pass `is_ready()`. | ||
2101 | 46 | Each callback will be called with the service name as the only parameter. | ||
2102 | 47 | After all of the 'data_ready' callbacks are called, the 'start' callbacks | ||
2103 | 48 | are fired. | ||
2104 | 49 | |||
2105 | 50 | The 'data_lost' value should be either a single callback, or a list of | ||
2106 | 51 | callbacks, to be called when a 'required_data' item no longer passes | ||
2107 | 52 | `is_ready()`. Each callback will be called with the service name as the | ||
2108 | 53 | only parameter. After all of the 'data_lost' callbacks are called, | ||
2109 | 54 | the 'stop' callbacks are fired. | ||
2110 | 55 | |||
2111 | 56 | The 'start' value should be either a single callback, or a list of | ||
2112 | 57 | callbacks, to be called when starting the service, after the 'data_ready' | ||
2113 | 58 | callbacks are complete. Each callback will be called with the service | ||
2114 | 59 | name as the only parameter. This defaults to | ||
2115 | 60 | `[host.service_start, services.open_ports]`. | ||
2116 | 61 | |||
2117 | 62 | The 'stop' value should be either a single callback, or a list of | ||
2118 | 63 | callbacks, to be called when stopping the service. If the service is | ||
2119 | 64 | being stopped because it no longer has all of its 'required_data', this | ||
2120 | 65 | will be called after all of the 'data_lost' callbacks are complete. | ||
2121 | 66 | Each callback will be called with the service name as the only parameter. | ||
2122 | 67 | This defaults to `[services.close_ports, host.service_stop]`. | ||
2123 | 68 | |||
2124 | 69 | The 'ports' value should be a list of ports to manage. The default | ||
2125 | 70 | 'start' handler will open the ports after the service is started, | ||
2126 | 71 | and the default 'stop' handler will close the ports prior to stopping | ||
2127 | 72 | the service. | ||
2128 | 73 | |||
2129 | 74 | |||
2130 | 75 | Examples: | ||
2131 | 76 | |||
2132 | 77 | The following registers an Upstart service called bingod that depends on | ||
2133 | 78 | a mongodb relation and which runs a custom `db_migrate` function prior to | ||
2134 | 79 | restarting the service, and a Runit service called spadesd:: | ||
2135 | 80 | |||
2136 | 81 | manager = services.ServiceManager([ | ||
2137 | 82 | { | ||
2138 | 83 | 'service': 'bingod', | ||
2139 | 84 | 'ports': [80, 443], | ||
2140 | 85 | 'required_data': [MongoRelation(), config(), {'my': 'data'}], | ||
2141 | 86 | 'data_ready': [ | ||
2142 | 87 | services.template(source='bingod.conf'), | ||
2143 | 88 | services.template(source='bingod.ini', | ||
2144 | 89 | target='/etc/bingod.ini', | ||
2145 | 90 | owner='bingo', perms=0400), | ||
2146 | 91 | ], | ||
2147 | 92 | }, | ||
2148 | 93 | { | ||
2149 | 94 | 'service': 'spadesd', | ||
2150 | 95 | 'data_ready': services.template(source='spadesd_run.j2', | ||
2151 | 96 | target='/etc/sv/spadesd/run', | ||
2152 | 97 | perms=0555), | ||
2153 | 98 | 'start': runit_start, | ||
2154 | 99 | 'stop': runit_stop, | ||
2155 | 100 | }, | ||
2156 | 101 | ]) | ||
2157 | 102 | manager.manage() | ||
2158 | 103 | """ | ||
2159 | 104 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') | ||
2160 | 105 | self._ready = None | ||
2161 | 106 | self.services = {} | ||
2162 | 107 | for service in services or []: | ||
2163 | 108 | service_name = service['service'] | ||
2164 | 109 | self.services[service_name] = service | ||
2165 | 110 | |||
2166 | 111 | def manage(self): | ||
2167 | 112 | """ | ||
2168 | 113 | Handle the current hook by doing The Right Thing with the registered services. | ||
2169 | 114 | """ | ||
2170 | 115 | hook_name = hookenv.hook_name() | ||
2171 | 116 | if hook_name == 'stop': | ||
2172 | 117 | self.stop_services() | ||
2173 | 118 | else: | ||
2174 | 119 | self.provide_data() | ||
2175 | 120 | self.reconfigure_services() | ||
2176 | 121 | cfg = hookenv.config() | ||
2177 | 122 | if cfg.implicit_save: | ||
2178 | 123 | cfg.save() | ||
2179 | 124 | |||
2180 | 125 | def provide_data(self): | ||
2181 | 126 | """ | ||
2182 | 127 | Set the relation data for each provider in the ``provided_data`` list. | ||
2183 | 128 | |||
2184 | 129 | A provider must have a `name` attribute, which indicates which relation | ||
2185 | 130 | to set data on, and a `provide_data()` method, which returns a dict of | ||
2186 | 131 | data to set. | ||
2187 | 132 | """ | ||
2188 | 133 | hook_name = hookenv.hook_name() | ||
2189 | 134 | for service in self.services.values(): | ||
2190 | 135 | for provider in service.get('provided_data', []): | ||
2191 | 136 | if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): | ||
2192 | 137 | data = provider.provide_data() | ||
2193 | 138 | _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data | ||
2194 | 139 | if _ready: | ||
2195 | 140 | hookenv.relation_set(None, data) | ||
2196 | 141 | |||
2197 | 142 | def reconfigure_services(self, *service_names): | ||
2198 | 143 | """ | ||
2199 | 144 | Update all files for one or more registered services, and, | ||
2200 | 145 | if ready, optionally restart them. | ||
2201 | 146 | |||
2202 | 147 | If no service names are given, reconfigures all registered services. | ||
2203 | 148 | """ | ||
2204 | 149 | for service_name in service_names or self.services.keys(): | ||
2205 | 150 | if self.is_ready(service_name): | ||
2206 | 151 | self.fire_event('data_ready', service_name) | ||
2207 | 152 | self.fire_event('start', service_name, default=[ | ||
2208 | 153 | service_restart, | ||
2209 | 154 | manage_ports]) | ||
2210 | 155 | self.save_ready(service_name) | ||
2211 | 156 | else: | ||
2212 | 157 | if self.was_ready(service_name): | ||
2213 | 158 | self.fire_event('data_lost', service_name) | ||
2214 | 159 | self.fire_event('stop', service_name, default=[ | ||
2215 | 160 | manage_ports, | ||
2216 | 161 | service_stop]) | ||
2217 | 162 | self.save_lost(service_name) | ||
2218 | 163 | |||
2219 | 164 | def stop_services(self, *service_names): | ||
2220 | 165 | """ | ||
2221 | 166 | Stop one or more registered services, by name. | ||
2222 | 167 | |||
2223 | 168 | If no service names are given, stops all registered services. | ||
2224 | 169 | """ | ||
2225 | 170 | for service_name in service_names or self.services.keys(): | ||
2226 | 171 | self.fire_event('stop', service_name, default=[ | ||
2227 | 172 | manage_ports, | ||
2228 | 173 | service_stop]) | ||
2229 | 174 | |||
2230 | 175 | def get_service(self, service_name): | ||
2231 | 176 | """ | ||
2232 | 177 | Given the name of a registered service, return its service definition. | ||
2233 | 178 | """ | ||
2234 | 179 | service = self.services.get(service_name) | ||
2235 | 180 | if not service: | ||
2236 | 181 | raise KeyError('Service not registered: %s' % service_name) | ||
2237 | 182 | return service | ||
2238 | 183 | |||
2239 | 184 | def fire_event(self, event_name, service_name, default=None): | ||
2240 | 185 | """ | ||
2241 | 186 | Fire a data_ready, data_lost, start, or stop event on a given service. | ||
2242 | 187 | """ | ||
2243 | 188 | service = self.get_service(service_name) | ||
2244 | 189 | callbacks = service.get(event_name, default) | ||
2245 | 190 | if not callbacks: | ||
2246 | 191 | return | ||
2247 | 192 | if not isinstance(callbacks, Iterable): | ||
2248 | 193 | callbacks = [callbacks] | ||
2249 | 194 | for callback in callbacks: | ||
2250 | 195 | if isinstance(callback, ManagerCallback): | ||
2251 | 196 | callback(self, service_name, event_name) | ||
2252 | 197 | else: | ||
2253 | 198 | callback(service_name) | ||
2254 | 199 | |||
2255 | 200 | def is_ready(self, service_name): | ||
2256 | 201 | """ | ||
2257 | 202 | Determine if a registered service is ready, by checking its 'required_data'. | ||
2258 | 203 | |||
2259 | 204 | A 'required_data' item can be any mapping type, and is considered ready | ||
2260 | 205 | if `bool(item)` evaluates as True. | ||
2261 | 206 | """ | ||
2262 | 207 | service = self.get_service(service_name) | ||
2263 | 208 | reqs = service.get('required_data', []) | ||
2264 | 209 | return all(bool(req) for req in reqs) | ||
2265 | 210 | |||
2266 | 211 | def _load_ready_file(self): | ||
2267 | 212 | if self._ready is not None: | ||
2268 | 213 | return | ||
2269 | 214 | if os.path.exists(self._ready_file): | ||
2270 | 215 | with open(self._ready_file) as fp: | ||
2271 | 216 | self._ready = set(json.load(fp)) | ||
2272 | 217 | else: | ||
2273 | 218 | self._ready = set() | ||
2274 | 219 | |||
2275 | 220 | def _save_ready_file(self): | ||
2276 | 221 | if self._ready is None: | ||
2277 | 222 | return | ||
2278 | 223 | with open(self._ready_file, 'w') as fp: | ||
2279 | 224 | json.dump(list(self._ready), fp) | ||
2280 | 225 | |||
2281 | 226 | def save_ready(self, service_name): | ||
2282 | 227 | """ | ||
2283 | 228 | Save an indicator that the given service is now data_ready. | ||
2284 | 229 | """ | ||
2285 | 230 | self._load_ready_file() | ||
2286 | 231 | self._ready.add(service_name) | ||
2287 | 232 | self._save_ready_file() | ||
2288 | 233 | |||
2289 | 234 | def save_lost(self, service_name): | ||
2290 | 235 | """ | ||
2291 | 236 | Save an indicator that the given service is no longer data_ready. | ||
2292 | 237 | """ | ||
2293 | 238 | self._load_ready_file() | ||
2294 | 239 | self._ready.discard(service_name) | ||
2295 | 240 | self._save_ready_file() | ||
2296 | 241 | |||
2297 | 242 | def was_ready(self, service_name): | ||
2298 | 243 | """ | ||
2299 | 244 | Determine if the given service was previously data_ready. | ||
2300 | 245 | """ | ||
2301 | 246 | self._load_ready_file() | ||
2302 | 247 | return service_name in self._ready | ||
2303 | 248 | |||
2304 | 249 | |||
2305 | 250 | class ManagerCallback(object): | ||
2306 | 251 | """ | ||
2307 | 252 | Special case of a callback that takes the `ServiceManager` instance | ||
2308 | 253 | in addition to the service name. | ||
2309 | 254 | |||
2310 | 255 | Subclasses should implement `__call__` which should accept three parameters: | ||
2311 | 256 | |||
2312 | 257 | * `manager` The `ServiceManager` instance | ||
2313 | 258 | * `service_name` The name of the service it's being triggered for | ||
2314 | 259 | * `event_name` The name of the event that this callback is handling | ||
2315 | 260 | """ | ||
2316 | 261 | def __call__(self, manager, service_name, event_name): | ||
2317 | 262 | raise NotImplementedError() | ||
2318 | 263 | |||
2319 | 264 | |||
2320 | 265 | class PortManagerCallback(ManagerCallback): | ||
2321 | 266 | """ | ||
2322 | 267 | Callback class that will open or close ports, for use as either | ||
2323 | 268 | a start or stop action. | ||
2324 | 269 | """ | ||
2325 | 270 | def __call__(self, manager, service_name, event_name): | ||
2326 | 271 | service = manager.get_service(service_name) | ||
2327 | 272 | new_ports = service.get('ports', []) | ||
2328 | 273 | port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) | ||
2329 | 274 | if os.path.exists(port_file): | ||
2330 | 275 | with open(port_file) as fp: | ||
2331 | 276 | old_ports = fp.read().split(',') | ||
2332 | 277 | for old_port in old_ports: | ||
2333 | 278 | if bool(old_port): | ||
2334 | 279 | old_port = int(old_port) | ||
2335 | 280 | if old_port not in new_ports: | ||
2336 | 281 | hookenv.close_port(old_port) | ||
2337 | 282 | with open(port_file, 'w') as fp: | ||
2338 | 283 | fp.write(','.join(str(port) for port in new_ports)) | ||
2339 | 284 | for port in new_ports: | ||
2340 | 285 | if event_name == 'start': | ||
2341 | 286 | hookenv.open_port(port) | ||
2342 | 287 | elif event_name == 'stop': | ||
2343 | 288 | hookenv.close_port(port) | ||
2344 | 289 | |||
2345 | 290 | |||
2346 | 291 | def service_stop(service_name): | ||
2347 | 292 | """ | ||
2348 | 293 | Wrapper around host.service_stop to prevent spurious "unknown service" | ||
2349 | 294 | messages in the logs. | ||
2350 | 295 | """ | ||
2351 | 296 | if host.service_running(service_name): | ||
2352 | 297 | host.service_stop(service_name) | ||
2353 | 298 | |||
2354 | 299 | |||
2355 | 300 | def service_restart(service_name): | ||
2356 | 301 | """ | ||
2357 | 302 | Wrapper around host.service_restart to prevent spurious "unknown service" | ||
2358 | 303 | messages in the logs. | ||
2359 | 304 | """ | ||
2360 | 305 | if host.service_available(service_name): | ||
2361 | 306 | if host.service_running(service_name): | ||
2362 | 307 | host.service_restart(service_name) | ||
2363 | 308 | else: | ||
2364 | 309 | host.service_start(service_name) | ||
2365 | 310 | |||
2366 | 311 | |||
2367 | 312 | # Convenience aliases | ||
2368 | 313 | open_ports = close_ports = manage_ports = PortManagerCallback() | ||
2369 | 0 | 314 | ||
2370 | === added file 'hooks/charmhelpers/core/services/helpers.py' | |||
2371 | --- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000 | |||
2372 | +++ hooks/charmhelpers/core/services/helpers.py 2014-09-26 08:15:24 +0000 | |||
2373 | @@ -0,0 +1,239 @@ | |||
2374 | 1 | import os | ||
2375 | 2 | import yaml | ||
2376 | 3 | from charmhelpers.core import hookenv | ||
2377 | 4 | from charmhelpers.core import templating | ||
2378 | 5 | |||
2379 | 6 | from charmhelpers.core.services.base import ManagerCallback | ||
2380 | 7 | |||
2381 | 8 | |||
2382 | 9 | __all__ = ['RelationContext', 'TemplateCallback', | ||
2383 | 10 | 'render_template', 'template'] | ||
2384 | 11 | |||
2385 | 12 | |||
2386 | 13 | class RelationContext(dict): | ||
2387 | 14 | """ | ||
2388 | 15 | Base class for a context generator that gets relation data from juju. | ||
2389 | 16 | |||
2390 | 17 | Subclasses must provide the attributes `name`, which is the name of the | ||
2391 | 18 | interface of interest, `interface`, which is the type of the interface of | ||
2392 | 19 | interest, and `required_keys`, which is the set of keys required for the | ||
2393 | 20 | relation to be considered complete. The data for all interfaces matching | ||
2394 | 21 | the `name` attribute that are complete will used to populate the dictionary | ||
2395 | 22 | values (see `get_data`, below). | ||
2396 | 23 | |||
2397 | 24 | The generated context will be namespaced under the relation :attr:`name`, | ||
2398 | 25 | to prevent potential naming conflicts. | ||
2399 | 26 | |||
2400 | 27 | :param str name: Override the relation :attr:`name`, since it can vary from charm to charm | ||
2401 | 28 | :param list additional_required_keys: Extend the list of :attr:`required_keys` | ||
2402 | 29 | """ | ||
2403 | 30 | name = None | ||
2404 | 31 | interface = None | ||
2405 | 32 | required_keys = [] | ||
2406 | 33 | |||
2407 | 34 | def __init__(self, name=None, additional_required_keys=None): | ||
2408 | 35 | if name is not None: | ||
2409 | 36 | self.name = name | ||
2410 | 37 | if additional_required_keys is not None: | ||
2411 | 38 | self.required_keys.extend(additional_required_keys) | ||
2412 | 39 | self.get_data() | ||
2413 | 40 | |||
2414 | 41 | def __bool__(self): | ||
2415 | 42 | """ | ||
2416 | 43 | Returns True if all of the required_keys are available. | ||
2417 | 44 | """ | ||
2418 | 45 | return self.is_ready() | ||
2419 | 46 | |||
2420 | 47 | __nonzero__ = __bool__ | ||
2421 | 48 | |||
2422 | 49 | def __repr__(self): | ||
2423 | 50 | return super(RelationContext, self).__repr__() | ||
2424 | 51 | |||
2425 | 52 | def is_ready(self): | ||
2426 | 53 | """ | ||
2427 | 54 | Returns True if all of the `required_keys` are available from any units. | ||
2428 | 55 | """ | ||
2429 | 56 | ready = len(self.get(self.name, [])) > 0 | ||
2430 | 57 | if not ready: | ||
2431 | 58 | hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) | ||
2432 | 59 | return ready | ||
2433 | 60 | |||
2434 | 61 | def _is_ready(self, unit_data): | ||
2435 | 62 | """ | ||
2436 | 63 | Helper method that tests a set of relation data and returns True if | ||
2437 | 64 | all of the `required_keys` are present. | ||
2438 | 65 | """ | ||
2439 | 66 | return set(unit_data.keys()).issuperset(set(self.required_keys)) | ||
2440 | 67 | |||
2441 | 68 | def get_data(self): | ||
2442 | 69 | """ | ||
2443 | 70 | Retrieve the relation data for each unit involved in a relation and, | ||
2444 | 71 | if complete, store it in a list under `self[self.name]`. This | ||
2445 | 72 | is automatically called when the RelationContext is instantiated. | ||
2446 | 73 | |||
2447 | 74 | The units are sorted lexicographically first by the service ID, then by | ||
2448 | 75 | the unit ID. Thus, if an interface has two other services, 'db:1' | ||
2449 | 76 | and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', | ||
2450 | 77 | and 'db:2' having one unit, 'mediawiki/0', all of which have a complete | ||
2451 | 78 | set of data, the relation data for the units will be stored in the | ||
2452 | 79 | order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. | ||
2453 | 80 | |||
2454 | 81 | If you only care about a single unit on the relation, you can just | ||
2455 | 82 | access it as `{{ interface[0]['key'] }}`. However, if you can at all | ||
2456 | 83 | support multiple units on a relation, you should iterate over the list, | ||
2457 | 84 | like:: | ||
2458 | 85 | |||
2459 | 86 | {% for unit in interface -%} | ||
2460 | 87 | {{ unit['key'] }}{% if not loop.last %},{% endif %} | ||
2461 | 88 | {%- endfor %} | ||
2462 | 89 | |||
2463 | 90 | Note that since all sets of relation data from all related services and | ||
2464 | 91 | units are in a single list, if you need to know which service or unit a | ||
2465 | 92 | set of data came from, you'll need to extend this class to preserve | ||
2466 | 93 | that information. | ||
2467 | 94 | """ | ||
2468 | 95 | if not hookenv.relation_ids(self.name): | ||
2469 | 96 | return | ||
2470 | 97 | |||
2471 | 98 | ns = self.setdefault(self.name, []) | ||
2472 | 99 | for rid in sorted(hookenv.relation_ids(self.name)): | ||
2473 | 100 | for unit in sorted(hookenv.related_units(rid)): | ||
2474 | 101 | reldata = hookenv.relation_get(rid=rid, unit=unit) | ||
2475 | 102 | if self._is_ready(reldata): | ||
2476 | 103 | ns.append(reldata) | ||
2477 | 104 | |||
2478 | 105 | def provide_data(self): | ||
2479 | 106 | """ | ||
2480 | 107 | Return data to be relation_set for this interface. | ||
2481 | 108 | """ | ||
2482 | 109 | return {} | ||
2483 | 110 | |||
2484 | 111 | |||
2485 | 112 | class MysqlRelation(RelationContext): | ||
2486 | 113 | """ | ||
2487 | 114 | Relation context for the `mysql` interface. | ||
2488 | 115 | |||
2489 | 116 | :param str name: Override the relation :attr:`name`, since it can vary from charm to charm | ||
2490 | 117 | :param list additional_required_keys: Extend the list of :attr:`required_keys` | ||
2491 | 118 | """ | ||
2492 | 119 | name = 'db' | ||
2493 | 120 | interface = 'mysql' | ||
2494 | 121 | required_keys = ['host', 'user', 'password', 'database'] | ||
2495 | 122 | |||
2496 | 123 | |||
2497 | 124 | class HttpRelation(RelationContext): | ||
2498 | 125 | """ | ||
2499 | 126 | Relation context for the `http` interface. | ||
2500 | 127 | |||
2501 | 128 | :param str name: Override the relation :attr:`name`, since it can vary from charm to charm | ||
2502 | 129 | :param list additional_required_keys: Extend the list of :attr:`required_keys` | ||
2503 | 130 | """ | ||
2504 | 131 | name = 'website' | ||
2505 | 132 | interface = 'http' | ||
2506 | 133 | required_keys = ['host', 'port'] | ||
2507 | 134 | |||
2508 | 135 | def provide_data(self): | ||
2509 | 136 | return { | ||
2510 | 137 | 'host': hookenv.unit_get('private-address'), | ||
2511 | 138 | 'port': 80, | ||
2512 | 139 | } | ||
2513 | 140 | |||
2514 | 141 | |||
2515 | 142 | class RequiredConfig(dict): | ||
2516 | 143 | """ | ||
2517 | 144 | Data context that loads config options with one or more mandatory options. | ||
2518 | 145 | |||
2519 | 146 | Once the required options have been changed from their default values, all | ||
2520 | 147 | config options will be available, namespaced under `config` to prevent | ||
2521 | 148 | potential naming conflicts (for example, between a config option and a | ||
2522 | 149 | relation property). | ||
2523 | 150 | |||
2524 | 151 | :param list *args: List of options that must be changed from their default values. | ||
2525 | 152 | """ | ||
2526 | 153 | |||
2527 | 154 | def __init__(self, *args): | ||
2528 | 155 | self.required_options = args | ||
2529 | 156 | self['config'] = hookenv.config() | ||
2530 | 157 | with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: | ||
2531 | 158 | self.config = yaml.load(fp).get('options', {}) | ||
2532 | 159 | |||
2533 | 160 | def __bool__(self): | ||
2534 | 161 | for option in self.required_options: | ||
2535 | 162 | if option not in self['config']: | ||
2536 | 163 | return False | ||
2537 | 164 | current_value = self['config'][option] | ||
2538 | 165 | default_value = self.config[option].get('default') | ||
2539 | 166 | if current_value == default_value: | ||
2540 | 167 | return False | ||
2541 | 168 | if current_value in (None, '') and default_value in (None, ''): | ||
2542 | 169 | return False | ||
2543 | 170 | return True | ||
2544 | 171 | |||
2545 | 172 | def __nonzero__(self): | ||
2546 | 173 | return self.__bool__() | ||
2547 | 174 | |||
2548 | 175 | |||
2549 | 176 | class StoredContext(dict): | ||
2550 | 177 | """ | ||
2551 | 178 | A data context that always returns the data that it was first created with. | ||
2552 | 179 | |||
2553 | 180 | This is useful to do a one-time generation of things like passwords, that | ||
2554 | 181 | will thereafter use the same value that was originally generated, instead | ||
2555 | 182 | of generating a new value each time it is run. | ||
2556 | 183 | """ | ||
2557 | 184 | def __init__(self, file_name, config_data): | ||
2558 | 185 | """ | ||
2559 | 186 | If the file exists, populate `self` with the data from the file. | ||
2560 | 187 | Otherwise, populate with the given data and persist it to the file. | ||
2561 | 188 | """ | ||
2562 | 189 | if os.path.exists(file_name): | ||
2563 | 190 | self.update(self.read_context(file_name)) | ||
2564 | 191 | else: | ||
2565 | 192 | self.store_context(file_name, config_data) | ||
2566 | 193 | self.update(config_data) | ||
2567 | 194 | |||
2568 | 195 | def store_context(self, file_name, config_data): | ||
2569 | 196 | if not os.path.isabs(file_name): | ||
2570 | 197 | file_name = os.path.join(hookenv.charm_dir(), file_name) | ||
2571 | 198 | with open(file_name, 'w') as file_stream: | ||
2572 | 199 | os.fchmod(file_stream.fileno(), 0600) | ||
2573 | 200 | yaml.dump(config_data, file_stream) | ||
2574 | 201 | |||
2575 | 202 | def read_context(self, file_name): | ||
2576 | 203 | if not os.path.isabs(file_name): | ||
2577 | 204 | file_name = os.path.join(hookenv.charm_dir(), file_name) | ||
2578 | 205 | with open(file_name, 'r') as file_stream: | ||
2579 | 206 | data = yaml.load(file_stream) | ||
2580 | 207 | if not data: | ||
2581 | 208 | raise OSError("%s is empty" % file_name) | ||
2582 | 209 | return data | ||
2583 | 210 | |||
2584 | 211 | |||
2585 | 212 | class TemplateCallback(ManagerCallback): | ||
2586 | 213 | """ | ||
2587 | 214 | Callback class that will render a Jinja2 template, for use as a ready action. | ||
2588 | 215 | |||
2589 | 216 | :param str source: The template source file, relative to `$CHARM_DIR/templates` | ||
2590 | 217 | :param str target: The target to write the rendered template to | ||
2591 | 218 | :param str owner: The owner of the rendered file | ||
2592 | 219 | :param str group: The group of the rendered file | ||
2593 | 220 | :param int perms: The permissions of the rendered file | ||
2594 | 221 | """ | ||
2595 | 222 | def __init__(self, source, target, owner='root', group='root', perms=0444): | ||
2596 | 223 | self.source = source | ||
2597 | 224 | self.target = target | ||
2598 | 225 | self.owner = owner | ||
2599 | 226 | self.group = group | ||
2600 | 227 | self.perms = perms | ||
2601 | 228 | |||
2602 | 229 | def __call__(self, manager, service_name, event_name): | ||
2603 | 230 | service = manager.get_service(service_name) | ||
2604 | 231 | context = {} | ||
2605 | 232 | for ctx in service.get('required_data', []): | ||
2606 | 233 | context.update(ctx) | ||
2607 | 234 | templating.render(self.source, self.target, context, | ||
2608 | 235 | self.owner, self.group, self.perms) | ||
2609 | 236 | |||
2610 | 237 | |||
2611 | 238 | # Convenience aliases for templates | ||
2612 | 239 | render_template = template = TemplateCallback | ||
2613 | 0 | 240 | ||
2614 | === added file 'hooks/charmhelpers/core/templating.py' | |||
2615 | --- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000 | |||
2616 | +++ hooks/charmhelpers/core/templating.py 2014-09-26 08:15:24 +0000 | |||
2617 | @@ -0,0 +1,51 @@ | |||
2618 | 1 | import os | ||
2619 | 2 | |||
2620 | 3 | from charmhelpers.core import host | ||
2621 | 4 | from charmhelpers.core import hookenv | ||
2622 | 5 | |||
2623 | 6 | |||
2624 | 7 | def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): | ||
2625 | 8 | """ | ||
2626 | 9 | Render a template. | ||
2627 | 10 | |||
2628 | 11 | The `source` path, if not absolute, is relative to the `templates_dir`. | ||
2629 | 12 | |||
2630 | 13 | The `target` path should be absolute. | ||
2631 | 14 | |||
2632 | 15 | The context should be a dict containing the values to be replaced in the | ||
2633 | 16 | template. | ||
2634 | 17 | |||
2635 | 18 | The `owner`, `group`, and `perms` options will be passed to `write_file`. | ||
2636 | 19 | |||
2637 | 20 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. | ||
2638 | 21 | |||
2639 | 22 | Note: Using this requires python-jinja2; if it is not installed, calling | ||
2640 | 23 | this will attempt to use charmhelpers.fetch.apt_install to install it. | ||
2641 | 24 | """ | ||
2642 | 25 | try: | ||
2643 | 26 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
2644 | 27 | except ImportError: | ||
2645 | 28 | try: | ||
2646 | 29 | from charmhelpers.fetch import apt_install | ||
2647 | 30 | except ImportError: | ||
2648 | 31 | hookenv.log('Could not import jinja2, and could not import ' | ||
2649 | 32 | 'charmhelpers.fetch to install it', | ||
2650 | 33 | level=hookenv.ERROR) | ||
2651 | 34 | raise | ||
2652 | 35 | apt_install('python-jinja2', fatal=True) | ||
2653 | 36 | from jinja2 import FileSystemLoader, Environment, exceptions | ||
2654 | 37 | |||
2655 | 38 | if templates_dir is None: | ||
2656 | 39 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') | ||
2657 | 40 | loader = Environment(loader=FileSystemLoader(templates_dir)) | ||
2658 | 41 | try: | ||
2659 | 42 | source = source | ||
2660 | 43 | template = loader.get_template(source) | ||
2661 | 44 | except exceptions.TemplateNotFound as e: | ||
2662 | 45 | hookenv.log('Could not load template %s from %s.' % | ||
2663 | 46 | (source, templates_dir), | ||
2664 | 47 | level=hookenv.ERROR) | ||
2665 | 48 | raise e | ||
2666 | 49 | content = template.render(context) | ||
2667 | 50 | host.mkdir(os.path.dirname(target)) | ||
2668 | 51 | host.write_file(target, content, owner, group, perms) | ||
2669 | 0 | 52 | ||
2670 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
2671 | --- hooks/charmhelpers/fetch/__init__.py 2014-05-02 13:03:56 +0000 | |||
2672 | +++ hooks/charmhelpers/fetch/__init__.py 2014-09-26 08:15:24 +0000 | |||
2673 | @@ -1,4 +1,6 @@ | |||
2674 | 1 | import importlib | 1 | import importlib |
2675 | 2 | from tempfile import NamedTemporaryFile | ||
2676 | 3 | import time | ||
2677 | 2 | from yaml import safe_load | 4 | from yaml import safe_load |
2678 | 3 | from charmhelpers.core.host import ( | 5 | from charmhelpers.core.host import ( |
2679 | 4 | lsb_release | 6 | lsb_release |
2680 | @@ -12,9 +14,9 @@ | |||
2681 | 12 | config, | 14 | config, |
2682 | 13 | log, | 15 | log, |
2683 | 14 | ) | 16 | ) |
2684 | 15 | import apt_pkg | ||
2685 | 16 | import os | 17 | import os |
2686 | 17 | 18 | ||
2687 | 19 | |||
2688 | 18 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive | 20 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive |
2689 | 19 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main | 21 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
2690 | 20 | """ | 22 | """ |
2691 | @@ -54,13 +56,68 @@ | |||
2692 | 54 | 'icehouse/proposed': 'precise-proposed/icehouse', | 56 | 'icehouse/proposed': 'precise-proposed/icehouse', |
2693 | 55 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', | 57 | 'precise-icehouse/proposed': 'precise-proposed/icehouse', |
2694 | 56 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', | 58 | 'precise-proposed/icehouse': 'precise-proposed/icehouse', |
2695 | 59 | # Juno | ||
2696 | 60 | 'juno': 'trusty-updates/juno', | ||
2697 | 61 | 'trusty-juno': 'trusty-updates/juno', | ||
2698 | 62 | 'trusty-juno/updates': 'trusty-updates/juno', | ||
2699 | 63 | 'trusty-updates/juno': 'trusty-updates/juno', | ||
2700 | 64 | 'juno/proposed': 'trusty-proposed/juno', | ||
2701 | 65 | 'juno/proposed': 'trusty-proposed/juno', | ||
2702 | 66 | 'trusty-juno/proposed': 'trusty-proposed/juno', | ||
2703 | 67 | 'trusty-proposed/juno': 'trusty-proposed/juno', | ||
2704 | 57 | } | 68 | } |
2705 | 58 | 69 | ||
2706 | 70 | # The order of this list is very important. Handlers should be listed in from | ||
2707 | 71 | # least- to most-specific URL matching. | ||
2708 | 72 | FETCH_HANDLERS = ( | ||
2709 | 73 | 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', | ||
2710 | 74 | 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', | ||
2711 | 75 | ) | ||
2712 | 76 | |||
2713 | 77 | APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. | ||
2714 | 78 | APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. | ||
2715 | 79 | APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. | ||
2716 | 80 | |||
2717 | 81 | |||
2718 | 82 | class SourceConfigError(Exception): | ||
2719 | 83 | pass | ||
2720 | 84 | |||
2721 | 85 | |||
2722 | 86 | class UnhandledSource(Exception): | ||
2723 | 87 | pass | ||
2724 | 88 | |||
2725 | 89 | |||
2726 | 90 | class AptLockError(Exception): | ||
2727 | 91 | pass | ||
2728 | 92 | |||
2729 | 93 | |||
2730 | 94 | class BaseFetchHandler(object): | ||
2731 | 95 | |||
2732 | 96 | """Base class for FetchHandler implementations in fetch plugins""" | ||
2733 | 97 | |||
2734 | 98 | def can_handle(self, source): | ||
2735 | 99 | """Returns True if the source can be handled. Otherwise returns | ||
2736 | 100 | a string explaining why it cannot""" | ||
2737 | 101 | return "Wrong source type" | ||
2738 | 102 | |||
2739 | 103 | def install(self, source): | ||
2740 | 104 | """Try to download and unpack the source. Return the path to the | ||
2741 | 105 | unpacked files or raise UnhandledSource.""" | ||
2742 | 106 | raise UnhandledSource("Wrong source type {}".format(source)) | ||
2743 | 107 | |||
2744 | 108 | def parse_url(self, url): | ||
2745 | 109 | return urlparse(url) | ||
2746 | 110 | |||
2747 | 111 | def base_url(self, url): | ||
2748 | 112 | """Return url without querystring or fragment""" | ||
2749 | 113 | parts = list(self.parse_url(url)) | ||
2750 | 114 | parts[4:] = ['' for i in parts[4:]] | ||
2751 | 115 | return urlunparse(parts) | ||
2752 | 116 | |||
2753 | 59 | 117 | ||
2754 | 60 | def filter_installed_packages(packages): | 118 | def filter_installed_packages(packages): |
2755 | 61 | """Returns a list of packages that require installation""" | 119 | """Returns a list of packages that require installation""" |
2758 | 62 | apt_pkg.init() | 120 | cache = apt_cache() |
2757 | 63 | cache = apt_pkg.Cache() | ||
2759 | 64 | _pkgs = [] | 121 | _pkgs = [] |
2760 | 65 | for package in packages: | 122 | for package in packages: |
2761 | 66 | try: | 123 | try: |
2762 | @@ -73,6 +130,16 @@ | |||
2763 | 73 | return _pkgs | 130 | return _pkgs |
2764 | 74 | 131 | ||
2765 | 75 | 132 | ||
2766 | 133 | def apt_cache(in_memory=True): | ||
2767 | 134 | """Build and return an apt cache""" | ||
2768 | 135 | import apt_pkg | ||
2769 | 136 | apt_pkg.init() | ||
2770 | 137 | if in_memory: | ||
2771 | 138 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
2772 | 139 | apt_pkg.config.set("Dir::Cache::srcpkgcache", "") | ||
2773 | 140 | return apt_pkg.Cache() | ||
2774 | 141 | |||
2775 | 142 | |||
2776 | 76 | def apt_install(packages, options=None, fatal=False): | 143 | def apt_install(packages, options=None, fatal=False): |
2777 | 77 | """Install one or more packages""" | 144 | """Install one or more packages""" |
2778 | 78 | if options is None: | 145 | if options is None: |
2779 | @@ -87,14 +154,7 @@ | |||
2780 | 87 | cmd.extend(packages) | 154 | cmd.extend(packages) |
2781 | 88 | log("Installing {} with options: {}".format(packages, | 155 | log("Installing {} with options: {}".format(packages, |
2782 | 89 | options)) | 156 | options)) |
2791 | 90 | env = os.environ.copy() | 157 | _run_apt_command(cmd, fatal) |
2784 | 91 | if 'DEBIAN_FRONTEND' not in env: | ||
2785 | 92 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
2786 | 93 | |||
2787 | 94 | if fatal: | ||
2788 | 95 | subprocess.check_call(cmd, env=env) | ||
2789 | 96 | else: | ||
2790 | 97 | subprocess.call(cmd, env=env) | ||
2792 | 98 | 158 | ||
2793 | 99 | 159 | ||
2794 | 100 | def apt_upgrade(options=None, fatal=False, dist=False): | 160 | def apt_upgrade(options=None, fatal=False, dist=False): |
2795 | @@ -109,24 +169,13 @@ | |||
2796 | 109 | else: | 169 | else: |
2797 | 110 | cmd.append('upgrade') | 170 | cmd.append('upgrade') |
2798 | 111 | log("Upgrading with options: {}".format(options)) | 171 | log("Upgrading with options: {}".format(options)) |
2808 | 112 | 172 | _run_apt_command(cmd, fatal) | |
2800 | 113 | env = os.environ.copy() | ||
2801 | 114 | if 'DEBIAN_FRONTEND' not in env: | ||
2802 | 115 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
2803 | 116 | |||
2804 | 117 | if fatal: | ||
2805 | 118 | subprocess.check_call(cmd, env=env) | ||
2806 | 119 | else: | ||
2807 | 120 | subprocess.call(cmd, env=env) | ||
2809 | 121 | 173 | ||
2810 | 122 | 174 | ||
2811 | 123 | def apt_update(fatal=False): | 175 | def apt_update(fatal=False): |
2812 | 124 | """Update local apt cache""" | 176 | """Update local apt cache""" |
2813 | 125 | cmd = ['apt-get', 'update'] | 177 | cmd = ['apt-get', 'update'] |
2818 | 126 | if fatal: | 178 | _run_apt_command(cmd, fatal) |
2815 | 127 | subprocess.check_call(cmd) | ||
2816 | 128 | else: | ||
2817 | 129 | subprocess.call(cmd) | ||
2819 | 130 | 179 | ||
2820 | 131 | 180 | ||
2821 | 132 | def apt_purge(packages, fatal=False): | 181 | def apt_purge(packages, fatal=False): |
2822 | @@ -137,10 +186,7 @@ | |||
2823 | 137 | else: | 186 | else: |
2824 | 138 | cmd.extend(packages) | 187 | cmd.extend(packages) |
2825 | 139 | log("Purging {}".format(packages)) | 188 | log("Purging {}".format(packages)) |
2830 | 140 | if fatal: | 189 | _run_apt_command(cmd, fatal) |
2827 | 141 | subprocess.check_call(cmd) | ||
2828 | 142 | else: | ||
2829 | 143 | subprocess.call(cmd) | ||
2831 | 144 | 190 | ||
2832 | 145 | 191 | ||
2833 | 146 | def apt_hold(packages, fatal=False): | 192 | def apt_hold(packages, fatal=False): |
2834 | @@ -151,6 +197,7 @@ | |||
2835 | 151 | else: | 197 | else: |
2836 | 152 | cmd.extend(packages) | 198 | cmd.extend(packages) |
2837 | 153 | log("Holding {}".format(packages)) | 199 | log("Holding {}".format(packages)) |
2838 | 200 | |||
2839 | 154 | if fatal: | 201 | if fatal: |
2840 | 155 | subprocess.check_call(cmd) | 202 | subprocess.check_call(cmd) |
2841 | 156 | else: | 203 | else: |
2842 | @@ -158,6 +205,28 @@ | |||
2843 | 158 | 205 | ||
2844 | 159 | 206 | ||
2845 | 160 | def add_source(source, key=None): | 207 | def add_source(source, key=None): |
2846 | 208 | """Add a package source to this system. | ||
2847 | 209 | |||
2848 | 210 | @param source: a URL or sources.list entry, as supported by | ||
2849 | 211 | add-apt-repository(1). Examples:: | ||
2850 | 212 | |||
2851 | 213 | ppa:charmers/example | ||
2852 | 214 | deb https://stub:key@private.example.com/ubuntu trusty main | ||
2853 | 215 | |||
2854 | 216 | In addition: | ||
2855 | 217 | 'proposed:' may be used to enable the standard 'proposed' | ||
2856 | 218 | pocket for the release. | ||
2857 | 219 | 'cloud:' may be used to activate official cloud archive pockets, | ||
2858 | 220 | such as 'cloud:icehouse' | ||
2859 | 221 | |||
2860 | 222 | @param key: A key to be added to the system's APT keyring and used | ||
2861 | 223 | to verify the signatures on packages. Ideally, this should be an | ||
2862 | 224 | ASCII format GPG public key including the block headers. A GPG key | ||
2863 | 225 | id may also be used, but be aware that only insecure protocols are | ||
2864 | 226 | available to retrieve the actual public key from a public keyserver | ||
2865 | 227 | placing your Juju environment at risk. ppa and cloud archive keys | ||
2866 | 228 | are securely added automatically, so should not be provided. | ||
2867 | 229 | """ | ||
2868 | 161 | if source is None: | 230 | if source is None: |
2869 | 162 | log('Source is not present. Skipping') | 231 | log('Source is not present. Skipping') |
2870 | 163 | return | 232 | return |
2871 | @@ -182,76 +251,96 @@ | |||
2872 | 182 | release = lsb_release()['DISTRIB_CODENAME'] | 251 | release = lsb_release()['DISTRIB_CODENAME'] |
2873 | 183 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | 252 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
2874 | 184 | apt.write(PROPOSED_POCKET.format(release)) | 253 | apt.write(PROPOSED_POCKET.format(release)) |
2875 | 254 | else: | ||
2876 | 255 | raise SourceConfigError("Unknown source: {!r}".format(source)) | ||
2877 | 256 | |||
2878 | 185 | if key: | 257 | if key: |
2886 | 186 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | 258 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: |
2887 | 187 | 'hkp://keyserver.ubuntu.com:80', '--recv', | 259 | with NamedTemporaryFile() as key_file: |
2888 | 188 | key]) | 260 | key_file.write(key) |
2889 | 189 | 261 | key_file.flush() | |
2890 | 190 | 262 | key_file.seek(0) | |
2891 | 191 | class SourceConfigError(Exception): | 263 | subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) |
2892 | 192 | pass | 264 | else: |
2893 | 265 | # Note that hkp: is in no way a secure protocol. Using a | ||
2894 | 266 | # GPG key id is pointless from a security POV unless you | ||
2895 | 267 | # absolutely trust your network and DNS. | ||
2896 | 268 | subprocess.check_call(['apt-key', 'adv', '--keyserver', | ||
2897 | 269 | 'hkp://keyserver.ubuntu.com:80', '--recv', | ||
2898 | 270 | key]) | ||
2899 | 193 | 271 | ||
2900 | 194 | 272 | ||
2901 | 195 | def configure_sources(update=False, | 273 | def configure_sources(update=False, |
2902 | 196 | sources_var='install_sources', | 274 | sources_var='install_sources', |
2903 | 197 | keys_var='install_keys'): | 275 | keys_var='install_keys'): |
2904 | 198 | """ | 276 | """ |
2906 | 199 | Configure multiple sources from charm configuration | 277 | Configure multiple sources from charm configuration. |
2907 | 278 | |||
2908 | 279 | The lists are encoded as yaml fragments in the configuration. | ||
2909 | 280 | The fragment needs to be included as a string. Sources and their | ||
2910 | 281 | corresponding keys are of the types supported by add_source(). | ||
2911 | 200 | 282 | ||
2912 | 201 | Example config: | 283 | Example config: |
2914 | 202 | install_sources: | 284 | install_sources: | |
2915 | 203 | - "ppa:foo" | 285 | - "ppa:foo" |
2916 | 204 | - "http://example.com/repo precise main" | 286 | - "http://example.com/repo precise main" |
2918 | 205 | install_keys: | 287 | install_keys: | |
2919 | 206 | - null | 288 | - null |
2920 | 207 | - "a1b2c3d4" | 289 | - "a1b2c3d4" |
2921 | 208 | 290 | ||
2922 | 209 | Note that 'null' (a.k.a. None) should not be quoted. | 291 | Note that 'null' (a.k.a. None) should not be quoted. |
2923 | 210 | """ | 292 | """ |
2931 | 211 | sources = safe_load(config(sources_var)) | 293 | sources = safe_load((config(sources_var) or '').strip()) or [] |
2932 | 212 | keys = config(keys_var) | 294 | keys = safe_load((config(keys_var) or '').strip()) or None |
2933 | 213 | if keys is not None: | 295 | |
2934 | 214 | keys = safe_load(keys) | 296 | if isinstance(sources, basestring): |
2935 | 215 | if isinstance(sources, basestring) and ( | 297 | sources = [sources] |
2936 | 216 | keys is None or isinstance(keys, basestring)): | 298 | |
2937 | 217 | add_source(sources, keys) | 299 | if keys is None: |
2938 | 300 | for source in sources: | ||
2939 | 301 | add_source(source, None) | ||
2940 | 218 | else: | 302 | else: |
2946 | 219 | if not len(sources) == len(keys): | 303 | if isinstance(keys, basestring): |
2947 | 220 | msg = 'Install sources and keys lists are different lengths' | 304 | keys = [keys] |
2948 | 221 | raise SourceConfigError(msg) | 305 | |
2949 | 222 | for src_num in range(len(sources)): | 306 | if len(sources) != len(keys): |
2950 | 223 | add_source(sources[src_num], keys[src_num]) | 307 | raise SourceConfigError( |
2951 | 308 | 'Install sources and keys lists are different lengths') | ||
2952 | 309 | for source, key in zip(sources, keys): | ||
2953 | 310 | add_source(source, key) | ||
2954 | 224 | if update: | 311 | if update: |
2955 | 225 | apt_update(fatal=True) | 312 | apt_update(fatal=True) |
2956 | 226 | 313 | ||
2970 | 227 | # The order of this list is very important. Handlers should be listed in from | 314 | |
2971 | 228 | # least- to most-specific URL matching. | 315 | def install_remote(source, *args, **kwargs): |
2959 | 229 | FETCH_HANDLERS = ( | ||
2960 | 230 | 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', | ||
2961 | 231 | 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', | ||
2962 | 232 | ) | ||
2963 | 233 | |||
2964 | 234 | |||
2965 | 235 | class UnhandledSource(Exception): | ||
2966 | 236 | pass | ||
2967 | 237 | |||
2968 | 238 | |||
2969 | 239 | def install_remote(source): | ||
2972 | 240 | """ | 316 | """ |
2973 | 241 | Install a file tree from a remote source | 317 | Install a file tree from a remote source |
2974 | 242 | 318 | ||
2975 | 243 | The specified source should be a url of the form: | 319 | The specified source should be a url of the form: |
2976 | 244 | scheme://[host]/path[#[option=value][&...]] | 320 | scheme://[host]/path[#[option=value][&...]] |
2977 | 245 | 321 | ||
2980 | 246 | Schemes supported are based on this modules submodules | 322 | Schemes supported are based on this modules submodules. |
2981 | 247 | Options supported are submodule-specific""" | 323 | Options supported are submodule-specific. |
2982 | 324 | Additional arguments are passed through to the submodule. | ||
2983 | 325 | |||
2984 | 326 | For example:: | ||
2985 | 327 | |||
2986 | 328 | dest = install_remote('http://example.com/archive.tgz', | ||
2987 | 329 | checksum='deadbeef', | ||
2988 | 330 | hash_type='sha1') | ||
2989 | 331 | |||
2990 | 332 | This will download `archive.tgz`, validate it using SHA1 and, if | ||
2991 | 333 | the file is ok, extract it and return the directory in which it | ||
2992 | 334 | was extracted. If the checksum fails, it will raise | ||
2993 | 335 | :class:`charmhelpers.core.host.ChecksumError`. | ||
2994 | 336 | """ | ||
2995 | 248 | # We ONLY check for True here because can_handle may return a string | 337 | # We ONLY check for True here because can_handle may return a string |
2996 | 249 | # explaining why it can't handle a given source. | 338 | # explaining why it can't handle a given source. |
2997 | 250 | handlers = [h for h in plugins() if h.can_handle(source) is True] | 339 | handlers = [h for h in plugins() if h.can_handle(source) is True] |
2998 | 251 | installed_to = None | 340 | installed_to = None |
2999 | 252 | for handler in handlers: | 341 | for handler in handlers: |
3000 | 253 | try: | 342 | try: |
3002 | 254 | installed_to = handler.install(source) | 343 | installed_to = handler.install(source, *args, **kwargs) |
3003 | 255 | except UnhandledSource: | 344 | except UnhandledSource: |
3004 | 256 | pass | 345 | pass |
3005 | 257 | if not installed_to: | 346 | if not installed_to: |
3006 | @@ -265,30 +354,6 @@ | |||
3007 | 265 | return install_remote(source) | 354 | return install_remote(source) |
3008 | 266 | 355 | ||
3009 | 267 | 356 | ||
3010 | 268 | class BaseFetchHandler(object): | ||
3011 | 269 | |||
3012 | 270 | """Base class for FetchHandler implementations in fetch plugins""" | ||
3013 | 271 | |||
3014 | 272 | def can_handle(self, source): | ||
3015 | 273 | """Returns True if the source can be handled. Otherwise returns | ||
3016 | 274 | a string explaining why it cannot""" | ||
3017 | 275 | return "Wrong source type" | ||
3018 | 276 | |||
3019 | 277 | def install(self, source): | ||
3020 | 278 | """Try to download and unpack the source. Return the path to the | ||
3021 | 279 | unpacked files or raise UnhandledSource.""" | ||
3022 | 280 | raise UnhandledSource("Wrong source type {}".format(source)) | ||
3023 | 281 | |||
3024 | 282 | def parse_url(self, url): | ||
3025 | 283 | return urlparse(url) | ||
3026 | 284 | |||
3027 | 285 | def base_url(self, url): | ||
3028 | 286 | """Return url without querystring or fragment""" | ||
3029 | 287 | parts = list(self.parse_url(url)) | ||
3030 | 288 | parts[4:] = ['' for i in parts[4:]] | ||
3031 | 289 | return urlunparse(parts) | ||
3032 | 290 | |||
3033 | 291 | |||
3034 | 292 | def plugins(fetch_handlers=None): | 357 | def plugins(fetch_handlers=None): |
3035 | 293 | if not fetch_handlers: | 358 | if not fetch_handlers: |
3036 | 294 | fetch_handlers = FETCH_HANDLERS | 359 | fetch_handlers = FETCH_HANDLERS |
3037 | @@ -306,3 +371,40 @@ | |||
3038 | 306 | log("FetchHandler {} not found, skipping plugin".format( | 371 | log("FetchHandler {} not found, skipping plugin".format( |
3039 | 307 | handler_name)) | 372 | handler_name)) |
3040 | 308 | return plugin_list | 373 | return plugin_list |
3041 | 374 | |||
3042 | 375 | |||
3043 | 376 | def _run_apt_command(cmd, fatal=False): | ||
3044 | 377 | """ | ||
3045 | 378 | Run an APT command, checking output and retrying if the fatal flag is set | ||
3046 | 379 | to True. | ||
3047 | 380 | |||
3048 | 381 | :param: cmd: str: The apt command to run. | ||
3049 | 382 | :param: fatal: bool: Whether the command's output should be checked and | ||
3050 | 383 | retried. | ||
3051 | 384 | """ | ||
3052 | 385 | env = os.environ.copy() | ||
3053 | 386 | |||
3054 | 387 | if 'DEBIAN_FRONTEND' not in env: | ||
3055 | 388 | env['DEBIAN_FRONTEND'] = 'noninteractive' | ||
3056 | 389 | |||
3057 | 390 | if fatal: | ||
3058 | 391 | retry_count = 0 | ||
3059 | 392 | result = None | ||
3060 | 393 | |||
3061 | 394 | # If the command is considered "fatal", we need to retry if the apt | ||
3062 | 395 | # lock was not acquired. | ||
3063 | 396 | |||
3064 | 397 | while result is None or result == APT_NO_LOCK: | ||
3065 | 398 | try: | ||
3066 | 399 | result = subprocess.check_call(cmd, env=env) | ||
3067 | 400 | except subprocess.CalledProcessError, e: | ||
3068 | 401 | retry_count = retry_count + 1 | ||
3069 | 402 | if retry_count > APT_NO_LOCK_RETRY_COUNT: | ||
3070 | 403 | raise | ||
3071 | 404 | result = e.returncode | ||
3072 | 405 | log("Couldn't acquire DPKG lock. Will retry in {} seconds." | ||
3073 | 406 | "".format(APT_NO_LOCK_RETRY_DELAY)) | ||
3074 | 407 | time.sleep(APT_NO_LOCK_RETRY_DELAY) | ||
3075 | 408 | |||
3076 | 409 | else: | ||
3077 | 410 | subprocess.call(cmd, env=env) | ||
3078 | 309 | 411 | ||
3079 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
3080 | --- hooks/charmhelpers/fetch/archiveurl.py 2014-03-27 12:33:12 +0000 | |||
3081 | +++ hooks/charmhelpers/fetch/archiveurl.py 2014-09-26 08:15:24 +0000 | |||
3082 | @@ -1,6 +1,8 @@ | |||
3083 | 1 | import os | 1 | import os |
3084 | 2 | import urllib2 | 2 | import urllib2 |
3085 | 3 | from urllib import urlretrieve | ||
3086 | 3 | import urlparse | 4 | import urlparse |
3087 | 5 | import hashlib | ||
3088 | 4 | 6 | ||
3089 | 5 | from charmhelpers.fetch import ( | 7 | from charmhelpers.fetch import ( |
3090 | 6 | BaseFetchHandler, | 8 | BaseFetchHandler, |
3091 | @@ -10,11 +12,19 @@ | |||
3092 | 10 | get_archive_handler, | 12 | get_archive_handler, |
3093 | 11 | extract, | 13 | extract, |
3094 | 12 | ) | 14 | ) |
3096 | 13 | from charmhelpers.core.host import mkdir | 15 | from charmhelpers.core.host import mkdir, check_hash |
3097 | 14 | 16 | ||
3098 | 15 | 17 | ||
3099 | 16 | class ArchiveUrlFetchHandler(BaseFetchHandler): | 18 | class ArchiveUrlFetchHandler(BaseFetchHandler): |
3101 | 17 | """Handler for archives via generic URLs""" | 19 | """ |
3102 | 20 | Handler to download archive files from arbitrary URLs. | ||
3103 | 21 | |||
3104 | 22 | Can fetch from http, https, ftp, and file URLs. | ||
3105 | 23 | |||
3106 | 24 | Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. | ||
3107 | 25 | |||
3108 | 26 | Installs the contents of the archive in $CHARM_DIR/fetched/. | ||
3109 | 27 | """ | ||
3110 | 18 | def can_handle(self, source): | 28 | def can_handle(self, source): |
3111 | 19 | url_parts = self.parse_url(source) | 29 | url_parts = self.parse_url(source) |
3112 | 20 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): | 30 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
3113 | @@ -24,6 +34,12 @@ | |||
3114 | 24 | return False | 34 | return False |
3115 | 25 | 35 | ||
3116 | 26 | def download(self, source, dest): | 36 | def download(self, source, dest): |
3117 | 37 | """ | ||
3118 | 38 | Download an archive file. | ||
3119 | 39 | |||
3120 | 40 | :param str source: URL pointing to an archive file. | ||
3121 | 41 | :param str dest: Local path location to download archive file to. | ||
3122 | 42 | """ | ||
3123 | 27 | # propagate all exceptions | 43 | # propagate all exceptions
3124 | 28 | # URLError, OSError, etc | 44 | # URLError, OSError, etc |
3125 | 29 | proto, netloc, path, params, query, fragment = urlparse.urlparse(source) | 45 | proto, netloc, path, params, query, fragment = urlparse.urlparse(source) |
3126 | @@ -48,7 +64,30 @@ | |||
3127 | 48 | os.unlink(dest) | 64 | os.unlink(dest) |
3128 | 49 | raise e | 65 | raise e |
3129 | 50 | 66 | ||
3131 | 51 | def install(self, source): | 67 | # Mandatory file validation via Sha1 or MD5 hashing. |
3132 | 68 | def download_and_validate(self, url, hashsum, validate="sha1"): | ||
3133 | 69 | tempfile, headers = urlretrieve(url) | ||
3134 | 70 | check_hash(tempfile, hashsum, validate) | ||
3135 | 71 | return tempfile | ||
3136 | 72 | |||
3137 | 73 | def install(self, source, dest=None, checksum=None, hash_type='sha1'): | ||
3138 | 74 | """ | ||
3139 | 75 | Download and install an archive file, with optional checksum validation. | ||
3140 | 76 | |||
3141 | 77 | The checksum can also be given on the `source` URL's fragment. | ||
3142 | 78 | For example:: | ||
3143 | 79 | |||
3144 | 80 | handler.install('http://example.com/file.tgz#sha1=deadbeef') | ||
3145 | 81 | |||
3146 | 82 | :param str source: URL pointing to an archive file. | ||
3147 | 83 | :param str dest: Local destination path to install to. If not given, | ||
3148 | 84 | installs to `$CHARM_DIR/archives/archive_file_name`. | ||
3149 | 85 | :param str checksum: If given, validate the archive file after download. | ||
3150 | 86 | :param str hash_type: Algorithm used to generate `checksum`. | ||
3151 | 87 | Can be any hash alrgorithm supported by :mod:`hashlib`, | ||
3152 | 88 | such as md5, sha1, sha256, sha512, etc. | ||
3153 | 89 | |||
3154 | 90 | """ | ||
3155 | 52 | url_parts = self.parse_url(source) | 91 | url_parts = self.parse_url(source) |
3156 | 53 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') | 92 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') |
3157 | 54 | if not os.path.exists(dest_dir): | 93 | if not os.path.exists(dest_dir): |
3158 | @@ -60,4 +99,10 @@ | |||
3159 | 60 | raise UnhandledSource(e.reason) | 99 | raise UnhandledSource(e.reason) |
3160 | 61 | except OSError as e: | 100 | except OSError as e: |
3161 | 62 | raise UnhandledSource(e.strerror) | 101 | raise UnhandledSource(e.strerror) |
3163 | 63 | return extract(dld_file) | 102 | options = urlparse.parse_qs(url_parts.fragment) |
3164 | 103 | for key, value in options.items(): | ||
3165 | 104 | if key in hashlib.algorithms: | ||
3166 | 105 | check_hash(dld_file, value, key) | ||
3167 | 106 | if checksum: | ||
3168 | 107 | check_hash(dld_file, checksum, hash_type) | ||
3169 | 108 | return extract(dld_file, dest) | ||
3170 | 64 | 109 | ||
3171 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
3172 | --- hooks/charmhelpers/fetch/bzrurl.py 2014-03-05 12:57:20 +0000 | |||
3173 | +++ hooks/charmhelpers/fetch/bzrurl.py 2014-09-26 08:15:24 +0000 | |||
3174 | @@ -39,7 +39,8 @@ | |||
3175 | 39 | def install(self, source): | 39 | def install(self, source): |
3176 | 40 | url_parts = self.parse_url(source) | 40 | url_parts = self.parse_url(source) |
3177 | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] |
3179 | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
3180 | 43 | branch_name) | ||
3181 | 43 | if not os.path.exists(dest_dir): | 44 | if not os.path.exists(dest_dir): |
3182 | 44 | mkdir(dest_dir, perms=0755) | 45 | mkdir(dest_dir, perms=0755) |
3183 | 45 | try: | 46 | try: |
3184 | 46 | 47 | ||
3185 | === modified file 'hooks/rabbit_utils.py' | |||
3186 | --- hooks/rabbit_utils.py 2014-06-11 19:58:50 +0000 | |||
3187 | +++ hooks/rabbit_utils.py 2014-09-26 08:15:24 +0000 | |||
3188 | @@ -6,7 +6,6 @@ | |||
3189 | 6 | import subprocess | 6 | import subprocess |
3190 | 7 | import glob | 7 | import glob |
3191 | 8 | from lib.utils import render_template | 8 | from lib.utils import render_template |
3192 | 9 | import apt_pkg as apt | ||
3193 | 10 | 9 | ||
3194 | 11 | from charmhelpers.contrib.openstack.utils import ( | 10 | from charmhelpers.contrib.openstack.utils import ( |
3195 | 12 | get_hostname, | 11 | get_hostname, |
3196 | @@ -21,7 +20,12 @@ | |||
3197 | 21 | service_name | 20 | service_name |
3198 | 22 | ) | 21 | ) |
3199 | 23 | 22 | ||
3201 | 24 | from charmhelpers.core.host import pwgen, mkdir, write_file | 23 | from charmhelpers.core.host import ( |
3202 | 24 | pwgen, | ||
3203 | 25 | mkdir, | ||
3204 | 26 | write_file, | ||
3205 | 27 | cmp_pkgrevno, | ||
3206 | 28 | ) | ||
3207 | 25 | 29 | ||
3208 | 26 | from charmhelpers.contrib.peerstorage import ( | 30 | from charmhelpers.contrib.peerstorage import ( |
3209 | 27 | peer_store, | 31 | peer_store, |
3210 | @@ -103,21 +107,9 @@ | |||
3211 | 103 | subprocess.check_call(cmd) | 107 | subprocess.check_call(cmd) |
3212 | 104 | 108 | ||
3213 | 105 | 109 | ||
3214 | 106 | def compare_version(base_version): | ||
3215 | 107 | apt.init() | ||
3216 | 108 | cache = apt.Cache() | ||
3217 | 109 | pkg = cache['rabbitmq-server'] | ||
3218 | 110 | if pkg.current_ver: | ||
3219 | 111 | return apt.version_compare( | ||
3220 | 112 | apt.upstream_version(pkg.current_ver.ver_str), | ||
3221 | 113 | base_version) | ||
3222 | 114 | else: | ||
3223 | 115 | return False | ||
3224 | 116 | |||
3225 | 117 | |||
3226 | 118 | def cluster_with(): | 110 | def cluster_with(): |
3227 | 119 | log('Clustering with new node') | 111 | log('Clustering with new node') |
3229 | 120 | if compare_version('3.0.1') >= 0: | 112 | if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0: |
3230 | 121 | cluster_cmd = 'join_cluster' | 113 | cluster_cmd = 'join_cluster' |
3231 | 122 | else: | 114 | else: |
3232 | 123 | cluster_cmd = 'cluster' | 115 | cluster_cmd = 'cluster' |
3233 | @@ -167,7 +159,7 @@ | |||
3234 | 167 | cmd = [RABBITMQ_CTL, 'start_app'] | 159 | cmd = [RABBITMQ_CTL, 'start_app'] |
3235 | 168 | subprocess.check_call(cmd) | 160 | subprocess.check_call(cmd) |
3236 | 169 | log('Host clustered with %s.' % node) | 161 | log('Host clustered with %s.' % node) |
3238 | 170 | if compare_version('3.0.1') >= 0: | 162 | if cmp_pkgrevno('rabbitmq-server', '3.0.1') >= 0: |
3239 | 171 | cmd = [RABBITMQ_CTL, 'set_policy', 'HA', | 163 | cmd = [RABBITMQ_CTL, 'set_policy', 'HA', |
3240 | 172 | '^(?!amq\.).*', '{"ha-mode": "all"}'] | 164 | '^(?!amq\.).*', '{"ha-mode": "all"}'] |
3241 | 173 | subprocess.check_call(cmd) | 165 | subprocess.check_call(cmd) |
This looks great! I deployed, it worked fine. (since it was more than just a charm-helpers sync I wanted to check). Thanks, Chris!