Merge lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup into lp:charms/trusty/openstack-zeromq
- Trusty Tahr (14.04)
- tidyup
- Merge into trunk
Proposed by
Liam Young
Status: | Rejected |
---|---|
Rejected by: | James Page |
Proposed branch: | lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup |
Merge into: | lp:charms/trusty/openstack-zeromq |
Diff against target: |
2562 lines (+1786/-195) 24 files modified
charm-helpers-hooks.yaml (+2/-0) hooks/charmhelpers/contrib/hahelpers/apache.py (+66/-0) hooks/charmhelpers/contrib/hahelpers/cluster.py (+225/-0) hooks/charmhelpers/contrib/network/ip.py (+194/-19) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+38/-8) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+5/-4) hooks/charmhelpers/contrib/openstack/context.py (+204/-69) hooks/charmhelpers/contrib/openstack/ip.py (+1/-1) hooks/charmhelpers/contrib/openstack/utils.py (+28/-1) hooks/charmhelpers/contrib/storage/linux/ceph.py (+388/-0) hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0) hooks/charmhelpers/contrib/storage/linux/lvm.py (+88/-0) hooks/charmhelpers/contrib/storage/linux/utils.py (+53/-0) hooks/charmhelpers/core/hookenv.py (+17/-4) hooks/charmhelpers/core/host.py (+38/-6) hooks/charmhelpers/core/services/helpers.py (+119/-5) hooks/charmhelpers/core/sysctl.py (+34/-0) hooks/charmhelpers/fetch/__init__.py (+19/-5) hooks/charmhelpers/fetch/archiveurl.py (+49/-4) hooks/zeromq_context.py (+41/-0) hooks/zeromq_hooks.py (+17/-56) hooks/zeromq_utils.py (+78/-0) templates/matchmaker_ring.json (+1/-0) tests/charmhelpers/contrib/amulet/deployment.py (+19/-13) |
To merge this branch: | bzr merge lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
James Page | Needs Fixing | ||
Review via email: mp+238712@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Revision history for this message
James Page (james-page) : | # |
review:
Needs Fixing
Unmerged revisions
- 22. By Liam Young
-
Tidy up charm and bring format in line with other os charms
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'charm-helpers-hooks.yaml' | |||
2 | --- charm-helpers-hooks.yaml 2014-09-08 13:55:11 +0000 | |||
3 | +++ charm-helpers-hooks.yaml 2014-10-17 13:06:36 +0000 | |||
4 | @@ -5,3 +5,5 @@ | |||
5 | 5 | - fetch | 5 | - fetch |
6 | 6 | - contrib.openstack | 6 | - contrib.openstack |
7 | 7 | - contrib.network | 7 | - contrib.network |
8 | 8 | - contrib.hahelpers | ||
9 | 9 | - contrib.storage | ||
10 | 8 | 10 | ||
11 | === added directory 'hooks/charmhelpers/contrib/hahelpers' | |||
12 | === added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py' | |||
13 | === added file 'hooks/charmhelpers/contrib/hahelpers/apache.py' | |||
14 | --- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000 | |||
15 | +++ hooks/charmhelpers/contrib/hahelpers/apache.py 2014-10-17 13:06:36 +0000 | |||
16 | @@ -0,0 +1,66 @@ | |||
17 | 1 | # | ||
18 | 2 | # Copyright 2012 Canonical Ltd. | ||
19 | 3 | # | ||
20 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
21 | 5 | # | ||
22 | 6 | # Authors: | ||
23 | 7 | # James Page <james.page@ubuntu.com> | ||
24 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
25 | 9 | # | ||
26 | 10 | |||
27 | 11 | import subprocess | ||
28 | 12 | |||
29 | 13 | from charmhelpers.core.hookenv import ( | ||
30 | 14 | config as config_get, | ||
31 | 15 | relation_get, | ||
32 | 16 | relation_ids, | ||
33 | 17 | related_units as relation_list, | ||
34 | 18 | log, | ||
35 | 19 | INFO, | ||
36 | 20 | ) | ||
37 | 21 | |||
38 | 22 | |||
39 | 23 | def get_cert(cn=None): | ||
40 | 24 | # TODO: deal with multiple https endpoints via charm config | ||
41 | 25 | cert = config_get('ssl_cert') | ||
42 | 26 | key = config_get('ssl_key') | ||
43 | 27 | if not (cert and key): | ||
44 | 28 | log("Inspecting identity-service relations for SSL certificate.", | ||
45 | 29 | level=INFO) | ||
46 | 30 | cert = key = None | ||
47 | 31 | if cn: | ||
48 | 32 | ssl_cert_attr = 'ssl_cert_{}'.format(cn) | ||
49 | 33 | ssl_key_attr = 'ssl_key_{}'.format(cn) | ||
50 | 34 | else: | ||
51 | 35 | ssl_cert_attr = 'ssl_cert' | ||
52 | 36 | ssl_key_attr = 'ssl_key' | ||
53 | 37 | for r_id in relation_ids('identity-service'): | ||
54 | 38 | for unit in relation_list(r_id): | ||
55 | 39 | if not cert: | ||
56 | 40 | cert = relation_get(ssl_cert_attr, | ||
57 | 41 | rid=r_id, unit=unit) | ||
58 | 42 | if not key: | ||
59 | 43 | key = relation_get(ssl_key_attr, | ||
60 | 44 | rid=r_id, unit=unit) | ||
61 | 45 | return (cert, key) | ||
62 | 46 | |||
63 | 47 | |||
64 | 48 | def get_ca_cert(): | ||
65 | 49 | ca_cert = config_get('ssl_ca') | ||
66 | 50 | if ca_cert is None: | ||
67 | 51 | log("Inspecting identity-service relations for CA SSL certificate.", | ||
68 | 52 | level=INFO) | ||
69 | 53 | for r_id in relation_ids('identity-service'): | ||
70 | 54 | for unit in relation_list(r_id): | ||
71 | 55 | if ca_cert is None: | ||
72 | 56 | ca_cert = relation_get('ca_cert', | ||
73 | 57 | rid=r_id, unit=unit) | ||
74 | 58 | return ca_cert | ||
75 | 59 | |||
76 | 60 | |||
77 | 61 | def install_ca_cert(ca_cert): | ||
78 | 62 | if ca_cert: | ||
79 | 63 | with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', | ||
80 | 64 | 'w') as crt: | ||
81 | 65 | crt.write(ca_cert) | ||
82 | 66 | subprocess.check_call(['update-ca-certificates', '--fresh']) | ||
83 | 0 | 67 | ||
84 | === added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
85 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000 | |||
86 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-17 13:06:36 +0000 | |||
87 | @@ -0,0 +1,225 @@ | |||
88 | 1 | # | ||
89 | 2 | # Copyright 2012 Canonical Ltd. | ||
90 | 3 | # | ||
91 | 4 | # Authors: | ||
92 | 5 | # James Page <james.page@ubuntu.com> | ||
93 | 6 | # Adam Gandelman <adamg@ubuntu.com> | ||
94 | 7 | # | ||
95 | 8 | |||
96 | 9 | """ | ||
97 | 10 | Helpers for clustering and determining "cluster leadership" and other | ||
98 | 11 | clustering-related helpers. | ||
99 | 12 | """ | ||
100 | 13 | |||
101 | 14 | import subprocess | ||
102 | 15 | import os | ||
103 | 16 | |||
104 | 17 | from socket import gethostname as get_unit_hostname | ||
105 | 18 | |||
106 | 19 | from charmhelpers.core.hookenv import ( | ||
107 | 20 | log, | ||
108 | 21 | relation_ids, | ||
109 | 22 | related_units as relation_list, | ||
110 | 23 | relation_get, | ||
111 | 24 | config as config_get, | ||
112 | 25 | INFO, | ||
113 | 26 | ERROR, | ||
114 | 27 | WARNING, | ||
115 | 28 | unit_get, | ||
116 | 29 | ) | ||
117 | 30 | |||
118 | 31 | |||
119 | 32 | class HAIncompleteConfig(Exception): | ||
120 | 33 | pass | ||
121 | 34 | |||
122 | 35 | |||
123 | 36 | def is_elected_leader(resource): | ||
124 | 37 | """ | ||
125 | 38 | Returns True if the charm executing this is the elected cluster leader. | ||
126 | 39 | |||
127 | 40 | It relies on two mechanisms to determine leadership: | ||
128 | 41 | 1. If the charm is part of a corosync cluster, call corosync to | ||
129 | 42 | determine leadership. | ||
130 | 43 | 2. If the charm is not part of a corosync cluster, the leader is | ||
131 | 44 | determined as being "the alive unit with the lowest unit number". In | ||
132 | 45 | other words, the oldest surviving unit. | ||
133 | 46 | """ | ||
134 | 47 | if is_clustered(): | ||
135 | 48 | if not is_crm_leader(resource): | ||
136 | 49 | log('Deferring action to CRM leader.', level=INFO) | ||
137 | 50 | return False | ||
138 | 51 | else: | ||
139 | 52 | peers = peer_units() | ||
140 | 53 | if peers and not oldest_peer(peers): | ||
141 | 54 | log('Deferring action to oldest service unit.', level=INFO) | ||
142 | 55 | return False | ||
143 | 56 | return True | ||
144 | 57 | |||
145 | 58 | |||
146 | 59 | def is_clustered(): | ||
147 | 60 | for r_id in (relation_ids('ha') or []): | ||
148 | 61 | for unit in (relation_list(r_id) or []): | ||
149 | 62 | clustered = relation_get('clustered', | ||
150 | 63 | rid=r_id, | ||
151 | 64 | unit=unit) | ||
152 | 65 | if clustered: | ||
153 | 66 | return True | ||
154 | 67 | return False | ||
155 | 68 | |||
156 | 69 | |||
157 | 70 | def is_crm_leader(resource): | ||
158 | 71 | """ | ||
159 | 72 | Returns True if the charm calling this is the elected corosync leader, | ||
160 | 73 | as returned by calling the external "crm" command. | ||
161 | 74 | """ | ||
162 | 75 | cmd = [ | ||
163 | 76 | "crm", "resource", | ||
164 | 77 | "show", resource | ||
165 | 78 | ] | ||
166 | 79 | try: | ||
167 | 80 | status = subprocess.check_output(cmd) | ||
168 | 81 | except subprocess.CalledProcessError: | ||
169 | 82 | return False | ||
170 | 83 | else: | ||
171 | 84 | if get_unit_hostname() in status: | ||
172 | 85 | return True | ||
173 | 86 | else: | ||
174 | 87 | return False | ||
175 | 88 | |||
176 | 89 | |||
177 | 90 | def is_leader(resource): | ||
178 | 91 | log("is_leader is deprecated. Please consider using is_crm_leader " | ||
179 | 92 | "instead.", level=WARNING) | ||
180 | 93 | return is_crm_leader(resource) | ||
181 | 94 | |||
182 | 95 | |||
183 | 96 | def peer_units(peer_relation="cluster"): | ||
184 | 97 | peers = [] | ||
185 | 98 | for r_id in (relation_ids(peer_relation) or []): | ||
186 | 99 | for unit in (relation_list(r_id) or []): | ||
187 | 100 | peers.append(unit) | ||
188 | 101 | return peers | ||
189 | 102 | |||
190 | 103 | |||
191 | 104 | def peer_ips(peer_relation='cluster', addr_key='private-address'): | ||
192 | 105 | '''Return a dict of peers and their private-address''' | ||
193 | 106 | peers = {} | ||
194 | 107 | for r_id in relation_ids(peer_relation): | ||
195 | 108 | for unit in relation_list(r_id): | ||
196 | 109 | peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) | ||
197 | 110 | return peers | ||
198 | 111 | |||
199 | 112 | |||
200 | 113 | def oldest_peer(peers): | ||
201 | 114 | """Determines who the oldest peer is by comparing unit numbers.""" | ||
202 | 115 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) | ||
203 | 116 | for peer in peers: | ||
204 | 117 | remote_unit_no = int(peer.split('/')[1]) | ||
205 | 118 | if remote_unit_no < local_unit_no: | ||
206 | 119 | return False | ||
207 | 120 | return True | ||
208 | 121 | |||
209 | 122 | |||
210 | 123 | def eligible_leader(resource): | ||
211 | 124 | log("eligible_leader is deprecated. Please consider using " | ||
212 | 125 | "is_elected_leader instead.", level=WARNING) | ||
213 | 126 | return is_elected_leader(resource) | ||
214 | 127 | |||
215 | 128 | |||
216 | 129 | def https(): | ||
217 | 130 | ''' | ||
218 | 131 | Determines whether enough data has been provided in configuration | ||
219 | 132 | or relation data to configure HTTPS | ||
220 | 133 | . | ||
221 | 134 | returns: boolean | ||
222 | 135 | ''' | ||
223 | 136 | if config_get('use-https') == "yes": | ||
224 | 137 | return True | ||
225 | 138 | if config_get('ssl_cert') and config_get('ssl_key'): | ||
226 | 139 | return True | ||
227 | 140 | for r_id in relation_ids('identity-service'): | ||
228 | 141 | for unit in relation_list(r_id): | ||
229 | 142 | # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN | ||
230 | 143 | rel_state = [ | ||
231 | 144 | relation_get('https_keystone', rid=r_id, unit=unit), | ||
232 | 145 | relation_get('ca_cert', rid=r_id, unit=unit), | ||
233 | 146 | ] | ||
234 | 147 | # NOTE: works around (LP: #1203241) | ||
235 | 148 | if (None not in rel_state) and ('' not in rel_state): | ||
236 | 149 | return True | ||
237 | 150 | return False | ||
238 | 151 | |||
239 | 152 | |||
240 | 153 | def determine_api_port(public_port): | ||
241 | 154 | ''' | ||
242 | 155 | Determine correct API server listening port based on | ||
243 | 156 | existence of HTTPS reverse proxy and/or haproxy. | ||
244 | 157 | |||
245 | 158 | public_port: int: standard public port for given service | ||
246 | 159 | |||
247 | 160 | returns: int: the correct listening port for the API service | ||
248 | 161 | ''' | ||
249 | 162 | i = 0 | ||
250 | 163 | if len(peer_units()) > 0 or is_clustered(): | ||
251 | 164 | i += 1 | ||
252 | 165 | if https(): | ||
253 | 166 | i += 1 | ||
254 | 167 | return public_port - (i * 10) | ||
255 | 168 | |||
256 | 169 | |||
257 | 170 | def determine_apache_port(public_port): | ||
258 | 171 | ''' | ||
259 | 172 | Description: Determine correct apache listening port based on public IP + | ||
260 | 173 | state of the cluster. | ||
261 | 174 | |||
262 | 175 | public_port: int: standard public port for given service | ||
263 | 176 | |||
264 | 177 | returns: int: the correct listening port for the HAProxy service | ||
265 | 178 | ''' | ||
266 | 179 | i = 0 | ||
267 | 180 | if len(peer_units()) > 0 or is_clustered(): | ||
268 | 181 | i += 1 | ||
269 | 182 | return public_port - (i * 10) | ||
270 | 183 | |||
271 | 184 | |||
272 | 185 | def get_hacluster_config(): | ||
273 | 186 | ''' | ||
274 | 187 | Obtains all relevant configuration from charm configuration required | ||
275 | 188 | for initiating a relation to hacluster: | ||
276 | 189 | |||
277 | 190 | ha-bindiface, ha-mcastport, vip | ||
278 | 191 | |||
279 | 192 | returns: dict: A dict containing settings keyed by setting name. | ||
280 | 193 | raises: HAIncompleteConfig if settings are missing. | ||
281 | 194 | ''' | ||
282 | 195 | settings = ['ha-bindiface', 'ha-mcastport', 'vip'] | ||
283 | 196 | conf = {} | ||
284 | 197 | for setting in settings: | ||
285 | 198 | conf[setting] = config_get(setting) | ||
286 | 199 | missing = [] | ||
287 | 200 | [missing.append(s) for s, v in conf.iteritems() if v is None] | ||
288 | 201 | if missing: | ||
289 | 202 | log('Insufficient config data to configure hacluster.', level=ERROR) | ||
290 | 203 | raise HAIncompleteConfig | ||
291 | 204 | return conf | ||
292 | 205 | |||
293 | 206 | |||
294 | 207 | def canonical_url(configs, vip_setting='vip'): | ||
295 | 208 | ''' | ||
296 | 209 | Returns the correct HTTP URL to this host given the state of HTTPS | ||
297 | 210 | configuration and hacluster. | ||
298 | 211 | |||
299 | 212 | :configs : OSTemplateRenderer: A config templating object to inspect for | ||
300 | 213 | a complete https context. | ||
301 | 214 | |||
302 | 215 | :vip_setting: str: Setting in charm config that specifies | ||
303 | 216 | VIP address. | ||
304 | 217 | ''' | ||
305 | 218 | scheme = 'http' | ||
306 | 219 | if 'https' in configs.complete_contexts(): | ||
307 | 220 | scheme = 'https' | ||
308 | 221 | if is_clustered(): | ||
309 | 222 | addr = config_get(vip_setting) | ||
310 | 223 | else: | ||
311 | 224 | addr = unit_get('private-address') | ||
312 | 225 | return '%s://%s' % (scheme, addr) | ||
313 | 0 | 226 | ||
314 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
315 | --- hooks/charmhelpers/contrib/network/ip.py 2014-09-08 14:18:52 +0000 | |||
316 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-10-17 13:06:36 +0000 | |||
317 | @@ -1,10 +1,16 @@ | |||
318 | 1 | import glob | ||
319 | 2 | import re | ||
320 | 3 | import subprocess | ||
321 | 1 | import sys | 4 | import sys |
322 | 2 | 5 | ||
323 | 3 | from functools import partial | 6 | from functools import partial |
324 | 4 | 7 | ||
325 | 8 | from charmhelpers.core.hookenv import unit_get | ||
326 | 5 | from charmhelpers.fetch import apt_install | 9 | from charmhelpers.fetch import apt_install |
327 | 6 | from charmhelpers.core.hookenv import ( | 10 | from charmhelpers.core.hookenv import ( |
329 | 7 | ERROR, log, config, | 11 | WARNING, |
330 | 12 | ERROR, | ||
331 | 13 | log | ||
332 | 8 | ) | 14 | ) |
333 | 9 | 15 | ||
334 | 10 | try: | 16 | try: |
335 | @@ -51,6 +57,8 @@ | |||
336 | 51 | else: | 57 | else: |
337 | 52 | if fatal: | 58 | if fatal: |
338 | 53 | not_found_error_out() | 59 | not_found_error_out() |
339 | 60 | else: | ||
340 | 61 | return None | ||
341 | 54 | 62 | ||
342 | 55 | _validate_cidr(network) | 63 | _validate_cidr(network) |
343 | 56 | network = netaddr.IPNetwork(network) | 64 | network = netaddr.IPNetwork(network) |
344 | @@ -132,7 +140,8 @@ | |||
345 | 132 | if address.version == 4 and netifaces.AF_INET in addresses: | 140 | if address.version == 4 and netifaces.AF_INET in addresses: |
346 | 133 | addr = addresses[netifaces.AF_INET][0]['addr'] | 141 | addr = addresses[netifaces.AF_INET][0]['addr'] |
347 | 134 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | 142 | netmask = addresses[netifaces.AF_INET][0]['netmask'] |
349 | 135 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | 143 | network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
350 | 144 | cidr = network.cidr | ||
351 | 136 | if address in cidr: | 145 | if address in cidr: |
352 | 137 | if key == 'iface': | 146 | if key == 'iface': |
353 | 138 | return iface | 147 | return iface |
354 | @@ -141,11 +150,14 @@ | |||
355 | 141 | if address.version == 6 and netifaces.AF_INET6 in addresses: | 150 | if address.version == 6 and netifaces.AF_INET6 in addresses: |
356 | 142 | for addr in addresses[netifaces.AF_INET6]: | 151 | for addr in addresses[netifaces.AF_INET6]: |
357 | 143 | if not addr['addr'].startswith('fe80'): | 152 | if not addr['addr'].startswith('fe80'): |
360 | 144 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | 153 | network = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
361 | 145 | addr['netmask'])) | 154 | addr['netmask'])) |
362 | 155 | cidr = network.cidr | ||
363 | 146 | if address in cidr: | 156 | if address in cidr: |
364 | 147 | if key == 'iface': | 157 | if key == 'iface': |
365 | 148 | return iface | 158 | return iface |
366 | 159 | elif key == 'netmask' and cidr: | ||
367 | 160 | return str(cidr).split('/')[1] | ||
368 | 149 | else: | 161 | else: |
369 | 150 | return addr[key] | 162 | return addr[key] |
370 | 151 | return None | 163 | return None |
371 | @@ -156,19 +168,182 @@ | |||
372 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 168 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
373 | 157 | 169 | ||
374 | 158 | 170 | ||
376 | 159 | def get_ipv6_addr(iface="eth0"): | 171 | def format_ipv6_addr(address): |
377 | 172 | """ | ||
378 | 173 | IPv6 needs to be wrapped with [] in url link to parse correctly. | ||
379 | 174 | """ | ||
380 | 175 | if is_ipv6(address): | ||
381 | 176 | address = "[%s]" % address | ||
382 | 177 | else: | ||
383 | 178 | log("Not a valid ipv6 address: %s" % address, level=WARNING) | ||
384 | 179 | address = None | ||
385 | 180 | |||
386 | 181 | return address | ||
387 | 182 | |||
388 | 183 | |||
389 | 184 | def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, | ||
390 | 185 | fatal=True, exc_list=None): | ||
391 | 186 | """ | ||
392 | 187 | Return the assigned IP address for a given interface, if any, or []. | ||
393 | 188 | """ | ||
394 | 189 | # Extract nic if passed /dev/ethX | ||
395 | 190 | if '/' in iface: | ||
396 | 191 | iface = iface.split('/')[-1] | ||
397 | 192 | if not exc_list: | ||
398 | 193 | exc_list = [] | ||
399 | 160 | try: | 194 | try: |
414 | 161 | iface_addrs = netifaces.ifaddresses(iface) | 195 | inet_num = getattr(netifaces, inet_type) |
415 | 162 | if netifaces.AF_INET6 not in iface_addrs: | 196 | except AttributeError: |
416 | 163 | raise Exception("Interface '%s' doesn't have an ipv6 address." % iface) | 197 | raise Exception('Unknown inet type ' + str(inet_type)) |
417 | 164 | 198 | ||
418 | 165 | addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6] | 199 | interfaces = netifaces.interfaces() |
419 | 166 | ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80') | 200 | if inc_aliases: |
420 | 167 | and config('vip') != a['addr']] | 201 | ifaces = [] |
421 | 168 | if not ipv6_addr: | 202 | for _iface in interfaces: |
422 | 169 | raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) | 203 | if iface == _iface or _iface.split(':')[0] == iface: |
423 | 170 | 204 | ifaces.append(_iface) | |
424 | 171 | return ipv6_addr[0] | 205 | if fatal and not ifaces: |
425 | 172 | 206 | raise Exception("Invalid interface '%s'" % iface) | |
426 | 173 | except ValueError: | 207 | ifaces.sort() |
427 | 174 | raise ValueError("Invalid interface '%s'" % iface) | 208 | else: |
428 | 209 | if iface not in interfaces: | ||
429 | 210 | if fatal: | ||
430 | 211 | raise Exception("%s not found " % (iface)) | ||
431 | 212 | else: | ||
432 | 213 | return [] | ||
433 | 214 | else: | ||
434 | 215 | ifaces = [iface] | ||
435 | 216 | |||
436 | 217 | addresses = [] | ||
437 | 218 | for netiface in ifaces: | ||
438 | 219 | net_info = netifaces.ifaddresses(netiface) | ||
439 | 220 | if inet_num in net_info: | ||
440 | 221 | for entry in net_info[inet_num]: | ||
441 | 222 | if 'addr' in entry and entry['addr'] not in exc_list: | ||
442 | 223 | addresses.append(entry['addr']) | ||
443 | 224 | if fatal and not addresses: | ||
444 | 225 | raise Exception("Interface '%s' doesn't have any %s addresses." % | ||
445 | 226 | (iface, inet_type)) | ||
446 | 227 | return addresses | ||
447 | 228 | |||
448 | 229 | get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') | ||
449 | 230 | |||
450 | 231 | |||
451 | 232 | def get_iface_from_addr(addr): | ||
452 | 233 | """Work out on which interface the provided address is configured.""" | ||
453 | 234 | for iface in netifaces.interfaces(): | ||
454 | 235 | addresses = netifaces.ifaddresses(iface) | ||
455 | 236 | for inet_type in addresses: | ||
456 | 237 | for _addr in addresses[inet_type]: | ||
457 | 238 | _addr = _addr['addr'] | ||
458 | 239 | # link local | ||
459 | 240 | ll_key = re.compile("(.+)%.*") | ||
460 | 241 | raw = re.match(ll_key, _addr) | ||
461 | 242 | if raw: | ||
462 | 243 | _addr = raw.group(1) | ||
463 | 244 | if _addr == addr: | ||
464 | 245 | log("Address '%s' is configured on iface '%s'" % | ||
465 | 246 | (addr, iface)) | ||
466 | 247 | return iface | ||
467 | 248 | |||
468 | 249 | msg = "Unable to infer net iface on which '%s' is configured" % (addr) | ||
469 | 250 | raise Exception(msg) | ||
470 | 251 | |||
471 | 252 | |||
472 | 253 | def sniff_iface(f): | ||
473 | 254 | """If no iface provided, inject net iface inferred from unit private | ||
474 | 255 | address. | ||
475 | 256 | """ | ||
476 | 257 | def iface_sniffer(*args, **kwargs): | ||
477 | 258 | if not kwargs.get('iface', None): | ||
478 | 259 | kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) | ||
479 | 260 | |||
480 | 261 | return f(*args, **kwargs) | ||
481 | 262 | |||
482 | 263 | return iface_sniffer | ||
483 | 264 | |||
484 | 265 | |||
485 | 266 | @sniff_iface | ||
486 | 267 | def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, | ||
487 | 268 | dynamic_only=True): | ||
488 | 269 | """Get assigned IPv6 address for a given interface. | ||
489 | 270 | |||
490 | 271 | Returns list of addresses found. If no address found, returns empty list. | ||
491 | 272 | |||
492 | 273 | If iface is None, we infer the current primary interface by doing a reverse | ||
493 | 274 | lookup on the unit private-address. | ||
494 | 275 | |||
495 | 276 | We currently only support scope global IPv6 addresses i.e. non-temporary | ||
496 | 277 | addresses. If no global IPv6 address is found, return the first one found | ||
497 | 278 | in the ipv6 address list. | ||
498 | 279 | """ | ||
499 | 280 | addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', | ||
500 | 281 | inc_aliases=inc_aliases, fatal=fatal, | ||
501 | 282 | exc_list=exc_list) | ||
502 | 283 | |||
503 | 284 | if addresses: | ||
504 | 285 | global_addrs = [] | ||
505 | 286 | for addr in addresses: | ||
506 | 287 | key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") | ||
507 | 288 | m = re.match(key_scope_link_local, addr) | ||
508 | 289 | if m: | ||
509 | 290 | eui_64_mac = m.group(1) | ||
510 | 291 | iface = m.group(2) | ||
511 | 292 | else: | ||
512 | 293 | global_addrs.append(addr) | ||
513 | 294 | |||
514 | 295 | if global_addrs: | ||
515 | 296 | # Make sure any found global addresses are not temporary | ||
516 | 297 | cmd = ['ip', 'addr', 'show', iface] | ||
517 | 298 | out = subprocess.check_output(cmd) | ||
518 | 299 | if dynamic_only: | ||
519 | 300 | key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") | ||
520 | 301 | else: | ||
521 | 302 | key = re.compile("inet6 (.+)/[0-9]+ scope global.*") | ||
522 | 303 | |||
523 | 304 | addrs = [] | ||
524 | 305 | for line in out.split('\n'): | ||
525 | 306 | line = line.strip() | ||
526 | 307 | m = re.match(key, line) | ||
527 | 308 | if m and 'temporary' not in line: | ||
528 | 309 | # Return the first valid address we find | ||
529 | 310 | for addr in global_addrs: | ||
530 | 311 | if m.group(1) == addr: | ||
531 | 312 | if not dynamic_only or \ | ||
532 | 313 | m.group(1).endswith(eui_64_mac): | ||
533 | 314 | addrs.append(addr) | ||
534 | 315 | |||
535 | 316 | if addrs: | ||
536 | 317 | return addrs | ||
537 | 318 | |||
538 | 319 | if fatal: | ||
539 | 320 | raise Exception("Interface '%s' doesn't have a scope global " | ||
540 | 321 | "non-temporary ipv6 address." % iface) | ||
541 | 322 | |||
542 | 323 | return [] | ||
543 | 324 | |||
544 | 325 | |||
545 | 326 | def get_bridges(vnic_dir='/sys/devices/virtual/net'): | ||
546 | 327 | """ | ||
547 | 328 | Return a list of bridges on the system or [] | ||
548 | 329 | """ | ||
549 | 330 | b_rgex = vnic_dir + '/*/bridge' | ||
550 | 331 | return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] | ||
551 | 332 | |||
552 | 333 | |||
553 | 334 | def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): | ||
554 | 335 | """ | ||
555 | 336 | Return a list of nics comprising a given bridge on the system or [] | ||
556 | 337 | """ | ||
557 | 338 | brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) | ||
558 | 339 | return [x.split('/')[-1] for x in glob.glob(brif_rgex)] | ||
559 | 340 | |||
560 | 341 | |||
561 | 342 | def is_bridge_member(nic): | ||
562 | 343 | """ | ||
563 | 344 | Check if a given nic is a member of a bridge | ||
564 | 345 | """ | ||
565 | 346 | for bridge in get_bridges(): | ||
566 | 347 | if nic in get_bridge_nics(bridge): | ||
567 | 348 | return True | ||
568 | 349 | return False | ||
569 | 175 | 350 | ||
570 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
571 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-02 11:19:19 +0000 | |||
572 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-17 13:06:36 +0000 | |||
573 | @@ -10,32 +10,62 @@ | |||
574 | 10 | that is specifically for use by OpenStack charms. | 10 | that is specifically for use by OpenStack charms. |
575 | 11 | """ | 11 | """ |
576 | 12 | 12 | ||
578 | 13 | def __init__(self, series=None, openstack=None, source=None): | 13 | def __init__(self, series=None, openstack=None, source=None, stable=True): |
579 | 14 | """Initialize the deployment environment.""" | 14 | """Initialize the deployment environment.""" |
580 | 15 | super(OpenStackAmuletDeployment, self).__init__(series) | 15 | super(OpenStackAmuletDeployment, self).__init__(series) |
581 | 16 | self.openstack = openstack | 16 | self.openstack = openstack |
582 | 17 | self.source = source | 17 | self.source = source |
583 | 18 | self.stable = stable | ||
584 | 19 | # Note(coreycb): this needs to be changed when new next branches come | ||
585 | 20 | # out. | ||
586 | 21 | self.current_next = "trusty" | ||
587 | 22 | |||
588 | 23 | def _determine_branch_locations(self, other_services): | ||
589 | 24 | """Determine the branch locations for the other services. | ||
590 | 25 | |||
591 | 26 | Determine if the local branch being tested is derived from its | ||
592 | 27 | stable or next (dev) branch, and based on this, use the corresponding | ||
593 | 28 | stable or next branches for the other_services.""" | ||
594 | 29 | base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] | ||
595 | 30 | |||
596 | 31 | if self.stable: | ||
597 | 32 | for svc in other_services: | ||
598 | 33 | temp = 'lp:charms/{}' | ||
599 | 34 | svc['location'] = temp.format(svc['name']) | ||
600 | 35 | else: | ||
601 | 36 | for svc in other_services: | ||
602 | 37 | if svc['name'] in base_charms: | ||
603 | 38 | temp = 'lp:charms/{}' | ||
604 | 39 | svc['location'] = temp.format(svc['name']) | ||
605 | 40 | else: | ||
606 | 41 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' | ||
607 | 42 | svc['location'] = temp.format(self.current_next, | ||
608 | 43 | svc['name']) | ||
609 | 44 | return other_services | ||
610 | 18 | 45 | ||
611 | 19 | def _add_services(self, this_service, other_services): | 46 | def _add_services(self, this_service, other_services): |
613 | 20 | """Add services to the deployment and set openstack-origin.""" | 47 | """Add services to the deployment and set openstack-origin/source.""" |
614 | 48 | other_services = self._determine_branch_locations(other_services) | ||
615 | 49 | |||
616 | 21 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | 50 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
617 | 22 | other_services) | 51 | other_services) |
619 | 23 | name = 0 | 52 | |
620 | 24 | services = other_services | 53 | services = other_services |
621 | 25 | services.append(this_service) | 54 | services.append(this_service) |
623 | 26 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | 55 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
624 | 56 | 'ceph-osd', 'ceph-radosgw'] | ||
625 | 27 | 57 | ||
626 | 28 | if self.openstack: | 58 | if self.openstack: |
627 | 29 | for svc in services: | 59 | for svc in services: |
629 | 30 | if svc[name] not in use_source: | 60 | if svc['name'] not in use_source: |
630 | 31 | config = {'openstack-origin': self.openstack} | 61 | config = {'openstack-origin': self.openstack} |
632 | 32 | self.d.configure(svc[name], config) | 62 | self.d.configure(svc['name'], config) |
633 | 33 | 63 | ||
634 | 34 | if self.source: | 64 | if self.source: |
635 | 35 | for svc in services: | 65 | for svc in services: |
637 | 36 | if svc[name] in use_source: | 66 | if svc['name'] in use_source: |
638 | 37 | config = {'source': self.source} | 67 | config = {'source': self.source} |
640 | 38 | self.d.configure(svc[name], config) | 68 | self.d.configure(svc['name'], config) |
641 | 39 | 69 | ||
642 | 40 | def _configure_services(self, configs): | 70 | def _configure_services(self, configs): |
643 | 41 | """Configure all of the services.""" | 71 | """Configure all of the services.""" |
644 | 42 | 72 | ||
645 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
646 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-02 11:19:19 +0000 | |||
647 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-17 13:06:36 +0000 | |||
648 | @@ -187,15 +187,16 @@ | |||
649 | 187 | 187 | ||
650 | 188 | f = opener.open("http://download.cirros-cloud.net/version/released") | 188 | f = opener.open("http://download.cirros-cloud.net/version/released") |
651 | 189 | version = f.read().strip() | 189 | version = f.read().strip() |
653 | 190 | cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) | 190 | cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
654 | 191 | local_path = os.path.join('tests', cirros_img) | ||
655 | 191 | 192 | ||
657 | 192 | if not os.path.exists(cirros_img): | 193 | if not os.path.exists(local_path): |
658 | 193 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | 194 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
659 | 194 | version, cirros_img) | 195 | version, cirros_img) |
661 | 195 | opener.retrieve(cirros_url, cirros_img) | 196 | opener.retrieve(cirros_url, local_path) |
662 | 196 | f.close() | 197 | f.close() |
663 | 197 | 198 | ||
665 | 198 | with open(cirros_img) as f: | 199 | with open(local_path) as f: |
666 | 199 | image = glance.images.create(name=image_name, is_public=True, | 200 | image = glance.images.create(name=image_name, is_public=True, |
667 | 200 | disk_format='qcow2', | 201 | disk_format='qcow2', |
668 | 201 | container_format='bare', data=f) | 202 | container_format='bare', data=f) |
669 | 202 | 203 | ||
670 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
671 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-09-02 11:19:19 +0000 | |||
672 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-17 13:06:36 +0000 | |||
673 | @@ -8,7 +8,6 @@ | |||
674 | 8 | check_call | 8 | check_call |
675 | 9 | ) | 9 | ) |
676 | 10 | 10 | ||
677 | 11 | |||
678 | 12 | from charmhelpers.fetch import ( | 11 | from charmhelpers.fetch import ( |
679 | 13 | apt_install, | 12 | apt_install, |
680 | 14 | filter_installed_packages, | 13 | filter_installed_packages, |
681 | @@ -28,6 +27,11 @@ | |||
682 | 28 | INFO | 27 | INFO |
683 | 29 | ) | 28 | ) |
684 | 30 | 29 | ||
685 | 30 | from charmhelpers.core.host import ( | ||
686 | 31 | mkdir, | ||
687 | 32 | write_file | ||
688 | 33 | ) | ||
689 | 34 | |||
690 | 31 | from charmhelpers.contrib.hahelpers.cluster import ( | 35 | from charmhelpers.contrib.hahelpers.cluster import ( |
691 | 32 | determine_apache_port, | 36 | determine_apache_port, |
692 | 33 | determine_api_port, | 37 | determine_api_port, |
693 | @@ -38,6 +42,7 @@ | |||
694 | 38 | from charmhelpers.contrib.hahelpers.apache import ( | 42 | from charmhelpers.contrib.hahelpers.apache import ( |
695 | 39 | get_cert, | 43 | get_cert, |
696 | 40 | get_ca_cert, | 44 | get_ca_cert, |
697 | 45 | install_ca_cert, | ||
698 | 41 | ) | 46 | ) |
699 | 42 | 47 | ||
700 | 43 | from charmhelpers.contrib.openstack.neutron import ( | 48 | from charmhelpers.contrib.openstack.neutron import ( |
701 | @@ -47,8 +52,13 @@ | |||
702 | 47 | from charmhelpers.contrib.network.ip import ( | 52 | from charmhelpers.contrib.network.ip import ( |
703 | 48 | get_address_in_network, | 53 | get_address_in_network, |
704 | 49 | get_ipv6_addr, | 54 | get_ipv6_addr, |
705 | 55 | get_netmask_for_address, | ||
706 | 56 | format_ipv6_addr, | ||
707 | 57 | is_address_in_network | ||
708 | 50 | ) | 58 | ) |
709 | 51 | 59 | ||
710 | 60 | from charmhelpers.contrib.openstack.utils import get_host_ip | ||
711 | 61 | |||
712 | 52 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 62 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
713 | 53 | 63 | ||
714 | 54 | 64 | ||
715 | @@ -168,8 +178,10 @@ | |||
716 | 168 | for rid in relation_ids('shared-db'): | 178 | for rid in relation_ids('shared-db'): |
717 | 169 | for unit in related_units(rid): | 179 | for unit in related_units(rid): |
718 | 170 | rdata = relation_get(rid=rid, unit=unit) | 180 | rdata = relation_get(rid=rid, unit=unit) |
719 | 181 | host = rdata.get('db_host') | ||
720 | 182 | host = format_ipv6_addr(host) or host | ||
721 | 171 | ctxt = { | 183 | ctxt = { |
723 | 172 | 'database_host': rdata.get('db_host'), | 184 | 'database_host': host, |
724 | 173 | 'database': self.database, | 185 | 'database': self.database, |
725 | 174 | 'database_user': self.user, | 186 | 'database_user': self.user, |
726 | 175 | 'database_password': rdata.get(password_setting), | 187 | 'database_password': rdata.get(password_setting), |
727 | @@ -245,10 +257,15 @@ | |||
728 | 245 | for rid in relation_ids('identity-service'): | 257 | for rid in relation_ids('identity-service'): |
729 | 246 | for unit in related_units(rid): | 258 | for unit in related_units(rid): |
730 | 247 | rdata = relation_get(rid=rid, unit=unit) | 259 | rdata = relation_get(rid=rid, unit=unit) |
731 | 260 | serv_host = rdata.get('service_host') | ||
732 | 261 | serv_host = format_ipv6_addr(serv_host) or serv_host | ||
733 | 262 | auth_host = rdata.get('auth_host') | ||
734 | 263 | auth_host = format_ipv6_addr(auth_host) or auth_host | ||
735 | 264 | |||
736 | 248 | ctxt = { | 265 | ctxt = { |
737 | 249 | 'service_port': rdata.get('service_port'), | 266 | 'service_port': rdata.get('service_port'), |
740 | 250 | 'service_host': rdata.get('service_host'), | 267 | 'service_host': serv_host, |
741 | 251 | 'auth_host': rdata.get('auth_host'), | 268 | 'auth_host': auth_host, |
742 | 252 | 'auth_port': rdata.get('auth_port'), | 269 | 'auth_port': rdata.get('auth_port'), |
743 | 253 | 'admin_tenant_name': rdata.get('service_tenant'), | 270 | 'admin_tenant_name': rdata.get('service_tenant'), |
744 | 254 | 'admin_user': rdata.get('service_username'), | 271 | 'admin_user': rdata.get('service_username'), |
745 | @@ -297,11 +314,13 @@ | |||
746 | 297 | for unit in related_units(rid): | 314 | for unit in related_units(rid): |
747 | 298 | if relation_get('clustered', rid=rid, unit=unit): | 315 | if relation_get('clustered', rid=rid, unit=unit): |
748 | 299 | ctxt['clustered'] = True | 316 | ctxt['clustered'] = True |
751 | 300 | ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, | 317 | vip = relation_get('vip', rid=rid, unit=unit) |
752 | 301 | unit=unit) | 318 | vip = format_ipv6_addr(vip) or vip |
753 | 319 | ctxt['rabbitmq_host'] = vip | ||
754 | 302 | else: | 320 | else: |
757 | 303 | ctxt['rabbitmq_host'] = relation_get('private-address', | 321 | host = relation_get('private-address', rid=rid, unit=unit) |
758 | 304 | rid=rid, unit=unit) | 322 | host = format_ipv6_addr(host) or host |
759 | 323 | ctxt['rabbitmq_host'] = host | ||
760 | 305 | ctxt.update({ | 324 | ctxt.update({ |
761 | 306 | 'rabbitmq_user': username, | 325 | 'rabbitmq_user': username, |
762 | 307 | 'rabbitmq_password': relation_get('password', rid=rid, | 326 | 'rabbitmq_password': relation_get('password', rid=rid, |
763 | @@ -340,8 +359,9 @@ | |||
764 | 340 | and len(related_units(rid)) > 1: | 359 | and len(related_units(rid)) > 1: |
765 | 341 | rabbitmq_hosts = [] | 360 | rabbitmq_hosts = [] |
766 | 342 | for unit in related_units(rid): | 361 | for unit in related_units(rid): |
769 | 343 | rabbitmq_hosts.append(relation_get('private-address', | 362 | host = relation_get('private-address', rid=rid, unit=unit) |
770 | 344 | rid=rid, unit=unit)) | 363 | host = format_ipv6_addr(host) or host |
771 | 364 | rabbitmq_hosts.append(host) | ||
772 | 345 | ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) | 365 | ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) |
773 | 346 | if not context_complete(ctxt): | 366 | if not context_complete(ctxt): |
774 | 347 | return {} | 367 | return {} |
775 | @@ -370,6 +390,7 @@ | |||
776 | 370 | ceph_addr = \ | 390 | ceph_addr = \ |
777 | 371 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ | 391 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ |
778 | 372 | relation_get('private-address', rid=rid, unit=unit) | 392 | relation_get('private-address', rid=rid, unit=unit) |
779 | 393 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr | ||
780 | 373 | mon_hosts.append(ceph_addr) | 394 | mon_hosts.append(ceph_addr) |
781 | 374 | 395 | ||
782 | 375 | ctxt = { | 396 | ctxt = { |
783 | @@ -390,6 +411,9 @@ | |||
784 | 390 | return ctxt | 411 | return ctxt |
785 | 391 | 412 | ||
786 | 392 | 413 | ||
787 | 414 | ADDRESS_TYPES = ['admin', 'internal', 'public'] | ||
788 | 415 | |||
789 | 416 | |||
790 | 393 | class HAProxyContext(OSContextGenerator): | 417 | class HAProxyContext(OSContextGenerator): |
791 | 394 | interfaces = ['cluster'] | 418 | interfaces = ['cluster'] |
792 | 395 | 419 | ||
793 | @@ -402,25 +426,63 @@ | |||
794 | 402 | if not relation_ids('cluster'): | 426 | if not relation_ids('cluster'): |
795 | 403 | return {} | 427 | return {} |
796 | 404 | 428 | ||
797 | 429 | l_unit = local_unit().replace('/', '-') | ||
798 | 430 | |||
799 | 431 | if config('prefer-ipv6'): | ||
800 | 432 | addr = get_ipv6_addr(exc_list=[config('vip')])[0] | ||
801 | 433 | else: | ||
802 | 434 | addr = get_host_ip(unit_get('private-address')) | ||
803 | 435 | |||
804 | 405 | cluster_hosts = {} | 436 | cluster_hosts = {} |
818 | 406 | l_unit = local_unit().replace('/', '-') | 437 | |
819 | 407 | if config('prefer-ipv6'): | 438 | # NOTE(jamespage): build out map of configured network endpoints |
820 | 408 | addr = get_ipv6_addr() | 439 | # and associated backends |
821 | 409 | else: | 440 | for addr_type in ADDRESS_TYPES: |
822 | 410 | addr = unit_get('private-address') | 441 | laddr = get_address_in_network( |
823 | 411 | cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), | 442 | config('os-{}-network'.format(addr_type))) |
824 | 412 | addr) | 443 | if laddr: |
825 | 413 | 444 | cluster_hosts[laddr] = {} | |
826 | 414 | for rid in relation_ids('cluster'): | 445 | cluster_hosts[laddr]['network'] = "{}/{}".format( |
827 | 415 | for unit in related_units(rid): | 446 | laddr, |
828 | 416 | _unit = unit.replace('/', '-') | 447 | get_netmask_for_address(laddr) |
829 | 417 | addr = relation_get('private-address', rid=rid, unit=unit) | 448 | ) |
830 | 418 | cluster_hosts[_unit] = addr | 449 | cluster_hosts[laddr]['backends'] = {} |
831 | 450 | cluster_hosts[laddr]['backends'][l_unit] = laddr | ||
832 | 451 | for rid in relation_ids('cluster'): | ||
833 | 452 | for unit in related_units(rid): | ||
834 | 453 | _unit = unit.replace('/', '-') | ||
835 | 454 | _laddr = relation_get('{}-address'.format(addr_type), | ||
836 | 455 | rid=rid, unit=unit) | ||
837 | 456 | if _laddr: | ||
838 | 457 | cluster_hosts[laddr]['backends'][_unit] = _laddr | ||
839 | 458 | |||
840 | 459 | # NOTE(jamespage) no split configurations found, just use | ||
841 | 460 | # private addresses | ||
842 | 461 | if not cluster_hosts: | ||
843 | 462 | cluster_hosts[addr] = {} | ||
844 | 463 | cluster_hosts[addr]['network'] = "{}/{}".format( | ||
845 | 464 | addr, | ||
846 | 465 | get_netmask_for_address(addr) | ||
847 | 466 | ) | ||
848 | 467 | cluster_hosts[addr]['backends'] = {} | ||
849 | 468 | cluster_hosts[addr]['backends'][l_unit] = addr | ||
850 | 469 | for rid in relation_ids('cluster'): | ||
851 | 470 | for unit in related_units(rid): | ||
852 | 471 | _unit = unit.replace('/', '-') | ||
853 | 472 | _laddr = relation_get('private-address', | ||
854 | 473 | rid=rid, unit=unit) | ||
855 | 474 | if _laddr: | ||
856 | 475 | cluster_hosts[addr]['backends'][_unit] = _laddr | ||
857 | 419 | 476 | ||
858 | 420 | ctxt = { | 477 | ctxt = { |
860 | 421 | 'units': cluster_hosts, | 478 | 'frontends': cluster_hosts, |
861 | 422 | } | 479 | } |
862 | 423 | 480 | ||
863 | 481 | if config('haproxy-server-timeout'): | ||
864 | 482 | ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') | ||
865 | 483 | if config('haproxy-client-timeout'): | ||
866 | 484 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') | ||
867 | 485 | |||
868 | 424 | if config('prefer-ipv6'): | 486 | if config('prefer-ipv6'): |
869 | 425 | ctxt['local_host'] = 'ip6-localhost' | 487 | ctxt['local_host'] = 'ip6-localhost' |
870 | 426 | ctxt['haproxy_host'] = '::' | 488 | ctxt['haproxy_host'] = '::' |
871 | @@ -430,12 +492,13 @@ | |||
872 | 430 | ctxt['haproxy_host'] = '0.0.0.0' | 492 | ctxt['haproxy_host'] = '0.0.0.0' |
873 | 431 | ctxt['stat_port'] = ':8888' | 493 | ctxt['stat_port'] = ':8888' |
874 | 432 | 494 | ||
881 | 433 | if len(cluster_hosts.keys()) > 1: | 495 | for frontend in cluster_hosts: |
882 | 434 | # Enable haproxy when we have enough peers. | 496 | if len(cluster_hosts[frontend]['backends']) > 1: |
883 | 435 | log('Ensuring haproxy enabled in /etc/default/haproxy.') | 497 | # Enable haproxy when we have enough peers. |
884 | 436 | with open('/etc/default/haproxy', 'w') as out: | 498 | log('Ensuring haproxy enabled in /etc/default/haproxy.') |
885 | 437 | out.write('ENABLED=1\n') | 499 | with open('/etc/default/haproxy', 'w') as out: |
886 | 438 | return ctxt | 500 | out.write('ENABLED=1\n') |
887 | 501 | return ctxt | ||
888 | 439 | log('HAProxy context is incomplete, this unit has no peers.') | 502 | log('HAProxy context is incomplete, this unit has no peers.') |
889 | 440 | return {} | 503 | return {} |
890 | 441 | 504 | ||
891 | @@ -490,22 +553,36 @@ | |||
892 | 490 | cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] | 553 | cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] |
893 | 491 | check_call(cmd) | 554 | check_call(cmd) |
894 | 492 | 555 | ||
898 | 493 | def configure_cert(self): | 556 | def configure_cert(self, cn=None): |
896 | 494 | if not os.path.isdir('/etc/apache2/ssl'): | ||
897 | 495 | os.mkdir('/etc/apache2/ssl') | ||
899 | 496 | ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) | 557 | ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) |
907 | 497 | if not os.path.isdir(ssl_dir): | 558 | mkdir(path=ssl_dir) |
908 | 498 | os.mkdir(ssl_dir) | 559 | cert, key = get_cert(cn) |
909 | 499 | cert, key = get_cert() | 560 | if cn: |
910 | 500 | with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: | 561 | cert_filename = 'cert_{}'.format(cn) |
911 | 501 | cert_out.write(b64decode(cert)) | 562 | key_filename = 'key_{}'.format(cn) |
912 | 502 | with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: | 563 | else: |
913 | 503 | key_out.write(b64decode(key)) | 564 | cert_filename = 'cert' |
914 | 565 | key_filename = 'key' | ||
915 | 566 | write_file(path=os.path.join(ssl_dir, cert_filename), | ||
916 | 567 | content=b64decode(cert)) | ||
917 | 568 | write_file(path=os.path.join(ssl_dir, key_filename), | ||
918 | 569 | content=b64decode(key)) | ||
919 | 570 | |||
920 | 571 | def configure_ca(self): | ||
921 | 504 | ca_cert = get_ca_cert() | 572 | ca_cert = get_ca_cert() |
922 | 505 | if ca_cert: | 573 | if ca_cert: |
926 | 506 | with open(CA_CERT_PATH, 'w') as ca_out: | 574 | install_ca_cert(b64decode(ca_cert)) |
927 | 507 | ca_out.write(b64decode(ca_cert)) | 575 | |
928 | 508 | check_call(['update-ca-certificates']) | 576 | def canonical_names(self): |
929 | 577 | '''Figure out which canonical names clients will access this service''' | ||
930 | 578 | cns = [] | ||
931 | 579 | for r_id in relation_ids('identity-service'): | ||
932 | 580 | for unit in related_units(r_id): | ||
933 | 581 | rdata = relation_get(rid=r_id, unit=unit) | ||
934 | 582 | for k in rdata: | ||
935 | 583 | if k.startswith('ssl_key_'): | ||
936 | 584 | cns.append(k.lstrip('ssl_key_')) | ||
937 | 585 | return list(set(cns)) | ||
938 | 509 | 586 | ||
939 | 510 | def __call__(self): | 587 | def __call__(self): |
940 | 511 | if isinstance(self.external_ports, basestring): | 588 | if isinstance(self.external_ports, basestring): |
941 | @@ -513,21 +590,47 @@ | |||
942 | 513 | if (not self.external_ports or not https()): | 590 | if (not self.external_ports or not https()): |
943 | 514 | return {} | 591 | return {} |
944 | 515 | 592 | ||
946 | 516 | self.configure_cert() | 593 | self.configure_ca() |
947 | 517 | self.enable_modules() | 594 | self.enable_modules() |
948 | 518 | 595 | ||
949 | 519 | ctxt = { | 596 | ctxt = { |
950 | 520 | 'namespace': self.service_namespace, | 597 | 'namespace': self.service_namespace, |
953 | 521 | 'private_address': unit_get('private-address'), | 598 | 'endpoints': [], |
954 | 522 | 'endpoints': [] | 599 | 'ext_ports': [] |
955 | 523 | } | 600 | } |
963 | 524 | if is_clustered(): | 601 | |
964 | 525 | ctxt['private_address'] = config('vip') | 602 | for cn in self.canonical_names(): |
965 | 526 | for api_port in self.external_ports: | 603 | self.configure_cert(cn) |
966 | 527 | ext_port = determine_apache_port(api_port) | 604 | |
967 | 528 | int_port = determine_api_port(api_port) | 605 | addresses = [] |
968 | 529 | portmap = (int(ext_port), int(int_port)) | 606 | vips = [] |
969 | 530 | ctxt['endpoints'].append(portmap) | 607 | if config('vip'): |
970 | 608 | vips = config('vip').split() | ||
971 | 609 | |||
972 | 610 | for network_type in ['os-internal-network', | ||
973 | 611 | 'os-admin-network', | ||
974 | 612 | 'os-public-network']: | ||
975 | 613 | address = get_address_in_network(config(network_type), | ||
976 | 614 | unit_get('private-address')) | ||
977 | 615 | if len(vips) > 0 and is_clustered(): | ||
978 | 616 | for vip in vips: | ||
979 | 617 | if is_address_in_network(config(network_type), | ||
980 | 618 | vip): | ||
981 | 619 | addresses.append((address, vip)) | ||
982 | 620 | break | ||
983 | 621 | elif is_clustered(): | ||
984 | 622 | addresses.append((address, config('vip'))) | ||
985 | 623 | else: | ||
986 | 624 | addresses.append((address, address)) | ||
987 | 625 | |||
988 | 626 | for address, endpoint in set(addresses): | ||
989 | 627 | for api_port in self.external_ports: | ||
990 | 628 | ext_port = determine_apache_port(api_port) | ||
991 | 629 | int_port = determine_api_port(api_port) | ||
992 | 630 | portmap = (address, endpoint, int(ext_port), int(int_port)) | ||
993 | 631 | ctxt['endpoints'].append(portmap) | ||
994 | 632 | ctxt['ext_ports'].append(int(ext_port)) | ||
995 | 633 | ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) | ||
996 | 531 | return ctxt | 634 | return ctxt |
997 | 532 | 635 | ||
998 | 533 | 636 | ||
999 | @@ -657,22 +760,22 @@ | |||
1000 | 657 | 760 | ||
1001 | 658 | class OSConfigFlagContext(OSContextGenerator): | 761 | class OSConfigFlagContext(OSContextGenerator): |
1002 | 659 | 762 | ||
1019 | 660 | """ | 763 | """ |
1020 | 661 | Responsible for adding user-defined config-flags in charm config to a | 764 | Responsible for adding user-defined config-flags in charm config to a |
1021 | 662 | template context. | 765 | template context. |
1022 | 663 | 766 | ||
1023 | 664 | NOTE: the value of config-flags may be a comma-separated list of | 767 | NOTE: the value of config-flags may be a comma-separated list of |
1024 | 665 | key=value pairs and some Openstack config files support | 768 | key=value pairs and some Openstack config files support |
1025 | 666 | comma-separated lists as values. | 769 | comma-separated lists as values. |
1026 | 667 | """ | 770 | """ |
1027 | 668 | 771 | ||
1028 | 669 | def __call__(self): | 772 | def __call__(self): |
1029 | 670 | config_flags = config('config-flags') | 773 | config_flags = config('config-flags') |
1030 | 671 | if not config_flags: | 774 | if not config_flags: |
1031 | 672 | return {} | 775 | return {} |
1032 | 673 | 776 | ||
1033 | 674 | flags = config_flags_parser(config_flags) | 777 | flags = config_flags_parser(config_flags) |
1034 | 675 | return {'user_config_flags': flags} | 778 | return {'user_config_flags': flags} |
1035 | 676 | 779 | ||
1036 | 677 | 780 | ||
1037 | 678 | class SubordinateConfigContext(OSContextGenerator): | 781 | class SubordinateConfigContext(OSContextGenerator): |
1038 | @@ -787,3 +890,35 @@ | |||
1039 | 787 | 'use_syslog': config('use-syslog') | 890 | 'use_syslog': config('use-syslog') |
1040 | 788 | } | 891 | } |
1041 | 789 | return ctxt | 892 | return ctxt |
1042 | 893 | |||
1043 | 894 | |||
1044 | 895 | class BindHostContext(OSContextGenerator): | ||
1045 | 896 | |||
1046 | 897 | def __call__(self): | ||
1047 | 898 | if config('prefer-ipv6'): | ||
1048 | 899 | return { | ||
1049 | 900 | 'bind_host': '::' | ||
1050 | 901 | } | ||
1051 | 902 | else: | ||
1052 | 903 | return { | ||
1053 | 904 | 'bind_host': '0.0.0.0' | ||
1054 | 905 | } | ||
1055 | 906 | |||
1056 | 907 | |||
1057 | 908 | class WorkerConfigContext(OSContextGenerator): | ||
1058 | 909 | |||
1059 | 910 | @property | ||
1060 | 911 | def num_cpus(self): | ||
1061 | 912 | try: | ||
1062 | 913 | from psutil import NUM_CPUS | ||
1063 | 914 | except ImportError: | ||
1064 | 915 | apt_install('python-psutil', fatal=True) | ||
1065 | 916 | from psutil import NUM_CPUS | ||
1066 | 917 | return NUM_CPUS | ||
1067 | 918 | |||
1068 | 919 | def __call__(self): | ||
1069 | 920 | multiplier = config('worker-multiplier') or 1 | ||
1070 | 921 | ctxt = { | ||
1071 | 922 | "workers": self.num_cpus * multiplier | ||
1072 | 923 | } | ||
1073 | 924 | return ctxt | ||
1074 | 790 | 925 | ||
1075 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
1076 | --- hooks/charmhelpers/contrib/openstack/ip.py 2014-09-02 11:19:19 +0000 | |||
1077 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-17 13:06:36 +0000 | |||
1078 | @@ -66,7 +66,7 @@ | |||
1079 | 66 | resolved_address = vip | 66 | resolved_address = vip |
1080 | 67 | else: | 67 | else: |
1081 | 68 | if config('prefer-ipv6'): | 68 | if config('prefer-ipv6'): |
1083 | 69 | fallback_addr = get_ipv6_addr() | 69 | fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0] |
1084 | 70 | else: | 70 | else: |
1085 | 71 | fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) | 71 | fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) |
1086 | 72 | resolved_address = get_address_in_network( | 72 | resolved_address = get_address_in_network( |
1087 | 73 | 73 | ||
1088 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
1089 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-09-08 13:55:11 +0000 | |||
1090 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-17 13:06:36 +0000 | |||
1091 | @@ -4,6 +4,7 @@ | |||
1092 | 4 | from collections import OrderedDict | 4 | from collections import OrderedDict |
1093 | 5 | 5 | ||
1094 | 6 | import subprocess | 6 | import subprocess |
1095 | 7 | import json | ||
1096 | 7 | import os | 8 | import os |
1097 | 8 | import socket | 9 | import socket |
1098 | 9 | import sys | 10 | import sys |
1099 | @@ -13,7 +14,9 @@ | |||
1100 | 13 | log as juju_log, | 14 | log as juju_log, |
1101 | 14 | charm_dir, | 15 | charm_dir, |
1102 | 15 | ERROR, | 16 | ERROR, |
1104 | 16 | INFO | 17 | INFO, |
1105 | 18 | relation_ids, | ||
1106 | 19 | relation_set | ||
1107 | 17 | ) | 20 | ) |
1108 | 18 | 21 | ||
1109 | 19 | from charmhelpers.contrib.storage.linux.lvm import ( | 22 | from charmhelpers.contrib.storage.linux.lvm import ( |
1110 | @@ -22,6 +25,10 @@ | |||
1111 | 22 | remove_lvm_physical_volume, | 25 | remove_lvm_physical_volume, |
1112 | 23 | ) | 26 | ) |
1113 | 24 | 27 | ||
1114 | 28 | from charmhelpers.contrib.network.ip import ( | ||
1115 | 29 | get_ipv6_addr | ||
1116 | 30 | ) | ||
1117 | 31 | |||
1118 | 25 | from charmhelpers.core.host import lsb_release, mounts, umount | 32 | from charmhelpers.core.host import lsb_release, mounts, umount |
1119 | 26 | from charmhelpers.fetch import apt_install, apt_cache | 33 | from charmhelpers.fetch import apt_install, apt_cache |
1120 | 27 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | 34 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
1121 | @@ -71,6 +78,8 @@ | |||
1122 | 71 | ('1.12.0', 'icehouse'), | 78 | ('1.12.0', 'icehouse'), |
1123 | 72 | ('1.11.0', 'icehouse'), | 79 | ('1.11.0', 'icehouse'), |
1124 | 73 | ('2.0.0', 'juno'), | 80 | ('2.0.0', 'juno'), |
1125 | 81 | ('2.1.0', 'juno'), | ||
1126 | 82 | ('2.2.0', 'juno'), | ||
1127 | 74 | ]) | 83 | ]) |
1128 | 75 | 84 | ||
1129 | 76 | DEFAULT_LOOPBACK_SIZE = '5G' | 85 | DEFAULT_LOOPBACK_SIZE = '5G' |
1130 | @@ -457,3 +466,21 @@ | |||
1131 | 457 | return result | 466 | return result |
1132 | 458 | else: | 467 | else: |
1133 | 459 | return result.split('.')[0] | 468 | return result.split('.')[0] |
1134 | 469 | |||
1135 | 470 | |||
1136 | 471 | def sync_db_with_multi_ipv6_addresses(database, database_user, | ||
1137 | 472 | relation_prefix=None): | ||
1138 | 473 | hosts = get_ipv6_addr(dynamic_only=False) | ||
1139 | 474 | |||
1140 | 475 | kwargs = {'database': database, | ||
1141 | 476 | 'username': database_user, | ||
1142 | 477 | 'hostname': json.dumps(hosts)} | ||
1143 | 478 | |||
1144 | 479 | if relation_prefix: | ||
1145 | 480 | keys = kwargs.keys() | ||
1146 | 481 | for key in keys: | ||
1147 | 482 | kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] | ||
1148 | 483 | del kwargs[key] | ||
1149 | 484 | |||
1150 | 485 | for rid in relation_ids('shared-db'): | ||
1151 | 486 | relation_set(relation_id=rid, **kwargs) | ||
1152 | 460 | 487 | ||
1153 | === added directory 'hooks/charmhelpers/contrib/storage' | |||
1154 | === added file 'hooks/charmhelpers/contrib/storage/__init__.py' | |||
1155 | === added directory 'hooks/charmhelpers/contrib/storage/linux' | |||
1156 | === added file 'hooks/charmhelpers/contrib/storage/linux/__init__.py' | |||
1157 | === added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
1158 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000 | |||
1159 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-17 13:06:36 +0000 | |||
1160 | @@ -0,0 +1,388 @@ | |||
1161 | 1 | # | ||
1162 | 2 | # Copyright 2012 Canonical Ltd. | ||
1163 | 3 | # | ||
1164 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
1165 | 5 | # | ||
1166 | 6 | # Authors: | ||
1167 | 7 | # James Page <james.page@ubuntu.com> | ||
1168 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
1169 | 9 | # | ||
1170 | 10 | |||
1171 | 11 | import os | ||
1172 | 12 | import shutil | ||
1173 | 13 | import json | ||
1174 | 14 | import time | ||
1175 | 15 | |||
1176 | 16 | from subprocess import ( | ||
1177 | 17 | check_call, | ||
1178 | 18 | check_output, | ||
1179 | 19 | CalledProcessError | ||
1180 | 20 | ) | ||
1181 | 21 | |||
1182 | 22 | from charmhelpers.core.hookenv import ( | ||
1183 | 23 | relation_get, | ||
1184 | 24 | relation_ids, | ||
1185 | 25 | related_units, | ||
1186 | 26 | log, | ||
1187 | 27 | INFO, | ||
1188 | 28 | WARNING, | ||
1189 | 29 | ERROR | ||
1190 | 30 | ) | ||
1191 | 31 | |||
1192 | 32 | from charmhelpers.core.host import ( | ||
1193 | 33 | mount, | ||
1194 | 34 | mounts, | ||
1195 | 35 | service_start, | ||
1196 | 36 | service_stop, | ||
1197 | 37 | service_running, | ||
1198 | 38 | umount, | ||
1199 | 39 | ) | ||
1200 | 40 | |||
1201 | 41 | from charmhelpers.fetch import ( | ||
1202 | 42 | apt_install, | ||
1203 | 43 | ) | ||
1204 | 44 | |||
1205 | 45 | KEYRING = '/etc/ceph/ceph.client.{}.keyring' | ||
1206 | 46 | KEYFILE = '/etc/ceph/ceph.client.{}.key' | ||
1207 | 47 | |||
1208 | 48 | CEPH_CONF = """[global] | ||
1209 | 49 | auth supported = {auth} | ||
1210 | 50 | keyring = {keyring} | ||
1211 | 51 | mon host = {mon_hosts} | ||
1212 | 52 | log to syslog = {use_syslog} | ||
1213 | 53 | err to syslog = {use_syslog} | ||
1214 | 54 | clog to syslog = {use_syslog} | ||
1215 | 55 | """ | ||
1216 | 56 | |||
1217 | 57 | |||
1218 | 58 | def install(): | ||
1219 | 59 | ''' Basic Ceph client installation ''' | ||
1220 | 60 | ceph_dir = "/etc/ceph" | ||
1221 | 61 | if not os.path.exists(ceph_dir): | ||
1222 | 62 | os.mkdir(ceph_dir) | ||
1223 | 63 | apt_install('ceph-common', fatal=True) | ||
1224 | 64 | |||
1225 | 65 | |||
1226 | 66 | def rbd_exists(service, pool, rbd_img): | ||
1227 | 67 | ''' Check to see if a RADOS block device exists ''' | ||
1228 | 68 | try: | ||
1229 | 69 | out = check_output(['rbd', 'list', '--id', service, | ||
1230 | 70 | '--pool', pool]) | ||
1231 | 71 | except CalledProcessError: | ||
1232 | 72 | return False | ||
1233 | 73 | else: | ||
1234 | 74 | return rbd_img in out | ||
1235 | 75 | |||
1236 | 76 | |||
1237 | 77 | def create_rbd_image(service, pool, image, sizemb): | ||
1238 | 78 | ''' Create a new RADOS block device ''' | ||
1239 | 79 | cmd = [ | ||
1240 | 80 | 'rbd', | ||
1241 | 81 | 'create', | ||
1242 | 82 | image, | ||
1243 | 83 | '--size', | ||
1244 | 84 | str(sizemb), | ||
1245 | 85 | '--id', | ||
1246 | 86 | service, | ||
1247 | 87 | '--pool', | ||
1248 | 88 | pool | ||
1249 | 89 | ] | ||
1250 | 90 | check_call(cmd) | ||
1251 | 91 | |||
1252 | 92 | |||
1253 | 93 | def pool_exists(service, name): | ||
1254 | 94 | ''' Check to see if a RADOS pool already exists ''' | ||
1255 | 95 | try: | ||
1256 | 96 | out = check_output(['rados', '--id', service, 'lspools']) | ||
1257 | 97 | except CalledProcessError: | ||
1258 | 98 | return False | ||
1259 | 99 | else: | ||
1260 | 100 | return name in out | ||
1261 | 101 | |||
1262 | 102 | |||
1263 | 103 | def get_osds(service): | ||
1264 | 104 | ''' | ||
1265 | 105 | Return a list of all Ceph Object Storage Daemons | ||
1266 | 106 | currently in the cluster | ||
1267 | 107 | ''' | ||
1268 | 108 | version = ceph_version() | ||
1269 | 109 | if version and version >= '0.56': | ||
1270 | 110 | return json.loads(check_output(['ceph', '--id', service, | ||
1271 | 111 | 'osd', 'ls', '--format=json'])) | ||
1272 | 112 | else: | ||
1273 | 113 | return None | ||
1274 | 114 | |||
1275 | 115 | |||
1276 | 116 | def create_pool(service, name, replicas=3): | ||
1277 | 117 | ''' Create a new RADOS pool ''' | ||
1278 | 118 | if pool_exists(service, name): | ||
1279 | 119 | log("Ceph pool {} already exists, skipping creation".format(name), | ||
1280 | 120 | level=WARNING) | ||
1281 | 121 | return | ||
1282 | 122 | # Calculate the number of placement groups based | ||
1283 | 123 | # on upstream recommended best practices. | ||
1284 | 124 | osds = get_osds(service) | ||
1285 | 125 | if osds: | ||
1286 | 126 | pgnum = (len(osds) * 100 / replicas) | ||
1287 | 127 | else: | ||
1288 | 128 | # NOTE(james-page): Default to 200 for older ceph versions | ||
1289 | 129 | # which don't support OSD query from cli | ||
1290 | 130 | pgnum = 200 | ||
1291 | 131 | cmd = [ | ||
1292 | 132 | 'ceph', '--id', service, | ||
1293 | 133 | 'osd', 'pool', 'create', | ||
1294 | 134 | name, str(pgnum) | ||
1295 | 135 | ] | ||
1296 | 136 | check_call(cmd) | ||
1297 | 137 | cmd = [ | ||
1298 | 138 | 'ceph', '--id', service, | ||
1299 | 139 | 'osd', 'pool', 'set', name, | ||
1300 | 140 | 'size', str(replicas) | ||
1301 | 141 | ] | ||
1302 | 142 | check_call(cmd) | ||
1303 | 143 | |||
1304 | 144 | |||
1305 | 145 | def delete_pool(service, name): | ||
1306 | 146 | ''' Delete a RADOS pool from ceph ''' | ||
1307 | 147 | cmd = [ | ||
1308 | 148 | 'ceph', '--id', service, | ||
1309 | 149 | 'osd', 'pool', 'delete', | ||
1310 | 150 | name, '--yes-i-really-really-mean-it' | ||
1311 | 151 | ] | ||
1312 | 152 | check_call(cmd) | ||
1313 | 153 | |||
1314 | 154 | |||
1315 | 155 | def _keyfile_path(service): | ||
1316 | 156 | return KEYFILE.format(service) | ||
1317 | 157 | |||
1318 | 158 | |||
1319 | 159 | def _keyring_path(service): | ||
1320 | 160 | return KEYRING.format(service) | ||
1321 | 161 | |||
1322 | 162 | |||
1323 | 163 | def create_keyring(service, key): | ||
1324 | 164 | ''' Create a new Ceph keyring containing key''' | ||
1325 | 165 | keyring = _keyring_path(service) | ||
1326 | 166 | if os.path.exists(keyring): | ||
1327 | 167 | log('ceph: Keyring exists at %s.' % keyring, level=WARNING) | ||
1328 | 168 | return | ||
1329 | 169 | cmd = [ | ||
1330 | 170 | 'ceph-authtool', | ||
1331 | 171 | keyring, | ||
1332 | 172 | '--create-keyring', | ||
1333 | 173 | '--name=client.{}'.format(service), | ||
1334 | 174 | '--add-key={}'.format(key) | ||
1335 | 175 | ] | ||
1336 | 176 | check_call(cmd) | ||
1337 | 177 | log('ceph: Created new ring at %s.' % keyring, level=INFO) | ||
1338 | 178 | |||
1339 | 179 | |||
1340 | 180 | def create_key_file(service, key): | ||
1341 | 181 | ''' Create a file containing key ''' | ||
1342 | 182 | keyfile = _keyfile_path(service) | ||
1343 | 183 | if os.path.exists(keyfile): | ||
1344 | 184 | log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) | ||
1345 | 185 | return | ||
1346 | 186 | with open(keyfile, 'w') as fd: | ||
1347 | 187 | fd.write(key) | ||
1348 | 188 | log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) | ||
1349 | 189 | |||
1350 | 190 | |||
1351 | 191 | def get_ceph_nodes(): | ||
1352 | 192 | ''' Query named relation 'ceph' to detemine current nodes ''' | ||
1353 | 193 | hosts = [] | ||
1354 | 194 | for r_id in relation_ids('ceph'): | ||
1355 | 195 | for unit in related_units(r_id): | ||
1356 | 196 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | ||
1357 | 197 | return hosts | ||
1358 | 198 | |||
1359 | 199 | |||
1360 | 200 | def configure(service, key, auth, use_syslog): | ||
1361 | 201 | ''' Perform basic configuration of Ceph ''' | ||
1362 | 202 | create_keyring(service, key) | ||
1363 | 203 | create_key_file(service, key) | ||
1364 | 204 | hosts = get_ceph_nodes() | ||
1365 | 205 | with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: | ||
1366 | 206 | ceph_conf.write(CEPH_CONF.format(auth=auth, | ||
1367 | 207 | keyring=_keyring_path(service), | ||
1368 | 208 | mon_hosts=",".join(map(str, hosts)), | ||
1369 | 209 | use_syslog=use_syslog)) | ||
1370 | 210 | modprobe('rbd') | ||
1371 | 211 | |||
1372 | 212 | |||
1373 | 213 | def image_mapped(name): | ||
1374 | 214 | ''' Determine whether a RADOS block device is mapped locally ''' | ||
1375 | 215 | try: | ||
1376 | 216 | out = check_output(['rbd', 'showmapped']) | ||
1377 | 217 | except CalledProcessError: | ||
1378 | 218 | return False | ||
1379 | 219 | else: | ||
1380 | 220 | return name in out | ||
1381 | 221 | |||
1382 | 222 | |||
1383 | 223 | def map_block_storage(service, pool, image): | ||
1384 | 224 | ''' Map a RADOS block device for local use ''' | ||
1385 | 225 | cmd = [ | ||
1386 | 226 | 'rbd', | ||
1387 | 227 | 'map', | ||
1388 | 228 | '{}/{}'.format(pool, image), | ||
1389 | 229 | '--user', | ||
1390 | 230 | service, | ||
1391 | 231 | '--secret', | ||
1392 | 232 | _keyfile_path(service), | ||
1393 | 233 | ] | ||
1394 | 234 | check_call(cmd) | ||
1395 | 235 | |||
1396 | 236 | |||
1397 | 237 | def filesystem_mounted(fs): | ||
1398 | 238 | ''' Determine whether a filesystem is already mounted ''' | ||
1399 | 239 | return fs in [f for f, m in mounts()] | ||
1400 | 240 | |||
1401 | 241 | |||
1402 | 242 | def make_filesystem(blk_device, fstype='ext4', timeout=10): | ||
1403 | 243 | ''' Make a new filesystem on the specified block device ''' | ||
1404 | 244 | count = 0 | ||
1405 | 245 | e_noent = os.errno.ENOENT | ||
1406 | 246 | while not os.path.exists(blk_device): | ||
1407 | 247 | if count >= timeout: | ||
1408 | 248 | log('ceph: gave up waiting on block device %s' % blk_device, | ||
1409 | 249 | level=ERROR) | ||
1410 | 250 | raise IOError(e_noent, os.strerror(e_noent), blk_device) | ||
1411 | 251 | log('ceph: waiting for block device %s to appear' % blk_device, | ||
1412 | 252 | level=INFO) | ||
1413 | 253 | count += 1 | ||
1414 | 254 | time.sleep(1) | ||
1415 | 255 | else: | ||
1416 | 256 | log('ceph: Formatting block device %s as filesystem %s.' % | ||
1417 | 257 | (blk_device, fstype), level=INFO) | ||
1418 | 258 | check_call(['mkfs', '-t', fstype, blk_device]) | ||
1419 | 259 | |||
1420 | 260 | |||
1421 | 261 | def place_data_on_block_device(blk_device, data_src_dst): | ||
1422 | 262 | ''' Migrate data in data_src_dst to blk_device and then remount ''' | ||
1423 | 263 | # mount block device into /mnt | ||
1424 | 264 | mount(blk_device, '/mnt') | ||
1425 | 265 | # copy data to /mnt | ||
1426 | 266 | copy_files(data_src_dst, '/mnt') | ||
1427 | 267 | # umount block device | ||
1428 | 268 | umount('/mnt') | ||
1429 | 269 | # Grab user/group ID's from original source | ||
1430 | 270 | _dir = os.stat(data_src_dst) | ||
1431 | 271 | uid = _dir.st_uid | ||
1432 | 272 | gid = _dir.st_gid | ||
1433 | 273 | # re-mount where the data should originally be | ||
1434 | 274 | # TODO: persist is currently a NO-OP in core.host | ||
1435 | 275 | mount(blk_device, data_src_dst, persist=True) | ||
1436 | 276 | # ensure original ownership of new mount. | ||
1437 | 277 | os.chown(data_src_dst, uid, gid) | ||
1438 | 278 | |||
1439 | 279 | |||
1440 | 280 | # TODO: re-use | ||
1441 | 281 | def modprobe(module): | ||
1442 | 282 | ''' Load a kernel module and configure for auto-load on reboot ''' | ||
1443 | 283 | log('ceph: Loading kernel module', level=INFO) | ||
1444 | 284 | cmd = ['modprobe', module] | ||
1445 | 285 | check_call(cmd) | ||
1446 | 286 | with open('/etc/modules', 'r+') as modules: | ||
1447 | 287 | if module not in modules.read(): | ||
1448 | 288 | modules.write(module) | ||
1449 | 289 | |||
1450 | 290 | |||
1451 | 291 | def copy_files(src, dst, symlinks=False, ignore=None): | ||
1452 | 292 | ''' Copy files from src to dst ''' | ||
1453 | 293 | for item in os.listdir(src): | ||
1454 | 294 | s = os.path.join(src, item) | ||
1455 | 295 | d = os.path.join(dst, item) | ||
1456 | 296 | if os.path.isdir(s): | ||
1457 | 297 | shutil.copytree(s, d, symlinks, ignore) | ||
1458 | 298 | else: | ||
1459 | 299 | shutil.copy2(s, d) | ||
1460 | 300 | |||
1461 | 301 | |||
1462 | 302 | def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, | ||
1463 | 303 | blk_device, fstype, system_services=[], | ||
1464 | 304 | replicas=3): | ||
1465 | 305 | """ | ||
1466 | 306 | NOTE: This function must only be called from a single service unit for | ||
1467 | 307 | the same rbd_img otherwise data loss will occur. | ||
1468 | 308 | |||
1469 | 309 | Ensures given pool and RBD image exists, is mapped to a block device, | ||
1470 | 310 | and the device is formatted and mounted at the given mount_point. | ||
1471 | 311 | |||
1472 | 312 | If formatting a device for the first time, data existing at mount_point | ||
1473 | 313 | will be migrated to the RBD device before being re-mounted. | ||
1474 | 314 | |||
1475 | 315 | All services listed in system_services will be stopped prior to data | ||
1476 | 316 | migration and restarted when complete. | ||
1477 | 317 | """ | ||
1478 | 318 | # Ensure pool, RBD image, RBD mappings are in place. | ||
1479 | 319 | if not pool_exists(service, pool): | ||
1480 | 320 | log('ceph: Creating new pool {}.'.format(pool)) | ||
1481 | 321 | create_pool(service, pool, replicas=replicas) | ||
1482 | 322 | |||
1483 | 323 | if not rbd_exists(service, pool, rbd_img): | ||
1484 | 324 | log('ceph: Creating RBD image ({}).'.format(rbd_img)) | ||
1485 | 325 | create_rbd_image(service, pool, rbd_img, sizemb) | ||
1486 | 326 | |||
1487 | 327 | if not image_mapped(rbd_img): | ||
1488 | 328 | log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) | ||
1489 | 329 | map_block_storage(service, pool, rbd_img) | ||
1490 | 330 | |||
1491 | 331 | # make file system | ||
1492 | 332 | # TODO: What happens if for whatever reason this is run again and | ||
1493 | 333 | # the data is already in the rbd device and/or is mounted?? | ||
1494 | 334 | # When it is mounted already, it will fail to make the fs | ||
1495 | 335 | # XXX: This is really sketchy! Need to at least add an fstab entry | ||
1496 | 336 | # otherwise this hook will blow away existing data if it's executed | ||
1497 | 337 | # after a reboot. | ||
1498 | 338 | if not filesystem_mounted(mount_point): | ||
1499 | 339 | make_filesystem(blk_device, fstype) | ||
1500 | 340 | |||
1501 | 341 | for svc in system_services: | ||
1502 | 342 | if service_running(svc): | ||
1503 | 343 | log('ceph: Stopping services {} prior to migrating data.' | ||
1504 | 344 | .format(svc)) | ||
1505 | 345 | service_stop(svc) | ||
1506 | 346 | |||
1507 | 347 | place_data_on_block_device(blk_device, mount_point) | ||
1508 | 348 | |||
1509 | 349 | for svc in system_services: | ||
1510 | 350 | log('ceph: Starting service {} after migrating data.' | ||
1511 | 351 | .format(svc)) | ||
1512 | 352 | service_start(svc) | ||
1513 | 353 | |||
1514 | 354 | |||
1515 | 355 | def ensure_ceph_keyring(service, user=None, group=None): | ||
1516 | 356 | ''' | ||
1517 | 357 | Ensures a ceph keyring is created for a named service | ||
1518 | 358 | and optionally ensures user and group ownership. | ||
1519 | 359 | |||
1520 | 360 | Returns False if no ceph key is available in relation state. | ||
1521 | 361 | ''' | ||
1522 | 362 | key = None | ||
1523 | 363 | for rid in relation_ids('ceph'): | ||
1524 | 364 | for unit in related_units(rid): | ||
1525 | 365 | key = relation_get('key', rid=rid, unit=unit) | ||
1526 | 366 | if key: | ||
1527 | 367 | break | ||
1528 | 368 | if not key: | ||
1529 | 369 | return False | ||
1530 | 370 | create_keyring(service=service, key=key) | ||
1531 | 371 | keyring = _keyring_path(service) | ||
1532 | 372 | if user and group: | ||
1533 | 373 | check_call(['chown', '%s.%s' % (user, group), keyring]) | ||
1534 | 374 | return True | ||
1535 | 375 | |||
1536 | 376 | |||
1537 | 377 | def ceph_version(): | ||
1538 | 378 | ''' Retrieve the local version of ceph ''' | ||
1539 | 379 | if os.path.exists('/usr/bin/ceph'): | ||
1540 | 380 | cmd = ['ceph', '-v'] | ||
1541 | 381 | output = check_output(cmd) | ||
1542 | 382 | output = output.split() | ||
1543 | 383 | if len(output) > 3: | ||
1544 | 384 | return output[2] | ||
1545 | 385 | else: | ||
1546 | 386 | return None | ||
1547 | 387 | else: | ||
1548 | 388 | return None | ||
1549 | 0 | 389 | ||
1550 | === added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' | |||
1551 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000 | |||
1552 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2014-10-17 13:06:36 +0000 | |||
1553 | @@ -0,0 +1,62 @@ | |||
1554 | 1 | |||
1555 | 2 | import os | ||
1556 | 3 | import re | ||
1557 | 4 | |||
1558 | 5 | from subprocess import ( | ||
1559 | 6 | check_call, | ||
1560 | 7 | check_output, | ||
1561 | 8 | ) | ||
1562 | 9 | |||
1563 | 10 | |||
1564 | 11 | ################################################## | ||
1565 | 12 | # loopback device helpers. | ||
1566 | 13 | ################################################## | ||
1567 | 14 | def loopback_devices(): | ||
1568 | 15 | ''' | ||
1569 | 16 | Parse through 'losetup -a' output to determine currently mapped | ||
1570 | 17 | loopback devices. Output is expected to look like: | ||
1571 | 18 | |||
1572 | 19 | /dev/loop0: [0807]:961814 (/tmp/my.img) | ||
1573 | 20 | |||
1574 | 21 | :returns: dict: a dict mapping {loopback_dev: backing_file} | ||
1575 | 22 | ''' | ||
1576 | 23 | loopbacks = {} | ||
1577 | 24 | cmd = ['losetup', '-a'] | ||
1578 | 25 | devs = [d.strip().split(' ') for d in | ||
1579 | 26 | check_output(cmd).splitlines() if d != ''] | ||
1580 | 27 | for dev, _, f in devs: | ||
1581 | 28 | loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] | ||
1582 | 29 | return loopbacks | ||
1583 | 30 | |||
1584 | 31 | |||
1585 | 32 | def create_loopback(file_path): | ||
1586 | 33 | ''' | ||
1587 | 34 | Create a loopback device for a given backing file. | ||
1588 | 35 | |||
1589 | 36 | :returns: str: Full path to new loopback device (eg, /dev/loop0) | ||
1590 | 37 | ''' | ||
1591 | 38 | file_path = os.path.abspath(file_path) | ||
1592 | 39 | check_call(['losetup', '--find', file_path]) | ||
1593 | 40 | for d, f in loopback_devices().iteritems(): | ||
1594 | 41 | if f == file_path: | ||
1595 | 42 | return d | ||
1596 | 43 | |||
1597 | 44 | |||
1598 | 45 | def ensure_loopback_device(path, size): | ||
1599 | 46 | ''' | ||
1600 | 47 | Ensure a loopback device exists for a given backing file path and size. | ||
1601 | 48 | If a loopback device is not mapped to the file, a new one will be created. | ||
1602 | 49 | |||
1603 | 50 | TODO: Confirm size of found loopback device. | ||
1604 | 51 | |||
1605 | 52 | :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) | ||
1606 | 53 | ''' | ||
1607 | 54 | for d, f in loopback_devices().iteritems(): | ||
1608 | 55 | if f == path: | ||
1609 | 56 | return d | ||
1610 | 57 | |||
1611 | 58 | if not os.path.exists(path): | ||
1612 | 59 | cmd = ['truncate', '--size', size, path] | ||
1613 | 60 | check_call(cmd) | ||
1614 | 61 | |||
1615 | 62 | return create_loopback(path) | ||
1616 | 0 | 63 | ||
1617 | === added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py' | |||
1618 | --- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000 | |||
1619 | +++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-10-17 13:06:36 +0000 | |||
1620 | @@ -0,0 +1,88 @@ | |||
1621 | 1 | from subprocess import ( | ||
1622 | 2 | CalledProcessError, | ||
1623 | 3 | check_call, | ||
1624 | 4 | check_output, | ||
1625 | 5 | Popen, | ||
1626 | 6 | PIPE, | ||
1627 | 7 | ) | ||
1628 | 8 | |||
1629 | 9 | |||
1630 | 10 | ################################################## | ||
1631 | 11 | # LVM helpers. | ||
1632 | 12 | ################################################## | ||
1633 | 13 | def deactivate_lvm_volume_group(block_device): | ||
1634 | 14 | ''' | ||
1635 | 15 | Deactivate any volume group associated with an LVM physical volume. | ||
1636 | 16 | |||
1637 | 17 | :param block_device: str: Full path to LVM physical volume | ||
1638 | 18 | ''' | ||
1639 | 19 | vg = list_lvm_volume_group(block_device) | ||
1640 | 20 | if vg: | ||
1641 | 21 | cmd = ['vgchange', '-an', vg] | ||
1642 | 22 | check_call(cmd) | ||
1643 | 23 | |||
1644 | 24 | |||
1645 | 25 | def is_lvm_physical_volume(block_device): | ||
1646 | 26 | ''' | ||
1647 | 27 | Determine whether a block device is initialized as an LVM PV. | ||
1648 | 28 | |||
1649 | 29 | :param block_device: str: Full path of block device to inspect. | ||
1650 | 30 | |||
1651 | 31 | :returns: boolean: True if block device is a PV, False if not. | ||
1652 | 32 | ''' | ||
1653 | 33 | try: | ||
1654 | 34 | check_output(['pvdisplay', block_device]) | ||
1655 | 35 | return True | ||
1656 | 36 | except CalledProcessError: | ||
1657 | 37 | return False | ||
1658 | 38 | |||
1659 | 39 | |||
1660 | 40 | def remove_lvm_physical_volume(block_device): | ||
1661 | 41 | ''' | ||
1662 | 42 | Remove LVM PV signatures from a given block device. | ||
1663 | 43 | |||
1664 | 44 | :param block_device: str: Full path of block device to scrub. | ||
1665 | 45 | ''' | ||
1666 | 46 | p = Popen(['pvremove', '-ff', block_device], | ||
1667 | 47 | stdin=PIPE) | ||
1668 | 48 | p.communicate(input='y\n') | ||
1669 | 49 | |||
1670 | 50 | |||
1671 | 51 | def list_lvm_volume_group(block_device): | ||
1672 | 52 | ''' | ||
1673 | 53 | List LVM volume group associated with a given block device. | ||
1674 | 54 | |||
1675 | 55 | Assumes block device is a valid LVM PV. | ||
1676 | 56 | |||
1677 | 57 | :param block_device: str: Full path of block device to inspect. | ||
1678 | 58 | |||
1679 | 59 | :returns: str: Name of volume group associated with block device or None | ||
1680 | 60 | ''' | ||
1681 | 61 | vg = None | ||
1682 | 62 | pvd = check_output(['pvdisplay', block_device]).splitlines() | ||
1683 | 63 | for l in pvd: | ||
1684 | 64 | if l.strip().startswith('VG Name'): | ||
1685 | 65 | vg = ' '.join(l.strip().split()[2:]) | ||
1686 | 66 | return vg | ||
1687 | 67 | |||
1688 | 68 | |||
1689 | 69 | def create_lvm_physical_volume(block_device): | ||
1690 | 70 | ''' | ||
1691 | 71 | Initialize a block device as an LVM physical volume. | ||
1692 | 72 | |||
1693 | 73 | :param block_device: str: Full path of block device to initialize. | ||
1694 | 74 | |||
1695 | 75 | ''' | ||
1696 | 76 | check_call(['pvcreate', block_device]) | ||
1697 | 77 | |||
1698 | 78 | |||
1699 | 79 | def create_lvm_volume_group(volume_group, block_device): | ||
1700 | 80 | ''' | ||
1701 | 81 | Create an LVM volume group backed by a given block device. | ||
1702 | 82 | |||
1703 | 83 | Assumes block device has already been initialized as an LVM PV. | ||
1704 | 84 | |||
1705 | 85 | :param volume_group: str: Name of volume group to create. | ||
1706 | 86 | :block_device: str: Full path of PV-initialized block device. | ||
1707 | 87 | ''' | ||
1708 | 88 | check_call(['vgcreate', volume_group, block_device]) | ||
1709 | 0 | 89 | ||
1710 | === added file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
1711 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000 | |||
1712 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-10-17 13:06:36 +0000 | |||
1713 | @@ -0,0 +1,53 @@ | |||
1714 | 1 | import os | ||
1715 | 2 | import re | ||
1716 | 3 | from stat import S_ISBLK | ||
1717 | 4 | |||
1718 | 5 | from subprocess import ( | ||
1719 | 6 | check_call, | ||
1720 | 7 | check_output, | ||
1721 | 8 | call | ||
1722 | 9 | ) | ||
1723 | 10 | |||
1724 | 11 | |||
1725 | 12 | def is_block_device(path): | ||
1726 | 13 | ''' | ||
1727 | 14 | Confirm device at path is a valid block device node. | ||
1728 | 15 | |||
1729 | 16 | :returns: boolean: True if path is a block device, False if not. | ||
1730 | 17 | ''' | ||
1731 | 18 | if not os.path.exists(path): | ||
1732 | 19 | return False | ||
1733 | 20 | return S_ISBLK(os.stat(path).st_mode) | ||
1734 | 21 | |||
1735 | 22 | |||
1736 | 23 | def zap_disk(block_device): | ||
1737 | 24 | ''' | ||
1738 | 25 | Clear a block device of partition table. Relies on sgdisk, which is | ||
1739 | 26 | installed as part of the 'gdisk' package in Ubuntu. | ||
1740 | 27 | |||
1741 | 28 | :param block_device: str: Full path of block device to clean. | ||
1742 | 29 | ''' | ||
1743 | 30 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up | ||
1744 | 31 | call(['sgdisk', '--zap-all', '--mbrtogpt', | ||
1745 | 32 | '--clear', block_device]) | ||
1746 | 33 | dev_end = check_output(['blockdev', '--getsz', block_device]) | ||
1747 | 34 | gpt_end = int(dev_end.split()[0]) - 100 | ||
1748 | 35 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), | ||
1749 | 36 | 'bs=1M', 'count=1']) | ||
1750 | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), | ||
1751 | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) | ||
1752 | 39 | |||
1753 | 40 | |||
1754 | 41 | def is_device_mounted(device): | ||
1755 | 42 | '''Given a device path, return True if that device is mounted, and False | ||
1756 | 43 | if it isn't. | ||
1757 | 44 | |||
1758 | 45 | :param device: str: Full path of the device to check. | ||
1759 | 46 | :returns: boolean: True if the path represents a mounted device, False if | ||
1760 | 47 | it doesn't. | ||
1761 | 48 | ''' | ||
1762 | 49 | is_partition = bool(re.search(r".*[0-9]+\b", device)) | ||
1763 | 50 | out = check_output(['mount']) | ||
1764 | 51 | if is_partition: | ||
1765 | 52 | return bool(re.search(device + r"\b", out)) | ||
1766 | 53 | return bool(re.search(device + r"[0-9]+\b", out)) | ||
1767 | 0 | 54 | ||
1768 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
1769 | --- hooks/charmhelpers/core/hookenv.py 2014-09-02 11:17:14 +0000 | |||
1770 | +++ hooks/charmhelpers/core/hookenv.py 2014-10-17 13:06:36 +0000 | |||
1771 | @@ -203,6 +203,17 @@ | |||
1772 | 203 | if os.path.exists(self.path): | 203 | if os.path.exists(self.path): |
1773 | 204 | self.load_previous() | 204 | self.load_previous() |
1774 | 205 | 205 | ||
1775 | 206 | def __getitem__(self, key): | ||
1776 | 207 | """For regular dict lookups, check the current juju config first, | ||
1777 | 208 | then the previous (saved) copy. This ensures that user-saved values | ||
1778 | 209 | will be returned by a dict lookup. | ||
1779 | 210 | |||
1780 | 211 | """ | ||
1781 | 212 | try: | ||
1782 | 213 | return dict.__getitem__(self, key) | ||
1783 | 214 | except KeyError: | ||
1784 | 215 | return (self._prev_dict or {})[key] | ||
1785 | 216 | |||
1786 | 206 | def load_previous(self, path=None): | 217 | def load_previous(self, path=None): |
1787 | 207 | """Load previous copy of config from disk. | 218 | """Load previous copy of config from disk. |
1788 | 208 | 219 | ||
1789 | @@ -475,9 +486,10 @@ | |||
1790 | 475 | hooks.execute(sys.argv) | 486 | hooks.execute(sys.argv) |
1791 | 476 | """ | 487 | """ |
1792 | 477 | 488 | ||
1794 | 478 | def __init__(self): | 489 | def __init__(self, config_save=True): |
1795 | 479 | super(Hooks, self).__init__() | 490 | super(Hooks, self).__init__() |
1796 | 480 | self._hooks = {} | 491 | self._hooks = {} |
1797 | 492 | self._config_save = config_save | ||
1798 | 481 | 493 | ||
1799 | 482 | def register(self, name, function): | 494 | def register(self, name, function): |
1800 | 483 | """Register a hook""" | 495 | """Register a hook""" |
1801 | @@ -488,9 +500,10 @@ | |||
1802 | 488 | hook_name = os.path.basename(args[0]) | 500 | hook_name = os.path.basename(args[0]) |
1803 | 489 | if hook_name in self._hooks: | 501 | if hook_name in self._hooks: |
1804 | 490 | self._hooks[hook_name]() | 502 | self._hooks[hook_name]() |
1808 | 491 | cfg = config() | 503 | if self._config_save: |
1809 | 492 | if cfg.implicit_save: | 504 | cfg = config() |
1810 | 493 | cfg.save() | 505 | if cfg.implicit_save: |
1811 | 506 | cfg.save() | ||
1812 | 494 | else: | 507 | else: |
1813 | 495 | raise UnregisteredHookError(hook_name) | 508 | raise UnregisteredHookError(hook_name) |
1814 | 496 | 509 | ||
1815 | 497 | 510 | ||
1816 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1817 | --- hooks/charmhelpers/core/host.py 2014-09-02 11:17:14 +0000 | |||
1818 | +++ hooks/charmhelpers/core/host.py 2014-10-17 13:06:36 +0000 | |||
1819 | @@ -6,6 +6,7 @@ | |||
1820 | 6 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> | 6 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> |
1821 | 7 | 7 | ||
1822 | 8 | import os | 8 | import os |
1823 | 9 | import re | ||
1824 | 9 | import pwd | 10 | import pwd |
1825 | 10 | import grp | 11 | import grp |
1826 | 11 | import random | 12 | import random |
1827 | @@ -68,8 +69,8 @@ | |||
1828 | 68 | """Determine whether a system service is available""" | 69 | """Determine whether a system service is available""" |
1829 | 69 | try: | 70 | try: |
1830 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | 71 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) |
1833 | 71 | except subprocess.CalledProcessError: | 72 | except subprocess.CalledProcessError as e: |
1834 | 72 | return False | 73 | return 'unrecognized service' not in e.output |
1835 | 73 | else: | 74 | else: |
1836 | 74 | return True | 75 | return True |
1837 | 75 | 76 | ||
1838 | @@ -209,10 +210,15 @@ | |||
1839 | 209 | return system_mounts | 210 | return system_mounts |
1840 | 210 | 211 | ||
1841 | 211 | 212 | ||
1844 | 212 | def file_hash(path): | 213 | def file_hash(path, hash_type='md5'): |
1845 | 213 | """Generate a md5 hash of the contents of 'path' or None if not found """ | 214 | """ |
1846 | 215 | Generate a hash checksum of the contents of 'path' or None if not found. | ||
1847 | 216 | |||
1848 | 217 | :param str hash_type: Any hash algorithm supported by :mod:`hashlib`, | ||
1849 | 218 | such as md5, sha1, sha256, sha512, etc. | ||
1850 | 219 | """ | ||
1851 | 214 | if os.path.exists(path): | 220 | if os.path.exists(path): |
1853 | 215 | h = hashlib.md5() | 221 | h = getattr(hashlib, hash_type)() |
1854 | 216 | with open(path, 'r') as source: | 222 | with open(path, 'r') as source: |
1855 | 217 | h.update(source.read()) # IGNORE:E1101 - it does have update | 223 | h.update(source.read()) # IGNORE:E1101 - it does have update |
1856 | 218 | return h.hexdigest() | 224 | return h.hexdigest() |
1857 | @@ -220,6 +226,26 @@ | |||
1858 | 220 | return None | 226 | return None |
1859 | 221 | 227 | ||
1860 | 222 | 228 | ||
1861 | 229 | def check_hash(path, checksum, hash_type='md5'): | ||
1862 | 230 | """ | ||
1863 | 231 | Validate a file using a cryptographic checksum. | ||
1864 | 232 | |||
1865 | 233 | :param str checksum: Value of the checksum used to validate the file. | ||
1866 | 234 | :param str hash_type: Hash algorithm used to generate `checksum`. | ||
1867 | 235 | Can be any hash algorithm supported by :mod:`hashlib`, | ||
1868 | 236 | such as md5, sha1, sha256, sha512, etc. | ||
1869 | 237 | :raises ChecksumError: If the file fails the checksum | ||
1870 | 238 | |||
1871 | 239 | """ | ||
1872 | 240 | actual_checksum = file_hash(path, hash_type) | ||
1873 | 241 | if checksum != actual_checksum: | ||
1874 | 242 | raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) | ||
1875 | 243 | |||
1876 | 244 | |||
1877 | 245 | class ChecksumError(ValueError): | ||
1878 | 246 | pass | ||
1879 | 247 | |||
1880 | 248 | |||
1881 | 223 | def restart_on_change(restart_map, stopstart=False): | 249 | def restart_on_change(restart_map, stopstart=False): |
1882 | 224 | """Restart services based on configuration files changing | 250 | """Restart services based on configuration files changing |
1883 | 225 | 251 | ||
1884 | @@ -292,7 +318,13 @@ | |||
1885 | 292 | ip_output = (line for line in ip_output if line) | 318 | ip_output = (line for line in ip_output if line) |
1886 | 293 | for line in ip_output: | 319 | for line in ip_output: |
1887 | 294 | if line.split()[1].startswith(int_type): | 320 | if line.split()[1].startswith(int_type): |
1889 | 295 | interfaces.append(line.split()[1].replace(":", "")) | 321 | matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) |
1890 | 322 | if matched: | ||
1891 | 323 | interface = matched.groups()[0] | ||
1892 | 324 | else: | ||
1893 | 325 | interface = line.split()[1].replace(":", "") | ||
1894 | 326 | interfaces.append(interface) | ||
1895 | 327 | |||
1896 | 296 | return interfaces | 328 | return interfaces |
1897 | 297 | 329 | ||
1898 | 298 | 330 | ||
1899 | 299 | 331 | ||
1900 | === modified file 'hooks/charmhelpers/core/services/helpers.py' | |||
1901 | --- hooks/charmhelpers/core/services/helpers.py 2014-09-02 11:17:14 +0000 | |||
1902 | +++ hooks/charmhelpers/core/services/helpers.py 2014-10-17 13:06:36 +0000 | |||
1903 | @@ -1,3 +1,5 @@ | |||
1904 | 1 | import os | ||
1905 | 2 | import yaml | ||
1906 | 1 | from charmhelpers.core import hookenv | 3 | from charmhelpers.core import hookenv |
1907 | 2 | from charmhelpers.core import templating | 4 | from charmhelpers.core import templating |
1908 | 3 | 5 | ||
1909 | @@ -19,15 +21,21 @@ | |||
1910 | 19 | the `name` attribute that are complete will be used to populate the dictionary | 21 | the `name` attribute that are complete will be used to populate the dictionary |
1911 | 20 | values (see `get_data`, below). | 22 | values (see `get_data`, below). |
1912 | 21 | 23 | ||
1915 | 22 | The generated context will be namespaced under the interface type, to prevent | 24 | The generated context will be namespaced under the relation :attr:`name`, |
1916 | 23 | potential naming conflicts. | 25 | to prevent potential naming conflicts. |
1917 | 26 | |||
1918 | 27 | :param str name: Override the relation :attr:`name`, since it can vary from charm to charm | ||
1919 | 28 | :param list additional_required_keys: Extend the list of :attr:`required_keys` | ||
1920 | 24 | """ | 29 | """ |
1921 | 25 | name = None | 30 | name = None |
1922 | 26 | interface = None | 31 | interface = None |
1923 | 27 | required_keys = [] | 32 | required_keys = [] |
1924 | 28 | 33 | ||
1927 | 29 | def __init__(self, *args, **kwargs): | 34 | def __init__(self, name=None, additional_required_keys=None): |
1928 | 30 | super(RelationContext, self).__init__(*args, **kwargs) | 35 | if name is not None: |
1929 | 36 | self.name = name | ||
1930 | 37 | if additional_required_keys is not None: | ||
1931 | 38 | self.required_keys.extend(additional_required_keys) | ||
1932 | 31 | self.get_data() | 39 | self.get_data() |
1933 | 32 | 40 | ||
1934 | 33 | def __bool__(self): | 41 | def __bool__(self): |
1935 | @@ -101,9 +109,115 @@ | |||
1936 | 101 | return {} | 109 | return {} |
1937 | 102 | 110 | ||
1938 | 103 | 111 | ||
1939 | 112 | class MysqlRelation(RelationContext): | ||
1940 | 113 | """ | ||
1941 | 114 | Relation context for the `mysql` interface. | ||
1942 | 115 | |||
1943 | 116 | :param str name: Override the relation :attr:`name`, since it can vary from charm to charm | ||
1944 | 117 | :param list additional_required_keys: Extend the list of :attr:`required_keys` | ||
1945 | 118 | """ | ||
1946 | 119 | name = 'db' | ||
1947 | 120 | interface = 'mysql' | ||
1948 | 121 | required_keys = ['host', 'user', 'password', 'database'] | ||
1949 | 122 | |||
1950 | 123 | |||
1951 | 124 | class HttpRelation(RelationContext): | ||
1952 | 125 | """ | ||
1953 | 126 | Relation context for the `http` interface. | ||
1954 | 127 | |||
1955 | 128 | :param str name: Override the relation :attr:`name`, since it can vary from charm to charm | ||
1956 | 129 | :param list additional_required_keys: Extend the list of :attr:`required_keys` | ||
1957 | 130 | """ | ||
1958 | 131 | name = 'website' | ||
1959 | 132 | interface = 'http' | ||
1960 | 133 | required_keys = ['host', 'port'] | ||
1961 | 134 | |||
1962 | 135 | def provide_data(self): | ||
1963 | 136 | return { | ||
1964 | 137 | 'host': hookenv.unit_get('private-address'), | ||
1965 | 138 | 'port': 80, | ||
1966 | 139 | } | ||
1967 | 140 | |||
1968 | 141 | |||
1969 | 142 | class RequiredConfig(dict): | ||
1970 | 143 | """ | ||
1971 | 144 | Data context that loads config options with one or more mandatory options. | ||
1972 | 145 | |||
1973 | 146 | Once the required options have been changed from their default values, all | ||
1974 | 147 | config options will be available, namespaced under `config` to prevent | ||
1975 | 148 | potential naming conflicts (for example, between a config option and a | ||
1976 | 149 | relation property). | ||
1977 | 150 | |||
1978 | 151 | :param list *args: List of options that must be changed from their default values. | ||
1979 | 152 | """ | ||
1980 | 153 | |||
1981 | 154 | def __init__(self, *args): | ||
1982 | 155 | self.required_options = args | ||
1983 | 156 | self['config'] = hookenv.config() | ||
1984 | 157 | with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: | ||
1985 | 158 | self.config = yaml.load(fp).get('options', {}) | ||
1986 | 159 | |||
1987 | 160 | def __bool__(self): | ||
1988 | 161 | for option in self.required_options: | ||
1989 | 162 | if option not in self['config']: | ||
1990 | 163 | return False | ||
1991 | 164 | current_value = self['config'][option] | ||
1992 | 165 | default_value = self.config[option].get('default') | ||
1993 | 166 | if current_value == default_value: | ||
1994 | 167 | return False | ||
1995 | 168 | if current_value in (None, '') and default_value in (None, ''): | ||
1996 | 169 | return False | ||
1997 | 170 | return True | ||
1998 | 171 | |||
1999 | 172 | def __nonzero__(self): | ||
2000 | 173 | return self.__bool__() | ||
2001 | 174 | |||
2002 | 175 | |||
2003 | 176 | class StoredContext(dict): | ||
2004 | 177 | """ | ||
2005 | 178 | A data context that always returns the data that it was first created with. | ||
2006 | 179 | |||
2007 | 180 | This is useful to do a one-time generation of things like passwords, that | ||
2008 | 181 | will thereafter use the same value that was originally generated, instead | ||
2009 | 182 | of generating a new value each time it is run. | ||
2010 | 183 | """ | ||
2011 | 184 | def __init__(self, file_name, config_data): | ||
2012 | 185 | """ | ||
2013 | 186 | If the file exists, populate `self` with the data from the file. | ||
2014 | 187 | Otherwise, populate with the given data and persist it to the file. | ||
2015 | 188 | """ | ||
2016 | 189 | if os.path.exists(file_name): | ||
2017 | 190 | self.update(self.read_context(file_name)) | ||
2018 | 191 | else: | ||
2019 | 192 | self.store_context(file_name, config_data) | ||
2020 | 193 | self.update(config_data) | ||
2021 | 194 | |||
2022 | 195 | def store_context(self, file_name, config_data): | ||
2023 | 196 | if not os.path.isabs(file_name): | ||
2024 | 197 | file_name = os.path.join(hookenv.charm_dir(), file_name) | ||
2025 | 198 | with open(file_name, 'w') as file_stream: | ||
2026 | 199 | os.fchmod(file_stream.fileno(), 0600) | ||
2027 | 200 | yaml.dump(config_data, file_stream) | ||
2028 | 201 | |||
2029 | 202 | def read_context(self, file_name): | ||
2030 | 203 | if not os.path.isabs(file_name): | ||
2031 | 204 | file_name = os.path.join(hookenv.charm_dir(), file_name) | ||
2032 | 205 | with open(file_name, 'r') as file_stream: | ||
2033 | 206 | data = yaml.load(file_stream) | ||
2034 | 207 | if not data: | ||
2035 | 208 | raise OSError("%s is empty" % file_name) | ||
2036 | 209 | return data | ||
2037 | 210 | |||
2038 | 211 | |||
2039 | 104 | class TemplateCallback(ManagerCallback): | 212 | class TemplateCallback(ManagerCallback): |
2040 | 105 | """ | 213 | """ |
2042 | 106 | Callback class that will render a template, for use as a ready action. | 214 | Callback class that will render a Jinja2 template, for use as a ready action. |
2043 | 215 | |||
2044 | 216 | :param str source: The template source file, relative to `$CHARM_DIR/templates` | ||
2045 | 217 | :param str target: The target to write the rendered template to | ||
2046 | 218 | :param str owner: The owner of the rendered file | ||
2047 | 219 | :param str group: The group of the rendered file | ||
2048 | 220 | :param int perms: The permissions of the rendered file | ||
2049 | 107 | """ | 221 | """ |
2050 | 108 | def __init__(self, source, target, owner='root', group='root', perms=0444): | 222 | def __init__(self, source, target, owner='root', group='root', perms=0444): |
2051 | 109 | self.source = source | 223 | self.source = source |
2052 | 110 | 224 | ||
2053 | === added file 'hooks/charmhelpers/core/sysctl.py' | |||
2054 | --- hooks/charmhelpers/core/sysctl.py 1970-01-01 00:00:00 +0000 | |||
2055 | +++ hooks/charmhelpers/core/sysctl.py 2014-10-17 13:06:36 +0000 | |||
2056 | @@ -0,0 +1,34 @@ | |||
2057 | 1 | #!/usr/bin/env python | ||
2058 | 2 | # -*- coding: utf-8 -*- | ||
2059 | 3 | |||
2060 | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
2061 | 5 | |||
2062 | 6 | import yaml | ||
2063 | 7 | |||
2064 | 8 | from subprocess import check_call | ||
2065 | 9 | |||
2066 | 10 | from charmhelpers.core.hookenv import ( | ||
2067 | 11 | log, | ||
2068 | 12 | DEBUG, | ||
2069 | 13 | ) | ||
2070 | 14 | |||
2071 | 15 | |||
2072 | 16 | def create(sysctl_dict, sysctl_file): | ||
2073 | 17 | """Creates a sysctl.conf file from a YAML associative array | ||
2074 | 18 | |||
2075 | 19 | :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" | ||
2076 | 20 | :type sysctl_dict: str or unicode | ||
2077 | 21 | :param sysctl_file: path to the sysctl file to be saved | ||
2078 | 22 | :type sysctl_file: str or unicode | ||
2079 | 23 | :returns: None | ||
2080 | 24 | """ | ||
2081 | 25 | sysctl_dict = yaml.load(sysctl_dict) | ||
2082 | 26 | |||
2083 | 27 | with open(sysctl_file, "w") as fd: | ||
2084 | 28 | for key, value in sysctl_dict.items(): | ||
2085 | 29 | fd.write("{}={}\n".format(key, value)) | ||
2086 | 30 | |||
2087 | 31 | log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), | ||
2088 | 32 | level=DEBUG) | ||
2089 | 33 | |||
2090 | 34 | check_call(["sysctl", "-p", sysctl_file]) | ||
2091 | 0 | 35 | ||
2092 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
2093 | --- hooks/charmhelpers/fetch/__init__.py 2014-09-02 11:17:14 +0000 | |||
2094 | +++ hooks/charmhelpers/fetch/__init__.py 2014-10-17 13:06:36 +0000 | |||
2095 | @@ -208,7 +208,8 @@ | |||
2096 | 208 | """Add a package source to this system. | 208 | """Add a package source to this system. |
2097 | 209 | 209 | ||
2098 | 210 | @param source: a URL or sources.list entry, as supported by | 210 | @param source: a URL or sources.list entry, as supported by |
2100 | 211 | add-apt-repository(1). Examples: | 211 | add-apt-repository(1). Examples:: |
2101 | 212 | |||
2102 | 212 | ppa:charmers/example | 213 | ppa:charmers/example |
2103 | 213 | deb https://stub:key@private.example.com/ubuntu trusty main | 214 | deb https://stub:key@private.example.com/ubuntu trusty main |
2104 | 214 | 215 | ||
2105 | @@ -311,22 +312,35 @@ | |||
2106 | 311 | apt_update(fatal=True) | 312 | apt_update(fatal=True) |
2107 | 312 | 313 | ||
2108 | 313 | 314 | ||
2110 | 314 | def install_remote(source): | 315 | def install_remote(source, *args, **kwargs): |
2111 | 315 | """ | 316 | """ |
2112 | 316 | Install a file tree from a remote source | 317 | Install a file tree from a remote source |
2113 | 317 | 318 | ||
2114 | 318 | The specified source should be a url of the form: | 319 | The specified source should be a url of the form: |
2115 | 319 | scheme://[host]/path[#[option=value][&...]] | 320 | scheme://[host]/path[#[option=value][&...]] |
2116 | 320 | 321 | ||
2119 | 321 | Schemes supported are based on this module's submodules | 322 | Schemes supported are based on this module's submodules.
2120 | 322 | Options supported are submodule-specific""" | 323 | Options supported are submodule-specific. |
2121 | 324 | Additional arguments are passed through to the submodule. | ||
2122 | 325 | |||
2123 | 326 | For example:: | ||
2124 | 327 | |||
2125 | 328 | dest = install_remote('http://example.com/archive.tgz', | ||
2126 | 329 | checksum='deadbeef', | ||
2127 | 330 | hash_type='sha1') | ||
2128 | 331 | |||
2129 | 332 | This will download `archive.tgz`, validate it using SHA1 and, if | ||
2130 | 333 | the file is ok, extract it and return the directory in which it | ||
2131 | 334 | was extracted. If the checksum fails, it will raise | ||
2132 | 335 | :class:`charmhelpers.core.host.ChecksumError`. | ||
2133 | 336 | """ | ||
2134 | 323 | # We ONLY check for True here because can_handle may return a string | 337 | # We ONLY check for True here because can_handle may return a string |
2135 | 324 | # explaining why it can't handle a given source. | 338 | # explaining why it can't handle a given source. |
2136 | 325 | handlers = [h for h in plugins() if h.can_handle(source) is True] | 339 | handlers = [h for h in plugins() if h.can_handle(source) is True] |
2137 | 326 | installed_to = None | 340 | installed_to = None |
2138 | 327 | for handler in handlers: | 341 | for handler in handlers: |
2139 | 328 | try: | 342 | try: |
2141 | 329 | installed_to = handler.install(source) | 343 | installed_to = handler.install(source, *args, **kwargs) |
2142 | 330 | except UnhandledSource: | 344 | except UnhandledSource: |
2143 | 331 | pass | 345 | pass |
2144 | 332 | if not installed_to: | 346 | if not installed_to: |
2145 | 333 | 347 | ||
2146 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
2147 | --- hooks/charmhelpers/fetch/archiveurl.py 2014-09-02 11:17:14 +0000 | |||
2148 | +++ hooks/charmhelpers/fetch/archiveurl.py 2014-10-17 13:06:36 +0000 | |||
2149 | @@ -1,6 +1,8 @@ | |||
2150 | 1 | import os | 1 | import os |
2151 | 2 | import urllib2 | 2 | import urllib2 |
2152 | 3 | from urllib import urlretrieve | ||
2153 | 3 | import urlparse | 4 | import urlparse |
2154 | 5 | import hashlib | ||
2155 | 4 | 6 | ||
2156 | 5 | from charmhelpers.fetch import ( | 7 | from charmhelpers.fetch import ( |
2157 | 6 | BaseFetchHandler, | 8 | BaseFetchHandler, |
2158 | @@ -10,11 +12,19 @@ | |||
2159 | 10 | get_archive_handler, | 12 | get_archive_handler, |
2160 | 11 | extract, | 13 | extract, |
2161 | 12 | ) | 14 | ) |
2163 | 13 | from charmhelpers.core.host import mkdir | 15 | from charmhelpers.core.host import mkdir, check_hash |
2164 | 14 | 16 | ||
2165 | 15 | 17 | ||
2166 | 16 | class ArchiveUrlFetchHandler(BaseFetchHandler): | 18 | class ArchiveUrlFetchHandler(BaseFetchHandler): |
2168 | 17 | """Handler for archives via generic URLs""" | 19 | """ |
2169 | 20 | Handler to download archive files from arbitrary URLs. | ||
2170 | 21 | |||
2171 | 22 | Can fetch from http, https, ftp, and file URLs. | ||
2172 | 23 | |||
2173 | 24 | Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. | ||
2174 | 25 | |||
2175 | 26 | Installs the contents of the archive in $CHARM_DIR/fetched/. | ||
2176 | 27 | """ | ||
2177 | 18 | def can_handle(self, source): | 28 | def can_handle(self, source): |
2178 | 19 | url_parts = self.parse_url(source) | 29 | url_parts = self.parse_url(source) |
2179 | 20 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): | 30 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
2180 | @@ -24,6 +34,12 @@ | |||
2181 | 24 | return False | 34 | return False |
2182 | 25 | 35 | ||
2183 | 26 | def download(self, source, dest): | 36 | def download(self, source, dest): |
2184 | 37 | """ | ||
2185 | 38 | Download an archive file. | ||
2186 | 39 | |||
2187 | 40 | :param str source: URL pointing to an archive file. | ||
2188 | 41 | :param str dest: Local path location to download archive file to. | ||
2189 | 42 | """ | ||
2190 | 27 | # propagate all exceptions | 43 | # propagate all exceptions
2191 | 28 | # URLError, OSError, etc | 44 | # URLError, OSError, etc |
2192 | 29 | proto, netloc, path, params, query, fragment = urlparse.urlparse(source) | 45 | proto, netloc, path, params, query, fragment = urlparse.urlparse(source) |
2193 | @@ -48,7 +64,30 @@ | |||
2194 | 48 | os.unlink(dest) | 64 | os.unlink(dest) |
2195 | 49 | raise e | 65 | raise e |
2196 | 50 | 66 | ||
2198 | 51 | def install(self, source): | 67 | # Mandatory file validation via Sha1 or MD5 hashing. |
2199 | 68 | def download_and_validate(self, url, hashsum, validate="sha1"): | ||
2200 | 69 | tempfile, headers = urlretrieve(url) | ||
2201 | 70 | check_hash(tempfile, hashsum, validate) | ||
2202 | 71 | return tempfile | ||
2203 | 72 | |||
2204 | 73 | def install(self, source, dest=None, checksum=None, hash_type='sha1'): | ||
2205 | 74 | """ | ||
2206 | 75 | Download and install an archive file, with optional checksum validation. | ||
2207 | 76 | |||
2208 | 77 | The checksum can also be given on the `source` URL's fragment. | ||
2209 | 78 | For example:: | ||
2210 | 79 | |||
2211 | 80 | handler.install('http://example.com/file.tgz#sha1=deadbeef') | ||
2212 | 81 | |||
2213 | 82 | :param str source: URL pointing to an archive file. | ||
2214 | 83 | :param str dest: Local destination path to install to. If not given, | ||
2215 | 84 | installs to `$CHARM_DIR/archives/archive_file_name`. | ||
2216 | 85 | :param str checksum: If given, validate the archive file after download. | ||
2217 | 86 | :param str hash_type: Algorithm used to generate `checksum`. | ||
2218 | 87 | Can be any hash algorithm supported by :mod:`hashlib`, | ||
2219 | 88 | such as md5, sha1, sha256, sha512, etc. | ||
2220 | 89 | |||
2221 | 90 | """ | ||
2222 | 52 | url_parts = self.parse_url(source) | 91 | url_parts = self.parse_url(source) |
2223 | 53 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') | 92 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') |
2224 | 54 | if not os.path.exists(dest_dir): | 93 | if not os.path.exists(dest_dir): |
2225 | @@ -60,4 +99,10 @@ | |||
2226 | 60 | raise UnhandledSource(e.reason) | 99 | raise UnhandledSource(e.reason) |
2227 | 61 | except OSError as e: | 100 | except OSError as e: |
2228 | 62 | raise UnhandledSource(e.strerror) | 101 | raise UnhandledSource(e.strerror) |
2230 | 63 | return extract(dld_file) | 102 | options = urlparse.parse_qs(url_parts.fragment) |
2231 | 103 | for key, value in options.items(): | ||
2232 | 104 | if key in hashlib.algorithms: | ||
2233 | 105 | check_hash(dld_file, value, key) | ||
2234 | 106 | if checksum: | ||
2235 | 107 | check_hash(dld_file, checksum, hash_type) | ||
2236 | 108 | return extract(dld_file, dest) | ||
2237 | 64 | 109 | ||
2238 | === added file 'hooks/zeromq_context.py' | |||
2239 | --- hooks/zeromq_context.py 1970-01-01 00:00:00 +0000 | |||
2240 | +++ hooks/zeromq_context.py 2014-10-17 13:06:36 +0000 | |||
2241 | @@ -0,0 +1,41 @@ | |||
2242 | 1 | import json | ||
2243 | 2 | from charmhelpers.core.hookenv import ( | ||
2244 | 3 | relation_ids, | ||
2245 | 4 | related_units, | ||
2246 | 5 | relation_get, | ||
2247 | 6 | unit_private_ip, | ||
2248 | 7 | ) | ||
2249 | 8 | from charmhelpers.contrib.openstack import context | ||
2250 | 9 | import socket | ||
2251 | 10 | |||
2252 | 11 | class MatchmakerContext(context.OSContextGenerator): | ||
2253 | 12 | |||
2254 | 13 | def __call__(self): | ||
2255 | 14 | topics = {} | ||
2256 | 15 | for rid in relation_ids('topology'): | ||
2257 | 16 | for unit in related_units(rid): | ||
2258 | 17 | topic_info = relation_get(unit=unit, rid=rid) | ||
2259 | 18 | if 'topics' in topic_info and 'host' in topic_info: | ||
2260 | 19 | for topic in topic_info['topics'].split(): | ||
2261 | 20 | if topic in topics: | ||
2262 | 21 | topics[topic].append(topic_info['host']) | ||
2263 | 22 | else: | ||
2264 | 23 | topics[topic] = [topic_info['host']] | ||
2265 | 24 | for rid in relation_ids('zeromq-configuration'): | ||
2266 | 25 | for unit in related_units(rid): | ||
2267 | 26 | topic_info = relation_get(unit=unit, rid=rid) | ||
2268 | 27 | topic_info['host'] = socket.gethostname() | ||
2269 | 28 | if 'topics' in topic_info: | ||
2270 | 29 | for topic in topic_info['topics'].split(): | ||
2271 | 30 | if topic in topics: | ||
2272 | 31 | topics[topic].append(topic_info['host']) | ||
2273 | 32 | else: | ||
2274 | 33 | topics[topic] = [topic_info['host']] | ||
2275 | 34 | return {'topology': json.dumps(topics, indent=4)} | ||
2276 | 35 | |||
2277 | 36 | |||
2278 | 37 | class OsloZMQContext(context.OSContextGenerator): | ||
2279 | 38 | |||
2280 | 39 | def __call__(self): | ||
2281 | 40 | |||
2282 | 41 | return {'zmq_host': socket.gethostname()} | ||
2283 | 0 | 42 | ||
2284 | === modified file 'hooks/zeromq_hooks.py' | |||
2285 | --- hooks/zeromq_hooks.py 2014-10-15 11:29:28 +0000 | |||
2286 | +++ hooks/zeromq_hooks.py 2014-10-17 13:06:36 +0000 | |||
2287 | @@ -1,13 +1,10 @@ | |||
2288 | 1 | #!/usr/bin/python | 1 | #!/usr/bin/python |
2289 | 2 | 2 | ||
2292 | 3 | import json | 3 | import socket |
2291 | 4 | import shutil | ||
2293 | 5 | import sys | 4 | import sys |
2294 | 6 | import uuid | 5 | import uuid |
2295 | 7 | import socket | ||
2296 | 8 | 6 | ||
2297 | 9 | from charmhelpers.fetch import add_source | 7 | from charmhelpers.fetch import add_source |
2298 | 10 | from charmhelpers.core.templating import render | ||
2299 | 11 | from charmhelpers.fetch import apt_install, apt_update | 8 | from charmhelpers.fetch import apt_install, apt_update |
2300 | 12 | from charmhelpers.core.host import ( | 9 | from charmhelpers.core.host import ( |
2301 | 13 | adduser, | 10 | adduser, |
2302 | @@ -19,66 +16,37 @@ | |||
2303 | 19 | from charmhelpers.core.hookenv import ( | 16 | from charmhelpers.core.hookenv import ( |
2304 | 20 | Hooks, | 17 | Hooks, |
2305 | 21 | UnregisteredHookError, | 18 | UnregisteredHookError, |
2306 | 22 | charm_dir, | ||
2307 | 23 | log, | 19 | log, |
2308 | 24 | relation_get, | 20 | relation_get, |
2309 | 25 | relation_ids, | 21 | relation_ids, |
2310 | 26 | relation_set, | 22 | relation_set, |
2312 | 27 | related_units, | 23 | ) |
2313 | 24 | from zeromq_utils import ( | ||
2314 | 25 | determine_packages, | ||
2315 | 26 | get_principle_topics, | ||
2316 | 27 | register_configs, | ||
2317 | 28 | restart_map, | ||
2318 | 29 | write_oslo_upstart, | ||
2319 | 28 | ) | 30 | ) |
2320 | 29 | 31 | ||
2321 | 30 | hooks = Hooks() | 32 | hooks = Hooks() |
2322 | 33 | CONFIGS = register_configs() | ||
2323 | 31 | 34 | ||
2324 | 32 | 35 | ||
2325 | 33 | @hooks.hook('install') | 36 | @hooks.hook('install') |
2326 | 34 | def install(): | 37 | def install(): |
2327 | 35 | add_source('ppa:james-page/0mq') | 38 | add_source('ppa:james-page/0mq') |
2328 | 36 | apt_update() | 39 | apt_update() |
2330 | 37 | apt_install(['python-zmq', 'python-oslo.messaging'], fatal=True) | 40 | apt_install(determine_packages(), fatal=True) |
2331 | 38 | adduser('oslo', password='oslo', system_user=True) | 41 | adduser('oslo', password='oslo', system_user=True) |
2332 | 39 | mkdir('/etc/oslo/', owner='oslo', group='oslo', perms=0755) | 42 | mkdir('/etc/oslo/', owner='oslo', group='oslo', perms=0755) |
2333 | 40 | 43 | ||
2334 | 41 | 44 | ||
2335 | 42 | def write_mapping(): | ||
2336 | 43 | topics = {} | ||
2337 | 44 | topology_file = '/etc/oslo/matchmaker_ring.json' | ||
2338 | 45 | for rid in relation_ids('topology'): | ||
2339 | 46 | for unit in related_units(rid): | ||
2340 | 47 | topic_info = relation_get(unit=unit, rid=rid) | ||
2341 | 48 | if 'topics' in topic_info and 'host' in topic_info: | ||
2342 | 49 | for topic in topic_info['topics'].split(): | ||
2343 | 50 | if topic in topics: | ||
2344 | 51 | topics[topic].append(topic_info['host']) | ||
2345 | 52 | else: | ||
2346 | 53 | topics[topic] = [topic_info['host']] | ||
2347 | 54 | for rid in relation_ids('zeromq-configuration'): | ||
2348 | 55 | for unit in related_units(rid): | ||
2349 | 56 | topic_info = relation_get(unit=unit, rid=rid) | ||
2350 | 57 | topic_info['host'] = socket.gethostname() | ||
2351 | 58 | if 'topics' in topic_info: | ||
2352 | 59 | for topic in topic_info['topics'].split(): | ||
2353 | 60 | if topic in topics: | ||
2354 | 61 | topics[topic].append(topic_info['host']) | ||
2355 | 62 | else: | ||
2356 | 63 | topics[topic] = [topic_info['host']] | ||
2357 | 64 | with open(topology_file, 'w') as outfile: | ||
2358 | 65 | json.dump(topics, outfile, indent=4) | ||
2359 | 66 | oslo_msg_file = 'oslo-messaging.conf' | ||
2360 | 67 | |||
2361 | 68 | ctxt = { | ||
2362 | 69 | 'zmq_host': socket.gethostname(), | ||
2363 | 70 | } | ||
2364 | 71 | render(oslo_msg_file, '/etc/oslo/oslo-messaging.conf', ctxt) | ||
2365 | 72 | |||
2366 | 73 | @hooks.hook('config-changed') | 45 | @hooks.hook('config-changed') |
2371 | 74 | @restart_on_change({ | 46 | @restart_on_change(restart_map(), stopstart=True) |
2368 | 75 | '/etc/oslo/oslo-messaging.conf': ['oslo-messaging-zmq-receiver'], | ||
2369 | 76 | '/etc/init/oslo-messaging-zmq-receiver.conf': ['oslo-messaging-zmq-receiver'] | ||
2370 | 77 | }) | ||
2372 | 78 | def config_changed(): | 47 | def config_changed(): |
2376 | 79 | upstart_file = charm_dir() + '/files/' + 'oslo-messaging-zmq-receiver.conf' | 48 | write_oslo_upstart() |
2377 | 80 | shutil.copyfile(upstart_file, '/etc/init/oslo-messaging-zmq-receiver.conf') | 49 | CONFIGS.write_all() |
2375 | 81 | write_mapping() | ||
2378 | 82 | for rid in relation_ids('zeromq-configuration'): | 50 | for rid in relation_ids('zeromq-configuration'): |
2379 | 83 | relation_set(relation_id=rid, host=socket.gethostname()) | 51 | relation_set(relation_id=rid, host=socket.gethostname()) |
2380 | 84 | configuration_relation_joined(rid=rid, remote_restart=True) | 52 | configuration_relation_joined(rid=rid, remote_restart=True) |
2381 | @@ -91,17 +59,9 @@ | |||
2382 | 91 | if remote_restart: | 59 | if remote_restart: |
2383 | 92 | relation_set(relation_id=rid, nonce=str(uuid.uuid4())) | 60 | relation_set(relation_id=rid, nonce=str(uuid.uuid4())) |
2384 | 93 | 61 | ||
2385 | 94 | def get_principle_topics(): | ||
2386 | 95 | princile_topics = [] | ||
2387 | 96 | for rid in relation_ids('zeromq-configuration'): | ||
2388 | 97 | for unit in related_units(rid): | ||
2389 | 98 | topics = relation_get(attribute='topics', unit=unit, rid=rid) | ||
2390 | 99 | if topics: | ||
2391 | 100 | princile_topics += topics.split() | ||
2392 | 101 | return princile_topics | ||
2393 | 102 | |||
2394 | 103 | 62 | ||
2395 | 104 | @hooks.hook('zeromq-configuration-relation-changed') | 63 | @hooks.hook('zeromq-configuration-relation-changed') |
2396 | 64 | @restart_on_change(restart_map(), stopstart=True) | ||
2397 | 105 | def configuration_relation_changed(): | 65 | def configuration_relation_changed(): |
2398 | 106 | rel_info = relation_get() | 66 | rel_info = relation_get() |
2399 | 107 | if 'users' in rel_info: | 67 | if 'users' in rel_info: |
2400 | @@ -112,13 +72,14 @@ | |||
2401 | 112 | topics = " ".join(get_principle_topics()) | 72 | topics = " ".join(get_principle_topics()) |
2402 | 113 | relation_set(relation_id=rid, topics=topics, | 73 | relation_set(relation_id=rid, topics=topics, |
2403 | 114 | host=socket.gethostname()) | 74 | host=socket.gethostname()) |
2405 | 115 | write_mapping() | 75 | CONFIGS.write_all() |
2406 | 116 | 76 | ||
2407 | 117 | 77 | ||
2408 | 118 | @hooks.hook('topology-relation-changed', | 78 | @hooks.hook('topology-relation-changed', |
2409 | 119 | 'topology-relation-departed') | 79 | 'topology-relation-departed') |
2410 | 80 | @restart_on_change(restart_map(), stopstart=True) | ||
2411 | 120 | def topology_relation_changed(): | 81 | def topology_relation_changed(): |
2413 | 121 | write_mapping() | 82 | CONFIGS.write_all() |
2414 | 122 | # NOTE: drop when auto-reload of config file is implemented | 83 | # NOTE: drop when auto-reload of config file is implemented |
2415 | 123 | for rid in relation_ids('zeromq-configuration'): | 84 | for rid in relation_ids('zeromq-configuration'): |
2416 | 124 | configuration_relation_joined(rid=rid, remote_restart=True) | 85 | configuration_relation_joined(rid=rid, remote_restart=True) |
2417 | 125 | 86 | ||
2418 | === added file 'hooks/zeromq_utils.py' | |||
2419 | --- hooks/zeromq_utils.py 1970-01-01 00:00:00 +0000 | |||
2420 | +++ hooks/zeromq_utils.py 2014-10-17 13:06:36 +0000 | |||
2421 | @@ -0,0 +1,78 @@ | |||
2422 | 1 | from collections import OrderedDict | ||
2423 | 2 | from copy import deepcopy | ||
2424 | 3 | import zeromq_context | ||
2425 | 4 | from charmhelpers.contrib.openstack import templating | ||
2426 | 5 | import shutil | ||
2427 | 6 | from charmhelpers.core.host import ( | ||
2428 | 7 | service_running, | ||
2429 | 8 | service_start, | ||
2430 | 9 | ) | ||
2431 | 10 | from charmhelpers.core.hookenv import charm_dir | ||
2432 | 11 | from charmhelpers.core.hookenv import ( | ||
2433 | 12 | relation_get, | ||
2434 | 13 | relation_ids, | ||
2435 | 14 | related_units, | ||
2436 | 15 | ) | ||
2437 | 16 | MATCHMAKER_CONF = "/etc/oslo/matchmaker_ring.json" | ||
2438 | 17 | OSLO_MSG_CONF = "/etc/oslo/oslo-messaging.conf" | ||
2439 | 18 | OSLO_UPSTART_CONF = "/etc/init/oslo-messaging-zmq-receiver.conf" | ||
2440 | 19 | TEMPLATES = 'templates/' | ||
2441 | 20 | |||
2442 | 21 | BASE_RESOURCE_MAP = OrderedDict([ | ||
2443 | 22 | (MATCHMAKER_CONF, { | ||
2444 | 23 | 'services': ['oslo-messaging-zmq-receiver'], | ||
2445 | 24 | 'contexts': [zeromq_context.MatchmakerContext()], | ||
2446 | 25 | }), | ||
2447 | 26 | (OSLO_MSG_CONF, { | ||
2448 | 27 | 'services': ['oslo-messaging-zmq-receiver'], | ||
2449 | 28 | 'contexts': [zeromq_context.OsloZMQContext()], | ||
2450 | 29 | }), | ||
2451 | 30 | ]) | ||
2452 | 31 | BASE_PACKAGES = [ | ||
2453 | 32 | 'python-zmq', | ||
2454 | 33 | 'python-oslo.messaging', | ||
2455 | 34 | ] | ||
2456 | 35 | |||
2457 | 36 | |||
2458 | 37 | def determine_packages(): | ||
2459 | 38 | return BASE_PACKAGES | ||
2460 | 39 | |||
2461 | 40 | |||
2462 | 41 | def resource_map(): | ||
2463 | 42 | ''' | ||
2464 | 43 | Dynamically generate a map of resources that will be managed for a single | ||
2465 | 44 | hook execution. | ||
2466 | 45 | ''' | ||
2467 | 46 | resource_map = deepcopy(BASE_RESOURCE_MAP) | ||
2468 | 47 | return resource_map | ||
2469 | 48 | |||
2470 | 49 | |||
2471 | 50 | def register_configs(release=None): | ||
2472 | 51 | configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, | ||
2473 | 52 | openstack_release='juno') | ||
2474 | 53 | for cfg, rscs in resource_map().iteritems(): | ||
2475 | 54 | configs.register(cfg, rscs['contexts']) | ||
2476 | 55 | return configs | ||
2477 | 56 | |||
2478 | 57 | |||
2479 | 58 | def restart_map(): | ||
2480 | 59 | return OrderedDict([(cfg, v['services']) | ||
2481 | 60 | for cfg, v in resource_map().iteritems() | ||
2482 | 61 | if v['services']]) | ||
2483 | 62 | |||
2484 | 63 | |||
2485 | 64 | def write_oslo_upstart(): | ||
2486 | 65 | upstart_file = charm_dir() + '/files/oslo-messaging-zmq-receiver.conf' | ||
2487 | 66 | shutil.copyfile(upstart_file, OSLO_UPSTART_CONF) | ||
2488 | 67 | if not service_running('oslo-messaging-zmq-receiver'): | ||
2489 | 68 | service_start('oslo-messaging-zmq-receiver') | ||
2490 | 69 | |||
2491 | 70 | |||
2492 | 71 | def get_principle_topics(): | ||
2493 | 72 | principle_topics = [] | ||
2494 | 73 | for rid in relation_ids('zeromq-configuration'): | ||
2495 | 74 | for unit in related_units(rid): | ||
2496 | 75 | topics = relation_get(attribute='topics', unit=unit, rid=rid) | ||
2497 | 76 | if topics: | ||
2498 | 77 | principle_topics += topics.split() | ||
2499 | 78 | return principle_topics | ||
2500 | 0 | 79 | ||
2501 | === added file 'templates/matchmaker_ring.json' | |||
2502 | --- templates/matchmaker_ring.json 1970-01-01 00:00:00 +0000 | |||
2503 | +++ templates/matchmaker_ring.json 2014-10-17 13:06:36 +0000 | |||
2504 | @@ -0,0 +1,1 @@ | |||
2505 | 1 | {{ topology }} | ||
2506 | 0 | 2 | ||
2507 | === modified file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
2508 | --- tests/charmhelpers/contrib/amulet/deployment.py 2014-09-02 11:17:14 +0000 | |||
2509 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-10-17 13:06:36 +0000 | |||
2510 | @@ -24,25 +24,31 @@ | |||
2511 | 24 | """Add services. | 24 | """Add services. |
2512 | 25 | 25 | ||
2513 | 26 | Add services to the deployment where this_service is the local charm | 26 | Add services to the deployment where this_service is the local charm |
2516 | 27 | that we're focused on testing and other_services are the other | 27 | that we're testing and other_services are the other services that |
2517 | 28 | charms that come from the charm store. | 28 | are being used in the local amulet tests. |
2518 | 29 | """ | 29 | """ |
2523 | 30 | name, units = range(2) | 30 | if this_service['name'] != os.path.basename(os.getcwd()): |
2524 | 31 | 31 | s = this_service['name'] | |
2521 | 32 | if this_service[name] != os.path.basename(os.getcwd()): | ||
2522 | 33 | s = this_service[name] | ||
2525 | 34 | msg = "The charm's root directory name needs to be {}".format(s) | 32 | msg = "The charm's root directory name needs to be {}".format(s) |
2526 | 35 | amulet.raise_status(amulet.FAIL, msg=msg) | 33 | amulet.raise_status(amulet.FAIL, msg=msg) |
2527 | 36 | 34 | ||
2529 | 37 | self.d.add(this_service[name], units=this_service[units]) | 35 | if 'units' not in this_service: |
2530 | 36 | this_service['units'] = 1 | ||
2531 | 37 | |||
2532 | 38 | self.d.add(this_service['name'], units=this_service['units']) | ||
2533 | 38 | 39 | ||
2534 | 39 | for svc in other_services: | 40 | for svc in other_services: |
2539 | 40 | if self.series: | 41 | if 'location' in svc: |
2540 | 41 | self.d.add(svc[name], | 42 | branch_location = svc['location'] |
2541 | 42 | charm='cs:{}/{}'.format(self.series, svc[name]), | 43 | elif self.series: |
2542 | 43 | units=svc[units]) | 44 | branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
2543 | 44 | else: | 45 | else: |
2545 | 45 | self.d.add(svc[name], units=svc[units]) | 46 | branch_location = None |
2546 | 47 | |||
2547 | 48 | if 'units' not in svc: | ||
2548 | 49 | svc['units'] = 1 | ||
2549 | 50 | |||
2550 | 51 | self.d.add(svc['name'], charm=branch_location, units=svc['units']) | ||
2551 | 46 | 52 | ||
2552 | 47 | def _add_relations(self, relations): | 53 | def _add_relations(self, relations): |
2553 | 48 | """Add all of the relations for the services.""" | 54 | """Add all of the relations for the services.""" |
2554 | @@ -57,7 +63,7 @@ | |||
2555 | 57 | def _deploy(self): | 63 | def _deploy(self): |
2556 | 58 | """Deploy environment and wait for all hooks to finish executing.""" | 64 | """Deploy environment and wait for all hooks to finish executing.""" |
2557 | 59 | try: | 65 | try: |
2559 | 60 | self.d.setup() | 66 | self.d.setup(timeout=900) |
2560 | 61 | self.d.sentry.wait(timeout=900) | 67 | self.d.sentry.wait(timeout=900) |
2561 | 62 | except amulet.helpers.TimeoutError: | 68 | except amulet.helpers.TimeoutError: |
2562 | 63 | amulet.raise_status(amulet.FAIL, msg="Deployment timed out") | 69 | amulet.raise_status(amulet.FAIL, msg="Deployment timed out") |