Merge lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha into lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next
Proposed by: Liam Young
Status: Merged
Merged at revision: 32
Proposed branch: lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next
Diff against target: 5611 lines (+4608/-116), 46 files modified:
charm-helpers-hooks.yaml (+8/-1)
config.yaml (+20/-0)
files/ports.conf (+11/-0)
hooks/ceph_radosgw_context.py (+29/-0)
hooks/charmhelpers/__init__.py (+22/-0)
hooks/charmhelpers/contrib/hahelpers/apache.py (+66/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+248/-0)
hooks/charmhelpers/contrib/network/ip.py (+351/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+92/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+278/-0)
hooks/charmhelpers/contrib/openstack/context.py (+1038/-0)
hooks/charmhelpers/contrib/openstack/ip.py (+93/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+223/-0)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+2/-0)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+15/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+58/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+24/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+24/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+279/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+625/-0)
hooks/charmhelpers/contrib/python/packages.py (+77/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+428/-0)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+89/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/decorators.py (+41/-0)
hooks/charmhelpers/core/fstab.py (+10/-8)
hooks/charmhelpers/core/hookenv.py (+36/-16)
hooks/charmhelpers/core/host.py (+52/-24)
hooks/charmhelpers/core/services/__init__.py (+2/-2)
hooks/charmhelpers/core/services/helpers.py (+9/-5)
hooks/charmhelpers/core/templating.py (+3/-2)
hooks/charmhelpers/fetch/__init__.py (+22/-13)
hooks/charmhelpers/fetch/archiveurl.py (+53/-16)
hooks/charmhelpers/fetch/bzrurl.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+12/-5)
hooks/hooks.py (+118/-8)
hooks/utils.py (+36/-2)
metadata.yaml (+6/-0)
templates/ceph.conf (+1/-1)
templates/rgw (+1/-1)
tests/charmhelpers/__init__.py (+22/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+3/-3)
tests/charmhelpers/contrib/amulet/utils.py (+6/-4)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha
Related bugs: (none)
Reviewer: James Page (Approve)
Commit message
Description of the change
Add HA support
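
The branch puts haproxy (and, when the hacluster relation is present, a corosync/pacemaker-managed VIP) in front of the radosgw. The part that is easiest to misread in the diff is the port shuffle: the public port stays 80 on haproxy, while the Apache/radosgw backend drops to 70 (hence the new files/ports.conf with "Listen 70"). The sketch below is not charm code; it is a minimal, standalone restatement of the determine_api_port/determine_apache_port helpers added under hooks/charmhelpers/contrib/hahelpers/cluster.py, with their peer/clustered and https() relation checks reduced to plain booleans for illustration.

```python
# Standalone sketch of the port arithmetic used by this branch.
# Each "step" in front of the service (haproxy, then an HTTPS reverse
# proxy) shifts the backend listening port down by 10.

def determine_apache_port(public_port, singlenode_mode=False, clustered=False):
    # Backend web server port once haproxy sits in front.
    shift = 1 if (singlenode_mode or clustered) else 0
    return public_port - shift * 10


def determine_api_port(public_port, singlenode_mode=False, clustered=False,
                       https=False):
    # API/backend port, with one extra step if HTTPS termination is in front.
    shift = 1 if (singlenode_mode or clustered) else 0
    shift += 1 if https else 0
    return public_port - shift * 10


if __name__ == '__main__':
    # ceph-radosgw publishes port 80; with singlenode_mode=True the
    # embedded Apache/radosgw listens on 70 while haproxy keeps 80
    # (or the configured vip in an HA deployment).
    print(determine_apache_port(80, singlenode_mode=True))  # 70
    print(determine_api_port(80, singlenode_mode=True))     # 70
```

The new vip, ha-bindiface and ha-mcastport options added to config.yaml are the settings that get_hacluster_config() collects for the hacluster relation; HAProxyContext in the new hooks/ceph_radosgw_context.py then feeds the shifted ports into the haproxy template.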
Revision history for this message
James Page (james-page):
review: Approve
Preview Diff
1 | === modified file 'charm-helpers-hooks.yaml' | |||
2 | --- charm-helpers-hooks.yaml 2014-09-27 02:57:08 +0000 | |||
3 | +++ charm-helpers-hooks.yaml 2015-01-15 16:18:44 +0000 | |||
4 | @@ -5,5 +5,12 @@ | |||
5 | 5 | - fetch | 5 | - fetch |
6 | 6 | - contrib.storage.linux: | 6 | - contrib.storage.linux: |
7 | 7 | - utils | 7 | - utils |
8 | 8 | - contrib.hahelpers: | ||
9 | 9 | - apache | ||
10 | 10 | - cluster | ||
11 | 8 | - payload.execd | 11 | - payload.execd |
13 | 9 | - contrib.openstack.alternatives | 12 | - contrib.openstack|inc=* |
14 | 13 | - contrib.network.ip | ||
15 | 14 | - contrib.openstack.ip | ||
16 | 15 | - contrib.storage.linux | ||
17 | 16 | - contrib.python.packages | ||
18 | 10 | 17 | ||
19 | === modified file 'config.yaml' | |||
20 | --- config.yaml 2015-01-14 09:10:04 +0000 | |||
21 | +++ config.yaml 2015-01-15 16:18:44 +0000 | |||
22 | @@ -67,3 +67,23 @@ | |||
23 | 67 | . | 67 | . |
24 | 68 | Enable this option to disable use of Apache and enable the embedded | 68 | Enable this option to disable use of Apache and enable the embedded |
25 | 69 | web container feature. | 69 | web container feature. |
26 | 70 | vip: | ||
27 | 71 | type: string | ||
28 | 72 | default: | ||
29 | 73 | description: | | ||
30 | 74 | Virtual IP(s) to use to front API services in HA configuration. | ||
31 | 75 | . | ||
32 | 76 | If multiple networks are being used, a VIP should be provided for each | ||
33 | 77 | network, separated by spaces. | ||
34 | 78 | ha-bindiface: | ||
35 | 79 | type: string | ||
36 | 80 | default: eth0 | ||
37 | 81 | description: | | ||
38 | 82 | Default network interface on which HA cluster will bind to communication | ||
39 | 83 | with the other members of the HA Cluster. | ||
40 | 84 | ha-mcastport: | ||
41 | 85 | type: int | ||
42 | 86 | default: 5414 | ||
43 | 87 | description: | | ||
44 | 88 | Default multicast port number that will be used to communicate between | ||
45 | 89 | HA Cluster nodes. | ||
46 | 70 | 90 | ||
47 | === added file 'files/ports.conf' | |||
48 | --- files/ports.conf 1970-01-01 00:00:00 +0000 | |||
49 | +++ files/ports.conf 2015-01-15 16:18:44 +0000 | |||
50 | @@ -0,0 +1,11 @@ | |||
51 | 1 | Listen 70 | ||
52 | 2 | |||
53 | 3 | <IfModule ssl_module> | ||
54 | 4 | Listen 443 | ||
55 | 5 | </IfModule> | ||
56 | 6 | |||
57 | 7 | <IfModule mod_gnutls.c> | ||
58 | 8 | Listen 443 | ||
59 | 9 | </IfModule> | ||
60 | 10 | |||
61 | 11 | # vim: syntax=apache ts=4 sw=4 sts=4 sr noet | ||
62 | 0 | 12 | ||
63 | === added file 'hooks/ceph_radosgw_context.py' | |||
64 | --- hooks/ceph_radosgw_context.py 1970-01-01 00:00:00 +0000 | |||
65 | +++ hooks/ceph_radosgw_context.py 2015-01-15 16:18:44 +0000 | |||
66 | @@ -0,0 +1,29 @@ | |||
67 | 1 | from charmhelpers.contrib.openstack import context | ||
68 | 2 | from charmhelpers.contrib.hahelpers.cluster import ( | ||
69 | 3 | determine_api_port, | ||
70 | 4 | determine_apache_port, | ||
71 | 5 | ) | ||
72 | 6 | |||
73 | 7 | |||
74 | 8 | class HAProxyContext(context.HAProxyContext): | ||
75 | 9 | |||
76 | 10 | def __call__(self): | ||
77 | 11 | ctxt = super(HAProxyContext, self).__call__() | ||
78 | 12 | |||
79 | 13 | # Apache ports | ||
80 | 14 | a_cephradosgw_api = determine_apache_port(80, | ||
81 | 15 | singlenode_mode=True) | ||
82 | 16 | |||
83 | 17 | port_mapping = { | ||
84 | 18 | 'cephradosgw-server': [ | ||
85 | 19 | 80, a_cephradosgw_api] | ||
86 | 20 | } | ||
87 | 21 | |||
88 | 22 | ctxt['cephradosgw_bind_port'] = determine_api_port( | ||
89 | 23 | 80, | ||
90 | 24 | singlenode_mode=True, | ||
91 | 25 | ) | ||
92 | 26 | |||
93 | 27 | # for haproxy.conf | ||
94 | 28 | ctxt['service_ports'] = port_mapping | ||
95 | 29 | return ctxt | ||
96 | 0 | 30 | ||
97 | === modified file 'hooks/charmhelpers/__init__.py' | |||
98 | --- hooks/charmhelpers/__init__.py 2014-01-24 16:02:57 +0000 | |||
99 | +++ hooks/charmhelpers/__init__.py 2015-01-15 16:18:44 +0000 | |||
100 | @@ -0,0 +1,22 @@ | |||
101 | 1 | # Bootstrap charm-helpers, installing its dependencies if necessary using | ||
102 | 2 | # only standard libraries. | ||
103 | 3 | import subprocess | ||
104 | 4 | import sys | ||
105 | 5 | |||
106 | 6 | try: | ||
107 | 7 | import six # flake8: noqa | ||
108 | 8 | except ImportError: | ||
109 | 9 | if sys.version_info.major == 2: | ||
110 | 10 | subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) | ||
111 | 11 | else: | ||
112 | 12 | subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) | ||
113 | 13 | import six # flake8: noqa | ||
114 | 14 | |||
115 | 15 | try: | ||
116 | 16 | import yaml # flake8: noqa | ||
117 | 17 | except ImportError: | ||
118 | 18 | if sys.version_info.major == 2: | ||
119 | 19 | subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) | ||
120 | 20 | else: | ||
121 | 21 | subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) | ||
122 | 22 | import yaml # flake8: noqa | ||
123 | 0 | 23 | ||
124 | === added directory 'hooks/charmhelpers/contrib/hahelpers' | |||
125 | === added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py' | |||
126 | === added file 'hooks/charmhelpers/contrib/hahelpers/apache.py' | |||
127 | --- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000 | |||
128 | +++ hooks/charmhelpers/contrib/hahelpers/apache.py 2015-01-15 16:18:44 +0000 | |||
129 | @@ -0,0 +1,66 @@ | |||
130 | 1 | # | ||
131 | 2 | # Copyright 2012 Canonical Ltd. | ||
132 | 3 | # | ||
133 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
134 | 5 | # | ||
135 | 6 | # Authors: | ||
136 | 7 | # James Page <james.page@ubuntu.com> | ||
137 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
138 | 9 | # | ||
139 | 10 | |||
140 | 11 | import subprocess | ||
141 | 12 | |||
142 | 13 | from charmhelpers.core.hookenv import ( | ||
143 | 14 | config as config_get, | ||
144 | 15 | relation_get, | ||
145 | 16 | relation_ids, | ||
146 | 17 | related_units as relation_list, | ||
147 | 18 | log, | ||
148 | 19 | INFO, | ||
149 | 20 | ) | ||
150 | 21 | |||
151 | 22 | |||
152 | 23 | def get_cert(cn=None): | ||
153 | 24 | # TODO: deal with multiple https endpoints via charm config | ||
154 | 25 | cert = config_get('ssl_cert') | ||
155 | 26 | key = config_get('ssl_key') | ||
156 | 27 | if not (cert and key): | ||
157 | 28 | log("Inspecting identity-service relations for SSL certificate.", | ||
158 | 29 | level=INFO) | ||
159 | 30 | cert = key = None | ||
160 | 31 | if cn: | ||
161 | 32 | ssl_cert_attr = 'ssl_cert_{}'.format(cn) | ||
162 | 33 | ssl_key_attr = 'ssl_key_{}'.format(cn) | ||
163 | 34 | else: | ||
164 | 35 | ssl_cert_attr = 'ssl_cert' | ||
165 | 36 | ssl_key_attr = 'ssl_key' | ||
166 | 37 | for r_id in relation_ids('identity-service'): | ||
167 | 38 | for unit in relation_list(r_id): | ||
168 | 39 | if not cert: | ||
169 | 40 | cert = relation_get(ssl_cert_attr, | ||
170 | 41 | rid=r_id, unit=unit) | ||
171 | 42 | if not key: | ||
172 | 43 | key = relation_get(ssl_key_attr, | ||
173 | 44 | rid=r_id, unit=unit) | ||
174 | 45 | return (cert, key) | ||
175 | 46 | |||
176 | 47 | |||
177 | 48 | def get_ca_cert(): | ||
178 | 49 | ca_cert = config_get('ssl_ca') | ||
179 | 50 | if ca_cert is None: | ||
180 | 51 | log("Inspecting identity-service relations for CA SSL certificate.", | ||
181 | 52 | level=INFO) | ||
182 | 53 | for r_id in relation_ids('identity-service'): | ||
183 | 54 | for unit in relation_list(r_id): | ||
184 | 55 | if ca_cert is None: | ||
185 | 56 | ca_cert = relation_get('ca_cert', | ||
186 | 57 | rid=r_id, unit=unit) | ||
187 | 58 | return ca_cert | ||
188 | 59 | |||
189 | 60 | |||
190 | 61 | def install_ca_cert(ca_cert): | ||
191 | 62 | if ca_cert: | ||
192 | 63 | with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', | ||
193 | 64 | 'w') as crt: | ||
194 | 65 | crt.write(ca_cert) | ||
195 | 66 | subprocess.check_call(['update-ca-certificates', '--fresh']) | ||
196 | 0 | 67 | ||
197 | === added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
198 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000 | |||
199 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-01-15 16:18:44 +0000 | |||
200 | @@ -0,0 +1,248 @@ | |||
201 | 1 | # | ||
202 | 2 | # Copyright 2012 Canonical Ltd. | ||
203 | 3 | # | ||
204 | 4 | # Authors: | ||
205 | 5 | # James Page <james.page@ubuntu.com> | ||
206 | 6 | # Adam Gandelman <adamg@ubuntu.com> | ||
207 | 7 | # | ||
208 | 8 | |||
209 | 9 | """ | ||
210 | 10 | Helpers for clustering and determining "cluster leadership" and other | ||
211 | 11 | clustering-related helpers. | ||
212 | 12 | """ | ||
213 | 13 | |||
214 | 14 | import subprocess | ||
215 | 15 | import os | ||
216 | 16 | |||
217 | 17 | from socket import gethostname as get_unit_hostname | ||
218 | 18 | |||
219 | 19 | import six | ||
220 | 20 | |||
221 | 21 | from charmhelpers.core.hookenv import ( | ||
222 | 22 | log, | ||
223 | 23 | relation_ids, | ||
224 | 24 | related_units as relation_list, | ||
225 | 25 | relation_get, | ||
226 | 26 | config as config_get, | ||
227 | 27 | INFO, | ||
228 | 28 | ERROR, | ||
229 | 29 | WARNING, | ||
230 | 30 | unit_get, | ||
231 | 31 | ) | ||
232 | 32 | from charmhelpers.core.decorators import ( | ||
233 | 33 | retry_on_exception, | ||
234 | 34 | ) | ||
235 | 35 | |||
236 | 36 | |||
237 | 37 | class HAIncompleteConfig(Exception): | ||
238 | 38 | pass | ||
239 | 39 | |||
240 | 40 | |||
241 | 41 | class CRMResourceNotFound(Exception): | ||
242 | 42 | pass | ||
243 | 43 | |||
244 | 44 | |||
245 | 45 | def is_elected_leader(resource): | ||
246 | 46 | """ | ||
247 | 47 | Returns True if the charm executing this is the elected cluster leader. | ||
248 | 48 | |||
249 | 49 | It relies on two mechanisms to determine leadership: | ||
250 | 50 | 1. If the charm is part of a corosync cluster, call corosync to | ||
251 | 51 | determine leadership. | ||
252 | 52 | 2. If the charm is not part of a corosync cluster, the leader is | ||
253 | 53 | determined as being "the alive unit with the lowest unit numer". In | ||
254 | 54 | other words, the oldest surviving unit. | ||
255 | 55 | """ | ||
256 | 56 | if is_clustered(): | ||
257 | 57 | if not is_crm_leader(resource): | ||
258 | 58 | log('Deferring action to CRM leader.', level=INFO) | ||
259 | 59 | return False | ||
260 | 60 | else: | ||
261 | 61 | peers = peer_units() | ||
262 | 62 | if peers and not oldest_peer(peers): | ||
263 | 63 | log('Deferring action to oldest service unit.', level=INFO) | ||
264 | 64 | return False | ||
265 | 65 | return True | ||
266 | 66 | |||
267 | 67 | |||
268 | 68 | def is_clustered(): | ||
269 | 69 | for r_id in (relation_ids('ha') or []): | ||
270 | 70 | for unit in (relation_list(r_id) or []): | ||
271 | 71 | clustered = relation_get('clustered', | ||
272 | 72 | rid=r_id, | ||
273 | 73 | unit=unit) | ||
274 | 74 | if clustered: | ||
275 | 75 | return True | ||
276 | 76 | return False | ||
277 | 77 | |||
278 | 78 | |||
279 | 79 | @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) | ||
280 | 80 | def is_crm_leader(resource, retry=False): | ||
281 | 81 | """ | ||
282 | 82 | Returns True if the charm calling this is the elected corosync leader, | ||
283 | 83 | as returned by calling the external "crm" command. | ||
284 | 84 | |||
285 | 85 | We allow this operation to be retried to avoid the possibility of getting a | ||
286 | 86 | false negative. See LP #1396246 for more info. | ||
287 | 87 | """ | ||
288 | 88 | cmd = ['crm', 'resource', 'show', resource] | ||
289 | 89 | try: | ||
290 | 90 | status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) | ||
291 | 91 | if not isinstance(status, six.text_type): | ||
292 | 92 | status = six.text_type(status, "utf-8") | ||
293 | 93 | except subprocess.CalledProcessError: | ||
294 | 94 | status = None | ||
295 | 95 | |||
296 | 96 | if status and get_unit_hostname() in status: | ||
297 | 97 | return True | ||
298 | 98 | |||
299 | 99 | if status and "resource %s is NOT running" % (resource) in status: | ||
300 | 100 | raise CRMResourceNotFound("CRM resource %s not found" % (resource)) | ||
301 | 101 | |||
302 | 102 | return False | ||
303 | 103 | |||
304 | 104 | |||
305 | 105 | def is_leader(resource): | ||
306 | 106 | log("is_leader is deprecated. Please consider using is_crm_leader " | ||
307 | 107 | "instead.", level=WARNING) | ||
308 | 108 | return is_crm_leader(resource) | ||
309 | 109 | |||
310 | 110 | |||
311 | 111 | def peer_units(peer_relation="cluster"): | ||
312 | 112 | peers = [] | ||
313 | 113 | for r_id in (relation_ids(peer_relation) or []): | ||
314 | 114 | for unit in (relation_list(r_id) or []): | ||
315 | 115 | peers.append(unit) | ||
316 | 116 | return peers | ||
317 | 117 | |||
318 | 118 | |||
319 | 119 | def peer_ips(peer_relation='cluster', addr_key='private-address'): | ||
320 | 120 | '''Return a dict of peers and their private-address''' | ||
321 | 121 | peers = {} | ||
322 | 122 | for r_id in relation_ids(peer_relation): | ||
323 | 123 | for unit in relation_list(r_id): | ||
324 | 124 | peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) | ||
325 | 125 | return peers | ||
326 | 126 | |||
327 | 127 | |||
328 | 128 | def oldest_peer(peers): | ||
329 | 129 | """Determines who the oldest peer is by comparing unit numbers.""" | ||
330 | 130 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) | ||
331 | 131 | for peer in peers: | ||
332 | 132 | remote_unit_no = int(peer.split('/')[1]) | ||
333 | 133 | if remote_unit_no < local_unit_no: | ||
334 | 134 | return False | ||
335 | 135 | return True | ||
336 | 136 | |||
337 | 137 | |||
338 | 138 | def eligible_leader(resource): | ||
339 | 139 | log("eligible_leader is deprecated. Please consider using " | ||
340 | 140 | "is_elected_leader instead.", level=WARNING) | ||
341 | 141 | return is_elected_leader(resource) | ||
342 | 142 | |||
343 | 143 | |||
344 | 144 | def https(): | ||
345 | 145 | ''' | ||
346 | 146 | Determines whether enough data has been provided in configuration | ||
347 | 147 | or relation data to configure HTTPS | ||
348 | 148 | . | ||
349 | 149 | returns: boolean | ||
350 | 150 | ''' | ||
351 | 151 | if config_get('use-https') == "yes": | ||
352 | 152 | return True | ||
353 | 153 | if config_get('ssl_cert') and config_get('ssl_key'): | ||
354 | 154 | return True | ||
355 | 155 | for r_id in relation_ids('identity-service'): | ||
356 | 156 | for unit in relation_list(r_id): | ||
357 | 157 | # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN | ||
358 | 158 | rel_state = [ | ||
359 | 159 | relation_get('https_keystone', rid=r_id, unit=unit), | ||
360 | 160 | relation_get('ca_cert', rid=r_id, unit=unit), | ||
361 | 161 | ] | ||
362 | 162 | # NOTE: works around (LP: #1203241) | ||
363 | 163 | if (None not in rel_state) and ('' not in rel_state): | ||
364 | 164 | return True | ||
365 | 165 | return False | ||
366 | 166 | |||
367 | 167 | |||
368 | 168 | def determine_api_port(public_port, singlenode_mode=False): | ||
369 | 169 | ''' | ||
370 | 170 | Determine correct API server listening port based on | ||
371 | 171 | existence of HTTPS reverse proxy and/or haproxy. | ||
372 | 172 | |||
373 | 173 | public_port: int: standard public port for given service | ||
374 | 174 | |||
375 | 175 | singlenode_mode: boolean: Shuffle ports when only a single unit is present | ||
376 | 176 | |||
377 | 177 | returns: int: the correct listening port for the API service | ||
378 | 178 | ''' | ||
379 | 179 | i = 0 | ||
380 | 180 | if singlenode_mode: | ||
381 | 181 | i += 1 | ||
382 | 182 | elif len(peer_units()) > 0 or is_clustered(): | ||
383 | 183 | i += 1 | ||
384 | 184 | if https(): | ||
385 | 185 | i += 1 | ||
386 | 186 | return public_port - (i * 10) | ||
387 | 187 | |||
388 | 188 | |||
389 | 189 | def determine_apache_port(public_port, singlenode_mode=False): | ||
390 | 190 | ''' | ||
391 | 191 | Description: Determine correct apache listening port based on public IP + | ||
392 | 192 | state of the cluster. | ||
393 | 193 | |||
394 | 194 | public_port: int: standard public port for given service | ||
395 | 195 | |||
396 | 196 | singlenode_mode: boolean: Shuffle ports when only a single unit is present | ||
397 | 197 | |||
398 | 198 | returns: int: the correct listening port for the HAProxy service | ||
399 | 199 | ''' | ||
400 | 200 | i = 0 | ||
401 | 201 | if singlenode_mode: | ||
402 | 202 | i += 1 | ||
403 | 203 | elif len(peer_units()) > 0 or is_clustered(): | ||
404 | 204 | i += 1 | ||
405 | 205 | return public_port - (i * 10) | ||
406 | 206 | |||
407 | 207 | |||
408 | 208 | def get_hacluster_config(): | ||
409 | 209 | ''' | ||
410 | 210 | Obtains all relevant configuration from charm configuration required | ||
411 | 211 | for initiating a relation to hacluster: | ||
412 | 212 | |||
413 | 213 | ha-bindiface, ha-mcastport, vip | ||
414 | 214 | |||
415 | 215 | returns: dict: A dict containing settings keyed by setting name. | ||
416 | 216 | raises: HAIncompleteConfig if settings are missing. | ||
417 | 217 | ''' | ||
418 | 218 | settings = ['ha-bindiface', 'ha-mcastport', 'vip'] | ||
419 | 219 | conf = {} | ||
420 | 220 | for setting in settings: | ||
421 | 221 | conf[setting] = config_get(setting) | ||
422 | 222 | missing = [] | ||
423 | 223 | [missing.append(s) for s, v in six.iteritems(conf) if v is None] | ||
424 | 224 | if missing: | ||
425 | 225 | log('Insufficient config data to configure hacluster.', level=ERROR) | ||
426 | 226 | raise HAIncompleteConfig | ||
427 | 227 | return conf | ||
428 | 228 | |||
429 | 229 | |||
430 | 230 | def canonical_url(configs, vip_setting='vip'): | ||
431 | 231 | ''' | ||
432 | 232 | Returns the correct HTTP URL to this host given the state of HTTPS | ||
433 | 233 | configuration and hacluster. | ||
434 | 234 | |||
435 | 235 | :configs : OSTemplateRenderer: A config tempating object to inspect for | ||
436 | 236 | a complete https context. | ||
437 | 237 | |||
438 | 238 | :vip_setting: str: Setting in charm config that specifies | ||
439 | 239 | VIP address. | ||
440 | 240 | ''' | ||
441 | 241 | scheme = 'http' | ||
442 | 242 | if 'https' in configs.complete_contexts(): | ||
443 | 243 | scheme = 'https' | ||
444 | 244 | if is_clustered(): | ||
445 | 245 | addr = config_get(vip_setting) | ||
446 | 246 | else: | ||
447 | 247 | addr = unit_get('private-address') | ||
448 | 248 | return '%s://%s' % (scheme, addr) | ||
449 | 0 | 249 | ||
450 | === added directory 'hooks/charmhelpers/contrib/network' | |||
451 | === added file 'hooks/charmhelpers/contrib/network/__init__.py' | |||
452 | === added file 'hooks/charmhelpers/contrib/network/ip.py' | |||
453 | --- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 | |||
454 | +++ hooks/charmhelpers/contrib/network/ip.py 2015-01-15 16:18:44 +0000 | |||
455 | @@ -0,0 +1,351 @@ | |||
456 | 1 | import glob | ||
457 | 2 | import re | ||
458 | 3 | import subprocess | ||
459 | 4 | |||
460 | 5 | from functools import partial | ||
461 | 6 | |||
462 | 7 | from charmhelpers.core.hookenv import unit_get | ||
463 | 8 | from charmhelpers.fetch import apt_install | ||
464 | 9 | from charmhelpers.core.hookenv import ( | ||
465 | 10 | log | ||
466 | 11 | ) | ||
467 | 12 | |||
468 | 13 | try: | ||
469 | 14 | import netifaces | ||
470 | 15 | except ImportError: | ||
471 | 16 | apt_install('python-netifaces') | ||
472 | 17 | import netifaces | ||
473 | 18 | |||
474 | 19 | try: | ||
475 | 20 | import netaddr | ||
476 | 21 | except ImportError: | ||
477 | 22 | apt_install('python-netaddr') | ||
478 | 23 | import netaddr | ||
479 | 24 | |||
480 | 25 | |||
481 | 26 | def _validate_cidr(network): | ||
482 | 27 | try: | ||
483 | 28 | netaddr.IPNetwork(network) | ||
484 | 29 | except (netaddr.core.AddrFormatError, ValueError): | ||
485 | 30 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
486 | 31 | network) | ||
487 | 32 | |||
488 | 33 | |||
489 | 34 | def no_ip_found_error_out(network): | ||
490 | 35 | errmsg = ("No IP address found in network: %s" % network) | ||
491 | 36 | raise ValueError(errmsg) | ||
492 | 37 | |||
493 | 38 | |||
494 | 39 | def get_address_in_network(network, fallback=None, fatal=False): | ||
495 | 40 | """Get an IPv4 or IPv6 address within the network from the host. | ||
496 | 41 | |||
497 | 42 | :param network (str): CIDR presentation format. For example, | ||
498 | 43 | '192.168.1.0/24'. | ||
499 | 44 | :param fallback (str): If no address is found, return fallback. | ||
500 | 45 | :param fatal (boolean): If no address is found, fallback is not | ||
501 | 46 | set and fatal is True then exit(1). | ||
502 | 47 | """ | ||
503 | 48 | if network is None: | ||
504 | 49 | if fallback is not None: | ||
505 | 50 | return fallback | ||
506 | 51 | |||
507 | 52 | if fatal: | ||
508 | 53 | no_ip_found_error_out(network) | ||
509 | 54 | else: | ||
510 | 55 | return None | ||
511 | 56 | |||
512 | 57 | _validate_cidr(network) | ||
513 | 58 | network = netaddr.IPNetwork(network) | ||
514 | 59 | for iface in netifaces.interfaces(): | ||
515 | 60 | addresses = netifaces.ifaddresses(iface) | ||
516 | 61 | if network.version == 4 and netifaces.AF_INET in addresses: | ||
517 | 62 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
518 | 63 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
519 | 64 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
520 | 65 | if cidr in network: | ||
521 | 66 | return str(cidr.ip) | ||
522 | 67 | |||
523 | 68 | if network.version == 6 and netifaces.AF_INET6 in addresses: | ||
524 | 69 | for addr in addresses[netifaces.AF_INET6]: | ||
525 | 70 | if not addr['addr'].startswith('fe80'): | ||
526 | 71 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
527 | 72 | addr['netmask'])) | ||
528 | 73 | if cidr in network: | ||
529 | 74 | return str(cidr.ip) | ||
530 | 75 | |||
531 | 76 | if fallback is not None: | ||
532 | 77 | return fallback | ||
533 | 78 | |||
534 | 79 | if fatal: | ||
535 | 80 | no_ip_found_error_out(network) | ||
536 | 81 | |||
537 | 82 | return None | ||
538 | 83 | |||
539 | 84 | |||
540 | 85 | def is_ipv6(address): | ||
541 | 86 | """Determine whether provided address is IPv6 or not.""" | ||
542 | 87 | try: | ||
543 | 88 | address = netaddr.IPAddress(address) | ||
544 | 89 | except netaddr.AddrFormatError: | ||
545 | 90 | # probably a hostname - so not an address at all! | ||
546 | 91 | return False | ||
547 | 92 | |||
548 | 93 | return address.version == 6 | ||
549 | 94 | |||
550 | 95 | |||
551 | 96 | def is_address_in_network(network, address): | ||
552 | 97 | """ | ||
553 | 98 | Determine whether the provided address is within a network range. | ||
554 | 99 | |||
555 | 100 | :param network (str): CIDR presentation format. For example, | ||
556 | 101 | '192.168.1.0/24'. | ||
557 | 102 | :param address: An individual IPv4 or IPv6 address without a net | ||
558 | 103 | mask or subnet prefix. For example, '192.168.1.1'. | ||
559 | 104 | :returns boolean: Flag indicating whether address is in network. | ||
560 | 105 | """ | ||
561 | 106 | try: | ||
562 | 107 | network = netaddr.IPNetwork(network) | ||
563 | 108 | except (netaddr.core.AddrFormatError, ValueError): | ||
564 | 109 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
565 | 110 | network) | ||
566 | 111 | |||
567 | 112 | try: | ||
568 | 113 | address = netaddr.IPAddress(address) | ||
569 | 114 | except (netaddr.core.AddrFormatError, ValueError): | ||
570 | 115 | raise ValueError("Address (%s) is not in correct presentation format" % | ||
571 | 116 | address) | ||
572 | 117 | |||
573 | 118 | if address in network: | ||
574 | 119 | return True | ||
575 | 120 | else: | ||
576 | 121 | return False | ||
577 | 122 | |||
578 | 123 | |||
579 | 124 | def _get_for_address(address, key): | ||
580 | 125 | """Retrieve an attribute of or the physical interface that | ||
581 | 126 | the IP address provided could be bound to. | ||
582 | 127 | |||
583 | 128 | :param address (str): An individual IPv4 or IPv6 address without a net | ||
584 | 129 | mask or subnet prefix. For example, '192.168.1.1'. | ||
585 | 130 | :param key: 'iface' for the physical interface name or an attribute | ||
586 | 131 | of the configured interface, for example 'netmask'. | ||
587 | 132 | :returns str: Requested attribute or None if address is not bindable. | ||
588 | 133 | """ | ||
589 | 134 | address = netaddr.IPAddress(address) | ||
590 | 135 | for iface in netifaces.interfaces(): | ||
591 | 136 | addresses = netifaces.ifaddresses(iface) | ||
592 | 137 | if address.version == 4 and netifaces.AF_INET in addresses: | ||
593 | 138 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
594 | 139 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
595 | 140 | network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
596 | 141 | cidr = network.cidr | ||
597 | 142 | if address in cidr: | ||
598 | 143 | if key == 'iface': | ||
599 | 144 | return iface | ||
600 | 145 | else: | ||
601 | 146 | return addresses[netifaces.AF_INET][0][key] | ||
602 | 147 | |||
603 | 148 | if address.version == 6 and netifaces.AF_INET6 in addresses: | ||
604 | 149 | for addr in addresses[netifaces.AF_INET6]: | ||
605 | 150 | if not addr['addr'].startswith('fe80'): | ||
606 | 151 | network = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
607 | 152 | addr['netmask'])) | ||
608 | 153 | cidr = network.cidr | ||
609 | 154 | if address in cidr: | ||
610 | 155 | if key == 'iface': | ||
611 | 156 | return iface | ||
612 | 157 | elif key == 'netmask' and cidr: | ||
613 | 158 | return str(cidr).split('/')[1] | ||
614 | 159 | else: | ||
615 | 160 | return addr[key] | ||
616 | 161 | |||
617 | 162 | return None | ||
618 | 163 | |||
619 | 164 | |||
620 | 165 | get_iface_for_address = partial(_get_for_address, key='iface') | ||
621 | 166 | |||
622 | 167 | |||
623 | 168 | get_netmask_for_address = partial(_get_for_address, key='netmask') | ||
624 | 169 | |||
625 | 170 | |||
626 | 171 | def format_ipv6_addr(address): | ||
627 | 172 | """If address is IPv6, wrap it in '[]' otherwise return None. | ||
628 | 173 | |||
629 | 174 | This is required by most configuration files when specifying IPv6 | ||
630 | 175 | addresses. | ||
631 | 176 | """ | ||
632 | 177 | if is_ipv6(address): | ||
633 | 178 | return "[%s]" % address | ||
634 | 179 | |||
635 | 180 | return None | ||
636 | 181 | |||
637 | 182 | |||
638 | 183 | def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, | ||
639 | 184 | fatal=True, exc_list=None): | ||
640 | 185 | """Return the assigned IP address for a given interface, if any.""" | ||
641 | 186 | # Extract nic if passed /dev/ethX | ||
642 | 187 | if '/' in iface: | ||
643 | 188 | iface = iface.split('/')[-1] | ||
644 | 189 | |||
645 | 190 | if not exc_list: | ||
646 | 191 | exc_list = [] | ||
647 | 192 | |||
648 | 193 | try: | ||
649 | 194 | inet_num = getattr(netifaces, inet_type) | ||
650 | 195 | except AttributeError: | ||
651 | 196 | raise Exception("Unknown inet type '%s'" % str(inet_type)) | ||
652 | 197 | |||
653 | 198 | interfaces = netifaces.interfaces() | ||
654 | 199 | if inc_aliases: | ||
655 | 200 | ifaces = [] | ||
656 | 201 | for _iface in interfaces: | ||
657 | 202 | if iface == _iface or _iface.split(':')[0] == iface: | ||
658 | 203 | ifaces.append(_iface) | ||
659 | 204 | |||
660 | 205 | if fatal and not ifaces: | ||
661 | 206 | raise Exception("Invalid interface '%s'" % iface) | ||
662 | 207 | |||
663 | 208 | ifaces.sort() | ||
664 | 209 | else: | ||
665 | 210 | if iface not in interfaces: | ||
666 | 211 | if fatal: | ||
667 | 212 | raise Exception("Interface '%s' not found " % (iface)) | ||
668 | 213 | else: | ||
669 | 214 | return [] | ||
670 | 215 | |||
671 | 216 | else: | ||
672 | 217 | ifaces = [iface] | ||
673 | 218 | |||
674 | 219 | addresses = [] | ||
675 | 220 | for netiface in ifaces: | ||
676 | 221 | net_info = netifaces.ifaddresses(netiface) | ||
677 | 222 | if inet_num in net_info: | ||
678 | 223 | for entry in net_info[inet_num]: | ||
679 | 224 | if 'addr' in entry and entry['addr'] not in exc_list: | ||
680 | 225 | addresses.append(entry['addr']) | ||
681 | 226 | |||
682 | 227 | if fatal and not addresses: | ||
683 | 228 | raise Exception("Interface '%s' doesn't have any %s addresses." % | ||
684 | 229 | (iface, inet_type)) | ||
685 | 230 | |||
686 | 231 | return sorted(addresses) | ||
687 | 232 | |||
688 | 233 | |||
689 | 234 | get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') | ||
690 | 235 | |||
691 | 236 | |||
692 | 237 | def get_iface_from_addr(addr): | ||
693 | 238 | """Work out on which interface the provided address is configured.""" | ||
694 | 239 | for iface in netifaces.interfaces(): | ||
695 | 240 | addresses = netifaces.ifaddresses(iface) | ||
696 | 241 | for inet_type in addresses: | ||
697 | 242 | for _addr in addresses[inet_type]: | ||
698 | 243 | _addr = _addr['addr'] | ||
699 | 244 | # link local | ||
700 | 245 | ll_key = re.compile("(.+)%.*") | ||
701 | 246 | raw = re.match(ll_key, _addr) | ||
702 | 247 | if raw: | ||
703 | 248 | _addr = raw.group(1) | ||
704 | 249 | |||
705 | 250 | if _addr == addr: | ||
706 | 251 | log("Address '%s' is configured on iface '%s'" % | ||
707 | 252 | (addr, iface)) | ||
708 | 253 | return iface | ||
709 | 254 | |||
710 | 255 | msg = "Unable to infer net iface on which '%s' is configured" % (addr) | ||
711 | 256 | raise Exception(msg) | ||
712 | 257 | |||
713 | 258 | |||
714 | 259 | def sniff_iface(f): | ||
715 | 260 | """Ensure decorated function is called with a value for iface. | ||
716 | 261 | |||
717 | 262 | If no iface provided, inject net iface inferred from unit private address. | ||
718 | 263 | """ | ||
719 | 264 | def iface_sniffer(*args, **kwargs): | ||
720 | 265 | if not kwargs.get('iface', None): | ||
721 | 266 | kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) | ||
722 | 267 | |||
723 | 268 | return f(*args, **kwargs) | ||
724 | 269 | |||
725 | 270 | return iface_sniffer | ||
726 | 271 | |||
727 | 272 | |||
728 | 273 | @sniff_iface | ||
729 | 274 | def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, | ||
730 | 275 | dynamic_only=True): | ||
731 | 276 | """Get assigned IPv6 address for a given interface. | ||
732 | 277 | |||
733 | 278 | Returns list of addresses found. If no address found, returns empty list. | ||
734 | 279 | |||
735 | 280 | If iface is None, we infer the current primary interface by doing a reverse | ||
736 | 281 | lookup on the unit private-address. | ||
737 | 282 | |||
738 | 283 | We currently only support scope global IPv6 addresses i.e. non-temporary | ||
739 | 284 | addresses. If no global IPv6 address is found, return the first one found | ||
740 | 285 | in the ipv6 address list. | ||
741 | 286 | """ | ||
742 | 287 | addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', | ||
743 | 288 | inc_aliases=inc_aliases, fatal=fatal, | ||
744 | 289 | exc_list=exc_list) | ||
745 | 290 | |||
746 | 291 | if addresses: | ||
747 | 292 | global_addrs = [] | ||
748 | 293 | for addr in addresses: | ||
749 | 294 | key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") | ||
750 | 295 | m = re.match(key_scope_link_local, addr) | ||
751 | 296 | if m: | ||
752 | 297 | eui_64_mac = m.group(1) | ||
753 | 298 | iface = m.group(2) | ||
754 | 299 | else: | ||
755 | 300 | global_addrs.append(addr) | ||
756 | 301 | |||
757 | 302 | if global_addrs: | ||
758 | 303 | # Make sure any found global addresses are not temporary | ||
759 | 304 | cmd = ['ip', 'addr', 'show', iface] | ||
760 | 305 | out = subprocess.check_output(cmd).decode('UTF-8') | ||
761 | 306 | if dynamic_only: | ||
762 | 307 | key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") | ||
763 | 308 | else: | ||
764 | 309 | key = re.compile("inet6 (.+)/[0-9]+ scope global.*") | ||
765 | 310 | |||
766 | 311 | addrs = [] | ||
767 | 312 | for line in out.split('\n'): | ||
768 | 313 | line = line.strip() | ||
769 | 314 | m = re.match(key, line) | ||
770 | 315 | if m and 'temporary' not in line: | ||
771 | 316 | # Return the first valid address we find | ||
772 | 317 | for addr in global_addrs: | ||
773 | 318 | if m.group(1) == addr: | ||
774 | 319 | if not dynamic_only or \ | ||
775 | 320 | m.group(1).endswith(eui_64_mac): | ||
776 | 321 | addrs.append(addr) | ||
777 | 322 | |||
778 | 323 | if addrs: | ||
779 | 324 | return addrs | ||
780 | 325 | |||
781 | 326 | if fatal: | ||
782 | 327 | raise Exception("Interface '%s' does not have a scope global " | ||
783 | 328 | "non-temporary ipv6 address." % iface) | ||
784 | 329 | |||
785 | 330 | return [] | ||
786 | 331 | |||
787 | 332 | |||
788 | 333 | def get_bridges(vnic_dir='/sys/devices/virtual/net'): | ||
789 | 334 | """Return a list of bridges on the system.""" | ||
790 | 335 | b_regex = "%s/*/bridge" % vnic_dir | ||
791 | 336 | return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] | ||
792 | 337 | |||
793 | 338 | |||
794 | 339 | def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): | ||
795 | 340 | """Return a list of nics comprising a given bridge on the system.""" | ||
796 | 341 | brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) | ||
797 | 342 | return [x.split('/')[-1] for x in glob.glob(brif_regex)] | ||
798 | 343 | |||
799 | 344 | |||
800 | 345 | def is_bridge_member(nic): | ||
801 | 346 | """Check if a given nic is a member of a bridge.""" | ||
802 | 347 | for bridge in get_bridges(): | ||
803 | 348 | if nic in get_bridge_nics(bridge): | ||
804 | 349 | return True | ||
805 | 350 | |||
806 | 351 | return False | ||
807 | 0 | 352 | ||
808 | === added directory 'hooks/charmhelpers/contrib/openstack/amulet' | |||
809 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
810 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
811 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
812 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-01-15 16:18:44 +0000 | |||
813 | @@ -0,0 +1,92 @@ | |||
814 | 1 | import six | ||
815 | 2 | from charmhelpers.contrib.amulet.deployment import ( | ||
816 | 3 | AmuletDeployment | ||
817 | 4 | ) | ||
818 | 5 | |||
819 | 6 | |||
820 | 7 | class OpenStackAmuletDeployment(AmuletDeployment): | ||
821 | 8 | """OpenStack amulet deployment. | ||
822 | 9 | |||
823 | 10 | This class inherits from AmuletDeployment and has additional support | ||
824 | 11 | that is specifically for use by OpenStack charms. | ||
825 | 12 | """ | ||
826 | 13 | |||
827 | 14 | def __init__(self, series=None, openstack=None, source=None, stable=True): | ||
828 | 15 | """Initialize the deployment environment.""" | ||
829 | 16 | super(OpenStackAmuletDeployment, self).__init__(series) | ||
830 | 17 | self.openstack = openstack | ||
831 | 18 | self.source = source | ||
832 | 19 | self.stable = stable | ||
833 | 20 | # Note(coreycb): this needs to be changed when new next branches come | ||
834 | 21 | # out. | ||
835 | 22 | self.current_next = "trusty" | ||
836 | 23 | |||
837 | 24 | def _determine_branch_locations(self, other_services): | ||
838 | 25 | """Determine the branch locations for the other services. | ||
839 | 26 | |||
840 | 27 | Determine if the local branch being tested is derived from its | ||
841 | 28 | stable or next (dev) branch, and based on this, use the corresonding | ||
842 | 29 | stable or next branches for the other_services.""" | ||
843 | 30 | base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] | ||
844 | 31 | |||
845 | 32 | if self.stable: | ||
846 | 33 | for svc in other_services: | ||
847 | 34 | temp = 'lp:charms/{}' | ||
848 | 35 | svc['location'] = temp.format(svc['name']) | ||
849 | 36 | else: | ||
850 | 37 | for svc in other_services: | ||
851 | 38 | if svc['name'] in base_charms: | ||
852 | 39 | temp = 'lp:charms/{}' | ||
853 | 40 | svc['location'] = temp.format(svc['name']) | ||
854 | 41 | else: | ||
855 | 42 | temp = 'lp:~openstack-charmers/charms/{}/{}/next' | ||
856 | 43 | svc['location'] = temp.format(self.current_next, | ||
857 | 44 | svc['name']) | ||
858 | 45 | return other_services | ||
859 | 46 | |||
860 | 47 | def _add_services(self, this_service, other_services): | ||
861 | 48 | """Add services to the deployment and set openstack-origin/source.""" | ||
862 | 49 | other_services = self._determine_branch_locations(other_services) | ||
863 | 50 | |||
864 | 51 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
865 | 52 | other_services) | ||
866 | 53 | |||
867 | 54 | services = other_services | ||
868 | 55 | services.append(this_service) | ||
869 | 56 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | ||
870 | 57 | 'ceph-osd', 'ceph-radosgw'] | ||
871 | 58 | |||
872 | 59 | if self.openstack: | ||
873 | 60 | for svc in services: | ||
874 | 61 | if svc['name'] not in use_source: | ||
875 | 62 | config = {'openstack-origin': self.openstack} | ||
876 | 63 | self.d.configure(svc['name'], config) | ||
877 | 64 | |||
878 | 65 | if self.source: | ||
879 | 66 | for svc in services: | ||
880 | 67 | if svc['name'] in use_source: | ||
881 | 68 | config = {'source': self.source} | ||
882 | 69 | self.d.configure(svc['name'], config) | ||
883 | 70 | |||
884 | 71 | def _configure_services(self, configs): | ||
885 | 72 | """Configure all of the services.""" | ||
886 | 73 | for service, config in six.iteritems(configs): | ||
887 | 74 | self.d.configure(service, config) | ||
888 | 75 | |||
889 | 76 | def _get_openstack_release(self): | ||
890 | 77 | """Get openstack release. | ||
891 | 78 | |||
892 | 79 | Return an integer representing the enum value of the openstack | ||
893 | 80 | release. | ||
894 | 81 | """ | ||
895 | 82 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | ||
896 | 83 | self.precise_havana, self.precise_icehouse, | ||
897 | 84 | self.trusty_icehouse) = range(6) | ||
898 | 85 | releases = { | ||
899 | 86 | ('precise', None): self.precise_essex, | ||
900 | 87 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | ||
901 | 88 | ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, | ||
902 | 89 | ('precise', 'cloud:precise-havana'): self.precise_havana, | ||
903 | 90 | ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, | ||
904 | 91 | ('trusty', None): self.trusty_icehouse} | ||
905 | 92 | return releases[(self.series, self.openstack)] | ||
906 | 0 | 93 | ||
907 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
908 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
909 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-01-15 16:18:44 +0000 | |||
910 | @@ -0,0 +1,278 @@ | |||
911 | 1 | import logging | ||
912 | 2 | import os | ||
913 | 3 | import time | ||
914 | 4 | import urllib | ||
915 | 5 | |||
916 | 6 | import glanceclient.v1.client as glance_client | ||
917 | 7 | import keystoneclient.v2_0 as keystone_client | ||
918 | 8 | import novaclient.v1_1.client as nova_client | ||
919 | 9 | |||
920 | 10 | import six | ||
921 | 11 | |||
922 | 12 | from charmhelpers.contrib.amulet.utils import ( | ||
923 | 13 | AmuletUtils | ||
924 | 14 | ) | ||
925 | 15 | |||
926 | 16 | DEBUG = logging.DEBUG | ||
927 | 17 | ERROR = logging.ERROR | ||
928 | 18 | |||
929 | 19 | |||
930 | 20 | class OpenStackAmuletUtils(AmuletUtils): | ||
931 | 21 | """OpenStack amulet utilities. | ||
932 | 22 | |||
933 | 23 | This class inherits from AmuletUtils and has additional support | ||
934 | 24 | that is specifically for use by OpenStack charms. | ||
935 | 25 | """ | ||
936 | 26 | |||
937 | 27 | def __init__(self, log_level=ERROR): | ||
938 | 28 | """Initialize the deployment environment.""" | ||
939 | 29 | super(OpenStackAmuletUtils, self).__init__(log_level) | ||
940 | 30 | |||
941 | 31 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | ||
942 | 32 | public_port, expected): | ||
943 | 33 | """Validate endpoint data. | ||
944 | 34 | |||
945 | 35 | Validate actual endpoint data vs expected endpoint data. The ports | ||
946 | 36 | are used to find the matching endpoint. | ||
947 | 37 | """ | ||
948 | 38 | found = False | ||
949 | 39 | for ep in endpoints: | ||
950 | 40 | self.log.debug('endpoint: {}'.format(repr(ep))) | ||
951 | 41 | if (admin_port in ep.adminurl and | ||
952 | 42 | internal_port in ep.internalurl and | ||
953 | 43 | public_port in ep.publicurl): | ||
954 | 44 | found = True | ||
955 | 45 | actual = {'id': ep.id, | ||
956 | 46 | 'region': ep.region, | ||
957 | 47 | 'adminurl': ep.adminurl, | ||
958 | 48 | 'internalurl': ep.internalurl, | ||
959 | 49 | 'publicurl': ep.publicurl, | ||
960 | 50 | 'service_id': ep.service_id} | ||
961 | 51 | ret = self._validate_dict_data(expected, actual) | ||
962 | 52 | if ret: | ||
963 | 53 | return 'unexpected endpoint data - {}'.format(ret) | ||
964 | 54 | |||
965 | 55 | if not found: | ||
966 | 56 | return 'endpoint not found' | ||
967 | 57 | |||
968 | 58 | def validate_svc_catalog_endpoint_data(self, expected, actual): | ||
969 | 59 | """Validate service catalog endpoint data. | ||
970 | 60 | |||
971 | 61 | Validate a list of actual service catalog endpoints vs a list of | ||
972 | 62 | expected service catalog endpoints. | ||
973 | 63 | """ | ||
974 | 64 | self.log.debug('actual: {}'.format(repr(actual))) | ||
975 | 65 | for k, v in six.iteritems(expected): | ||
976 | 66 | if k in actual: | ||
977 | 67 | ret = self._validate_dict_data(expected[k][0], actual[k][0]) | ||
978 | 68 | if ret: | ||
979 | 69 | return self.endpoint_error(k, ret) | ||
980 | 70 | else: | ||
981 | 71 | return "endpoint {} does not exist".format(k) | ||
982 | 72 | return ret | ||
983 | 73 | |||
984 | 74 | def validate_tenant_data(self, expected, actual): | ||
985 | 75 | """Validate tenant data. | ||
986 | 76 | |||
987 | 77 | Validate a list of actual tenant data vs list of expected tenant | ||
988 | 78 | data. | ||
989 | 79 | """ | ||
990 | 80 | self.log.debug('actual: {}'.format(repr(actual))) | ||
991 | 81 | for e in expected: | ||
992 | 82 | found = False | ||
993 | 83 | for act in actual: | ||
994 | 84 | a = {'enabled': act.enabled, 'description': act.description, | ||
995 | 85 | 'name': act.name, 'id': act.id} | ||
996 | 86 | if e['name'] == a['name']: | ||
997 | 87 | found = True | ||
998 | 88 | ret = self._validate_dict_data(e, a) | ||
999 | 89 | if ret: | ||
1000 | 90 | return "unexpected tenant data - {}".format(ret) | ||
1001 | 91 | if not found: | ||
1002 | 92 | return "tenant {} does not exist".format(e['name']) | ||
1003 | 93 | return ret | ||
1004 | 94 | |||
1005 | 95 | def validate_role_data(self, expected, actual): | ||
1006 | 96 | """Validate role data. | ||
1007 | 97 | |||
1008 | 98 | Validate a list of actual role data vs a list of expected role | ||
1009 | 99 | data. | ||
1010 | 100 | """ | ||
1011 | 101 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1012 | 102 | for e in expected: | ||
1013 | 103 | found = False | ||
1014 | 104 | for act in actual: | ||
1015 | 105 | a = {'name': act.name, 'id': act.id} | ||
1016 | 106 | if e['name'] == a['name']: | ||
1017 | 107 | found = True | ||
1018 | 108 | ret = self._validate_dict_data(e, a) | ||
1019 | 109 | if ret: | ||
1020 | 110 | return "unexpected role data - {}".format(ret) | ||
1021 | 111 | if not found: | ||
1022 | 112 | return "role {} does not exist".format(e['name']) | ||
1023 | 113 | return ret | ||
1024 | 114 | |||
1025 | 115 | def validate_user_data(self, expected, actual): | ||
1026 | 116 | """Validate user data. | ||
1027 | 117 | |||
1028 | 118 | Validate a list of actual user data vs a list of expected user | ||
1029 | 119 | data. | ||
1030 | 120 | """ | ||
1031 | 121 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1032 | 122 | for e in expected: | ||
1033 | 123 | found = False | ||
1034 | 124 | for act in actual: | ||
1035 | 125 | a = {'enabled': act.enabled, 'name': act.name, | ||
1036 | 126 | 'email': act.email, 'tenantId': act.tenantId, | ||
1037 | 127 | 'id': act.id} | ||
1038 | 128 | if e['name'] == a['name']: | ||
1039 | 129 | found = True | ||
1040 | 130 | ret = self._validate_dict_data(e, a) | ||
1041 | 131 | if ret: | ||
1042 | 132 | return "unexpected user data - {}".format(ret) | ||
1043 | 133 | if not found: | ||
1044 | 134 | return "user {} does not exist".format(e['name']) | ||
1045 | 135 | return ret | ||
1046 | 136 | |||
1047 | 137 | def validate_flavor_data(self, expected, actual): | ||
1048 | 138 | """Validate flavor data. | ||
1049 | 139 | |||
1050 | 140 | Validate a list of actual flavors vs a list of expected flavors. | ||
1051 | 141 | """ | ||
1052 | 142 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1053 | 143 | act = [a.name for a in actual] | ||
1054 | 144 | return self._validate_list_data(expected, act) | ||
1055 | 145 | |||
1056 | 146 | def tenant_exists(self, keystone, tenant): | ||
1057 | 147 | """Return True if tenant exists.""" | ||
1058 | 148 | return tenant in [t.name for t in keystone.tenants.list()] | ||
1059 | 149 | |||
1060 | 150 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | ||
1061 | 151 | tenant): | ||
1062 | 152 | """Authenticates admin user with the keystone admin endpoint.""" | ||
1063 | 153 | unit = keystone_sentry | ||
1064 | 154 | service_ip = unit.relation('shared-db', | ||
1065 | 155 | 'mysql:shared-db')['private-address'] | ||
1066 | 156 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | ||
1067 | 157 | return keystone_client.Client(username=user, password=password, | ||
1068 | 158 | tenant_name=tenant, auth_url=ep) | ||
1069 | 159 | |||
1070 | 160 | def authenticate_keystone_user(self, keystone, user, password, tenant): | ||
1071 | 161 | """Authenticates a regular user with the keystone public endpoint.""" | ||
1072 | 162 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
1073 | 163 | endpoint_type='publicURL') | ||
1074 | 164 | return keystone_client.Client(username=user, password=password, | ||
1075 | 165 | tenant_name=tenant, auth_url=ep) | ||
1076 | 166 | |||
1077 | 167 | def authenticate_glance_admin(self, keystone): | ||
1078 | 168 | """Authenticates admin user with glance.""" | ||
1079 | 169 | ep = keystone.service_catalog.url_for(service_type='image', | ||
1080 | 170 | endpoint_type='adminURL') | ||
1081 | 171 | return glance_client.Client(ep, token=keystone.auth_token) | ||
1082 | 172 | |||
1083 | 173 | def authenticate_nova_user(self, keystone, user, password, tenant): | ||
1084 | 174 | """Authenticates a regular user with nova-api.""" | ||
1085 | 175 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
1086 | 176 | endpoint_type='publicURL') | ||
1087 | 177 | return nova_client.Client(username=user, api_key=password, | ||
1088 | 178 | project_id=tenant, auth_url=ep) | ||
1089 | 179 | |||
1090 | 180 | def create_cirros_image(self, glance, image_name): | ||
1091 | 181 | """Download the latest cirros image and upload it to glance.""" | ||
1092 | 182 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | ||
1093 | 183 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | ||
1094 | 184 | if http_proxy: | ||
1095 | 185 | proxies = {'http': http_proxy} | ||
1096 | 186 | opener = urllib.FancyURLopener(proxies) | ||
1097 | 187 | else: | ||
1098 | 188 | opener = urllib.FancyURLopener() | ||
1099 | 189 | |||
1100 | 190 | f = opener.open("http://download.cirros-cloud.net/version/released") | ||
1101 | 191 | version = f.read().strip() | ||
1102 | 192 | cirros_img = "cirros-{}-x86_64-disk.img".format(version) | ||
1103 | 193 | local_path = os.path.join('tests', cirros_img) | ||
1104 | 194 | |||
1105 | 195 | if not os.path.exists(local_path): | ||
1106 | 196 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | ||
1107 | 197 | version, cirros_img) | ||
1108 | 198 | opener.retrieve(cirros_url, local_path) | ||
1109 | 199 | f.close() | ||
1110 | 200 | |||
1111 | 201 | with open(local_path) as f: | ||
1112 | 202 | image = glance.images.create(name=image_name, is_public=True, | ||
1113 | 203 | disk_format='qcow2', | ||
1114 | 204 | container_format='bare', data=f) | ||
1115 | 205 | count = 1 | ||
1116 | 206 | status = image.status | ||
1117 | 207 | while status != 'active' and count < 10: | ||
1118 | 208 | time.sleep(3) | ||
1119 | 209 | image = glance.images.get(image.id) | ||
1120 | 210 | status = image.status | ||
1121 | 211 | self.log.debug('image status: {}'.format(status)) | ||
1122 | 212 | count += 1 | ||
1123 | 213 | |||
1124 | 214 | if status != 'active': | ||
1125 | 215 | self.log.error('image creation timed out') | ||
1126 | 216 | return None | ||
1127 | 217 | |||
1128 | 218 | return image | ||
1129 | 219 | |||
1130 | 220 | def delete_image(self, glance, image): | ||
1131 | 221 | """Delete the specified image.""" | ||
1132 | 222 | num_before = len(list(glance.images.list())) | ||
1133 | 223 | glance.images.delete(image) | ||
1134 | 224 | |||
1135 | 225 | count = 1 | ||
1136 | 226 | num_after = len(list(glance.images.list())) | ||
1137 | 227 | while num_after != (num_before - 1) and count < 10: | ||
1138 | 228 | time.sleep(3) | ||
1139 | 229 | num_after = len(list(glance.images.list())) | ||
1140 | 230 | self.log.debug('number of images: {}'.format(num_after)) | ||
1141 | 231 | count += 1 | ||
1142 | 232 | |||
1143 | 233 | if num_after != (num_before - 1): | ||
1144 | 234 | self.log.error('image deletion timed out') | ||
1145 | 235 | return False | ||
1146 | 236 | |||
1147 | 237 | return True | ||
1148 | 238 | |||
1149 | 239 | def create_instance(self, nova, image_name, instance_name, flavor): | ||
1150 | 240 | """Create the specified instance.""" | ||
1151 | 241 | image = nova.images.find(name=image_name) | ||
1152 | 242 | flavor = nova.flavors.find(name=flavor) | ||
1153 | 243 | instance = nova.servers.create(name=instance_name, image=image, | ||
1154 | 244 | flavor=flavor) | ||
1155 | 245 | |||
1156 | 246 | count = 1 | ||
1157 | 247 | status = instance.status | ||
1158 | 248 | while status != 'ACTIVE' and count < 60: | ||
1159 | 249 | time.sleep(3) | ||
1160 | 250 | instance = nova.servers.get(instance.id) | ||
1161 | 251 | status = instance.status | ||
1162 | 252 | self.log.debug('instance status: {}'.format(status)) | ||
1163 | 253 | count += 1 | ||
1164 | 254 | |||
1165 | 255 | if status != 'ACTIVE': | ||
1166 | 256 | self.log.error('instance creation timed out') | ||
1167 | 257 | return None | ||
1168 | 258 | |||
1169 | 259 | return instance | ||
1170 | 260 | |||
1171 | 261 | def delete_instance(self, nova, instance): | ||
1172 | 262 | """Delete the specified instance.""" | ||
1173 | 263 | num_before = len(list(nova.servers.list())) | ||
1174 | 264 | nova.servers.delete(instance) | ||
1175 | 265 | |||
1176 | 266 | count = 1 | ||
1177 | 267 | num_after = len(list(nova.servers.list())) | ||
1178 | 268 | while num_after != (num_before - 1) and count < 10: | ||
1179 | 269 | time.sleep(3) | ||
1180 | 270 | num_after = len(list(nova.servers.list())) | ||
1181 | 271 | self.log.debug('number of instances: {}'.format(num_after)) | ||
1182 | 272 | count += 1 | ||
1183 | 273 | |||
1184 | 274 | if num_after != (num_before - 1): | ||
1185 | 275 | self.log.error('instance deletion timed out') | ||
1186 | 276 | return False | ||
1187 | 277 | |||
1188 | 278 | return True | ||
1189 | 0 | 279 | ||
1190 | === added file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
1191 | --- hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000 | |||
1192 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-01-15 16:18:44 +0000 | |||
1193 | @@ -0,0 +1,1038 @@ | |||
1194 | 1 | import json | ||
1195 | 2 | import os | ||
1196 | 3 | import time | ||
1197 | 4 | from base64 import b64decode | ||
1198 | 5 | from subprocess import check_call | ||
1199 | 6 | |||
1200 | 7 | import six | ||
1201 | 8 | |||
1202 | 9 | from charmhelpers.fetch import ( | ||
1203 | 10 | apt_install, | ||
1204 | 11 | filter_installed_packages, | ||
1205 | 12 | ) | ||
1206 | 13 | from charmhelpers.core.hookenv import ( | ||
1207 | 14 | config, | ||
1208 | 15 | is_relation_made, | ||
1209 | 16 | local_unit, | ||
1210 | 17 | log, | ||
1211 | 18 | relation_get, | ||
1212 | 19 | relation_ids, | ||
1213 | 20 | related_units, | ||
1214 | 21 | relation_set, | ||
1215 | 22 | unit_get, | ||
1216 | 23 | unit_private_ip, | ||
1217 | 24 | charm_name, | ||
1218 | 25 | DEBUG, | ||
1219 | 26 | INFO, | ||
1220 | 27 | WARNING, | ||
1221 | 28 | ERROR, | ||
1222 | 29 | ) | ||
1223 | 30 | |||
1224 | 31 | from charmhelpers.core.sysctl import create as sysctl_create | ||
1225 | 32 | |||
1226 | 33 | from charmhelpers.core.host import ( | ||
1227 | 34 | mkdir, | ||
1228 | 35 | write_file, | ||
1229 | 36 | ) | ||
1230 | 37 | from charmhelpers.contrib.hahelpers.cluster import ( | ||
1231 | 38 | determine_apache_port, | ||
1232 | 39 | determine_api_port, | ||
1233 | 40 | https, | ||
1234 | 41 | is_clustered, | ||
1235 | 42 | ) | ||
1236 | 43 | from charmhelpers.contrib.hahelpers.apache import ( | ||
1237 | 44 | get_cert, | ||
1238 | 45 | get_ca_cert, | ||
1239 | 46 | install_ca_cert, | ||
1240 | 47 | ) | ||
1241 | 48 | from charmhelpers.contrib.openstack.neutron import ( | ||
1242 | 49 | neutron_plugin_attribute, | ||
1243 | 50 | ) | ||
1244 | 51 | from charmhelpers.contrib.network.ip import ( | ||
1245 | 52 | get_address_in_network, | ||
1246 | 53 | get_ipv6_addr, | ||
1247 | 54 | get_netmask_for_address, | ||
1248 | 55 | format_ipv6_addr, | ||
1249 | 56 | is_address_in_network, | ||
1250 | 57 | ) | ||
1251 | 58 | from charmhelpers.contrib.openstack.utils import get_host_ip | ||
1252 | 59 | |||
1253 | 60 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | ||
1254 | 61 | ADDRESS_TYPES = ['admin', 'internal', 'public'] | ||
1255 | 62 | |||
1256 | 63 | |||
1257 | 64 | class OSContextError(Exception): | ||
1258 | 65 | pass | ||
1259 | 66 | |||
1260 | 67 | |||
1261 | 68 | def ensure_packages(packages): | ||
1262 | 69 | """Install but do not upgrade required plugin packages.""" | ||
1263 | 70 | required = filter_installed_packages(packages) | ||
1264 | 71 | if required: | ||
1265 | 72 | apt_install(required, fatal=True) | ||
1266 | 73 | |||
1267 | 74 | |||
1268 | 75 | def context_complete(ctxt): | ||
1269 | 76 | _missing = [] | ||
1270 | 77 | for k, v in six.iteritems(ctxt): | ||
1271 | 78 | if v is None or v == '': | ||
1272 | 79 | _missing.append(k) | ||
1273 | 80 | |||
1274 | 81 | if _missing: | ||
1275 | 82 | log('Missing required data: %s' % ' '.join(_missing), level=INFO) | ||
1276 | 83 | return False | ||
1277 | 84 | |||
1278 | 85 | return True | ||
1279 | 86 | |||
1280 | 87 | |||
1281 | 88 | def config_flags_parser(config_flags): | ||
1282 | 89 | """Parses config flags string into dict. | ||
1283 | 90 | |||
1284 | 91 | The provided config_flags string is a comma-separated list of key=value pairs; | ||
1285 | 92 | individual values may themselves be comma-separated lists. | ||
1286 | 93 | """ | ||
1287 | 94 | if config_flags.find('==') >= 0: | ||
1288 | 95 | log("config_flags is not in expected format (key=value)", level=ERROR) | ||
1289 | 96 | raise OSContextError | ||
1290 | 97 | |||
1291 | 98 | # strip the following from each value. | ||
1292 | 99 | post_strippers = ' ,' | ||
1293 | 100 | # we strip any leading/trailing '=' or ' ' from the string then | ||
1294 | 101 | # split on '='. | ||
1295 | 102 | split = config_flags.strip(' =').split('=') | ||
1296 | 103 | limit = len(split) | ||
1297 | 104 | flags = {} | ||
1298 | 105 | for i in range(0, limit - 1): | ||
1299 | 106 | current = split[i] | ||
1300 | 107 | next = split[i + 1] | ||
1301 | 108 | vindex = next.rfind(',') | ||
1302 | 109 | if (i == limit - 2) or (vindex < 0): | ||
1303 | 110 | value = next | ||
1304 | 111 | else: | ||
1305 | 112 | value = next[:vindex] | ||
1306 | 113 | |||
1307 | 114 | if i == 0: | ||
1308 | 115 | key = current | ||
1309 | 116 | else: | ||
1310 | 117 | # if this is not the first entry, expect an embedded key. | ||
1311 | 118 | index = current.rfind(',') | ||
1312 | 119 | if index < 0: | ||
1313 | 120 | log("Invalid config value(s) at index %s" % (i), level=ERROR) | ||
1314 | 121 | raise OSContextError | ||
1315 | 122 | key = current[index + 1:] | ||
1316 | 123 | |||
1317 | 124 | # Add to collection. | ||
1318 | 125 | flags[key.strip(post_strippers)] = value.rstrip(post_strippers) | ||
1319 | 126 | |||
1320 | 127 | return flags | ||
1321 | 128 | |||
1322 | 129 | |||
1323 | 130 | class OSContextGenerator(object): | ||
1324 | 131 | """Base class for all context generators.""" | ||
1325 | 132 | interfaces = [] | ||
1326 | 133 | |||
1327 | 134 | def __call__(self): | ||
1328 | 135 | raise NotImplementedError | ||
1329 | 136 | |||
1330 | 137 | |||
1331 | 138 | class SharedDBContext(OSContextGenerator): | ||
1332 | 139 | interfaces = ['shared-db'] | ||
1333 | 140 | |||
1334 | 141 | def __init__(self, | ||
1335 | 142 | database=None, user=None, relation_prefix=None, ssl_dir=None): | ||
1336 | 143 | """Allows inspecting relation for settings prefixed with | ||
1337 | 144 | relation_prefix. This is useful for parsing access for multiple | ||
1338 | 145 | databases returned via the shared-db interface (eg, nova_password, | ||
1339 | 146 | quantum_password) | ||
1340 | 147 | """ | ||
1341 | 148 | self.relation_prefix = relation_prefix | ||
1342 | 149 | self.database = database | ||
1343 | 150 | self.user = user | ||
1344 | 151 | self.ssl_dir = ssl_dir | ||
1345 | 152 | |||
1346 | 153 | def __call__(self): | ||
1347 | 154 | self.database = self.database or config('database') | ||
1348 | 155 | self.user = self.user or config('database-user') | ||
1349 | 156 | if None in [self.database, self.user]: | ||
1350 | 157 | log("Could not generate shared_db context. Missing required charm " | ||
1351 | 158 | "config options. (database name and user)", level=ERROR) | ||
1352 | 159 | raise OSContextError | ||
1353 | 160 | |||
1354 | 161 | ctxt = {} | ||
1355 | 162 | |||
1356 | 163 | # NOTE(jamespage) if mysql charm provides a network upon which | ||
1357 | 164 | # access to the database should be made, reconfigure relation | ||
1358 | 165 | # with the service unit's local address and defer execution | ||
1359 | 166 | access_network = relation_get('access-network') | ||
1360 | 167 | if access_network is not None: | ||
1361 | 168 | if self.relation_prefix is not None: | ||
1362 | 169 | hostname_key = "{}_hostname".format(self.relation_prefix) | ||
1363 | 170 | else: | ||
1364 | 171 | hostname_key = "hostname" | ||
1365 | 172 | access_hostname = get_address_in_network(access_network, | ||
1366 | 173 | unit_get('private-address')) | ||
1367 | 174 | set_hostname = relation_get(attribute=hostname_key, | ||
1368 | 175 | unit=local_unit()) | ||
1369 | 176 | if set_hostname != access_hostname: | ||
1370 | 177 | relation_set(relation_settings={hostname_key: access_hostname}) | ||
1371 | 178 | return ctxt # Defer any further hook execution for now.... | ||
1372 | 179 | |||
1373 | 180 | password_setting = 'password' | ||
1374 | 181 | if self.relation_prefix: | ||
1375 | 182 | password_setting = self.relation_prefix + '_password' | ||
1376 | 183 | |||
1377 | 184 | for rid in relation_ids('shared-db'): | ||
1378 | 185 | for unit in related_units(rid): | ||
1379 | 186 | rdata = relation_get(rid=rid, unit=unit) | ||
1380 | 187 | host = rdata.get('db_host') | ||
1381 | 188 | host = format_ipv6_addr(host) or host | ||
1382 | 189 | ctxt = { | ||
1383 | 190 | 'database_host': host, | ||
1384 | 191 | 'database': self.database, | ||
1385 | 192 | 'database_user': self.user, | ||
1386 | 193 | 'database_password': rdata.get(password_setting), | ||
1387 | 194 | 'database_type': 'mysql' | ||
1388 | 195 | } | ||
1389 | 196 | if context_complete(ctxt): | ||
1390 | 197 | db_ssl(rdata, ctxt, self.ssl_dir) | ||
1391 | 198 | return ctxt | ||
1392 | 199 | return {} | ||
1393 | 200 | |||
1394 | 201 | |||
1395 | 202 | class PostgresqlDBContext(OSContextGenerator): | ||
1396 | 203 | interfaces = ['pgsql-db'] | ||
1397 | 204 | |||
1398 | 205 | def __init__(self, database=None): | ||
1399 | 206 | self.database = database | ||
1400 | 207 | |||
1401 | 208 | def __call__(self): | ||
1402 | 209 | self.database = self.database or config('database') | ||
1403 | 210 | if self.database is None: | ||
1404 | 211 | log('Could not generate postgresql_db context. Missing required ' | ||
1405 | 212 | 'charm config options. (database name)', level=ERROR) | ||
1406 | 213 | raise OSContextError | ||
1407 | 214 | |||
1408 | 215 | ctxt = {} | ||
1409 | 216 | for rid in relation_ids(self.interfaces[0]): | ||
1410 | 217 | for unit in related_units(rid): | ||
1411 | 218 | rel_host = relation_get('host', rid=rid, unit=unit) | ||
1412 | 219 | rel_user = relation_get('user', rid=rid, unit=unit) | ||
1413 | 220 | rel_passwd = relation_get('password', rid=rid, unit=unit) | ||
1414 | 221 | ctxt = {'database_host': rel_host, | ||
1415 | 222 | 'database': self.database, | ||
1416 | 223 | 'database_user': rel_user, | ||
1417 | 224 | 'database_password': rel_passwd, | ||
1418 | 225 | 'database_type': 'postgresql'} | ||
1419 | 226 | if context_complete(ctxt): | ||
1420 | 227 | return ctxt | ||
1421 | 228 | |||
1422 | 229 | return {} | ||
1423 | 230 | |||
1424 | 231 | |||
1425 | 232 | def db_ssl(rdata, ctxt, ssl_dir): | ||
1426 | 233 | if 'ssl_ca' in rdata and ssl_dir: | ||
1427 | 234 | ca_path = os.path.join(ssl_dir, 'db-client.ca') | ||
1428 | 235 | with open(ca_path, 'w') as fh: | ||
1429 | 236 | fh.write(b64decode(rdata['ssl_ca'])) | ||
1430 | 237 | |||
1431 | 238 | ctxt['database_ssl_ca'] = ca_path | ||
1432 | 239 | elif 'ssl_ca' in rdata: | ||
1433 | 240 | log("Charm not setup for ssl support but ssl ca found", level=INFO) | ||
1434 | 241 | return ctxt | ||
1435 | 242 | |||
1436 | 243 | if 'ssl_cert' in rdata: | ||
1437 | 244 | cert_path = os.path.join( | ||
1438 | 245 | ssl_dir, 'db-client.cert') | ||
1439 | 246 | if not os.path.exists(cert_path): | ||
1440 | 247 | log("Waiting 1m for ssl client cert validity", level=INFO) | ||
1441 | 248 | time.sleep(60) | ||
1442 | 249 | |||
1443 | 250 | with open(cert_path, 'w') as fh: | ||
1444 | 251 | fh.write(b64decode(rdata['ssl_cert'])) | ||
1445 | 252 | |||
1446 | 253 | ctxt['database_ssl_cert'] = cert_path | ||
1447 | 254 | key_path = os.path.join(ssl_dir, 'db-client.key') | ||
1448 | 255 | with open(key_path, 'w') as fh: | ||
1449 | 256 | fh.write(b64decode(rdata['ssl_key'])) | ||
1450 | 257 | |||
1451 | 258 | ctxt['database_ssl_key'] = key_path | ||
1452 | 259 | |||
1453 | 260 | return ctxt | ||
1454 | 261 | |||
1455 | 262 | |||
1456 | 263 | class IdentityServiceContext(OSContextGenerator): | ||
1457 | 264 | interfaces = ['identity-service'] | ||
1458 | 265 | |||
1459 | 266 | def __call__(self): | ||
1460 | 267 | log('Generating template context for identity-service', level=DEBUG) | ||
1461 | 268 | ctxt = {} | ||
1462 | 269 | for rid in relation_ids('identity-service'): | ||
1463 | 270 | for unit in related_units(rid): | ||
1464 | 271 | rdata = relation_get(rid=rid, unit=unit) | ||
1465 | 272 | serv_host = rdata.get('service_host') | ||
1466 | 273 | serv_host = format_ipv6_addr(serv_host) or serv_host | ||
1467 | 274 | auth_host = rdata.get('auth_host') | ||
1468 | 275 | auth_host = format_ipv6_addr(auth_host) or auth_host | ||
1469 | 276 | svc_protocol = rdata.get('service_protocol') or 'http' | ||
1470 | 277 | auth_protocol = rdata.get('auth_protocol') or 'http' | ||
1471 | 278 | ctxt = {'service_port': rdata.get('service_port'), | ||
1472 | 279 | 'service_host': serv_host, | ||
1473 | 280 | 'auth_host': auth_host, | ||
1474 | 281 | 'auth_port': rdata.get('auth_port'), | ||
1475 | 282 | 'admin_tenant_name': rdata.get('service_tenant'), | ||
1476 | 283 | 'admin_user': rdata.get('service_username'), | ||
1477 | 284 | 'admin_password': rdata.get('service_password'), | ||
1478 | 285 | 'service_protocol': svc_protocol, | ||
1479 | 286 | 'auth_protocol': auth_protocol} | ||
1480 | 287 | if context_complete(ctxt): | ||
1481 | 288 | # NOTE(jamespage) this is required for >= icehouse | ||
1482 | 289 | # so a missing value just indicates keystone needs | ||
1483 | 290 | # upgrading | ||
1484 | 291 | ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') | ||
1485 | 292 | return ctxt | ||
1486 | 293 | |||
1487 | 294 | return {} | ||
1488 | 295 | |||
1489 | 296 | |||
1490 | 297 | class AMQPContext(OSContextGenerator): | ||
1491 | 298 | |||
1492 | 299 | def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): | ||
1493 | 300 | self.ssl_dir = ssl_dir | ||
1494 | 301 | self.rel_name = rel_name | ||
1495 | 302 | self.relation_prefix = relation_prefix | ||
1496 | 303 | self.interfaces = [rel_name] | ||
1497 | 304 | |||
1498 | 305 | def __call__(self): | ||
1499 | 306 | log('Generating template context for amqp', level=DEBUG) | ||
1500 | 307 | conf = config() | ||
1501 | 308 | if self.relation_prefix: | ||
1502 | 309 | user_setting = '%s-rabbit-user' % (self.relation_prefix) | ||
1503 | 310 | vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) | ||
1504 | 311 | else: | ||
1505 | 312 | user_setting = 'rabbit-user' | ||
1506 | 313 | vhost_setting = 'rabbit-vhost' | ||
1507 | 314 | |||
1508 | 315 | try: | ||
1509 | 316 | username = conf[user_setting] | ||
1510 | 317 | vhost = conf[vhost_setting] | ||
1511 | 318 | except KeyError as e: | ||
1512 | 319 | log('Could not generate amqp context. Missing required charm ' | ||
1513 | 320 | 'config options: %s.' % e, level=ERROR) | ||
1514 | 321 | raise OSContextError | ||
1515 | 322 | |||
1516 | 323 | ctxt = {} | ||
1517 | 324 | for rid in relation_ids(self.rel_name): | ||
1518 | 325 | ha_vip_only = False | ||
1519 | 326 | for unit in related_units(rid): | ||
1520 | 327 | if relation_get('clustered', rid=rid, unit=unit): | ||
1521 | 328 | ctxt['clustered'] = True | ||
1522 | 329 | vip = relation_get('vip', rid=rid, unit=unit) | ||
1523 | 330 | vip = format_ipv6_addr(vip) or vip | ||
1524 | 331 | ctxt['rabbitmq_host'] = vip | ||
1525 | 332 | else: | ||
1526 | 333 | host = relation_get('private-address', rid=rid, unit=unit) | ||
1527 | 334 | host = format_ipv6_addr(host) or host | ||
1528 | 335 | ctxt['rabbitmq_host'] = host | ||
1529 | 336 | |||
1530 | 337 | ctxt.update({ | ||
1531 | 338 | 'rabbitmq_user': username, | ||
1532 | 339 | 'rabbitmq_password': relation_get('password', rid=rid, | ||
1533 | 340 | unit=unit), | ||
1534 | 341 | 'rabbitmq_virtual_host': vhost, | ||
1535 | 342 | }) | ||
1536 | 343 | |||
1537 | 344 | ssl_port = relation_get('ssl_port', rid=rid, unit=unit) | ||
1538 | 345 | if ssl_port: | ||
1539 | 346 | ctxt['rabbit_ssl_port'] = ssl_port | ||
1540 | 347 | |||
1541 | 348 | ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) | ||
1542 | 349 | if ssl_ca: | ||
1543 | 350 | ctxt['rabbit_ssl_ca'] = ssl_ca | ||
1544 | 351 | |||
1545 | 352 | if relation_get('ha_queues', rid=rid, unit=unit) is not None: | ||
1546 | 353 | ctxt['rabbitmq_ha_queues'] = True | ||
1547 | 354 | |||
1548 | 355 | ha_vip_only = relation_get('ha-vip-only', | ||
1549 | 356 | rid=rid, unit=unit) is not None | ||
1550 | 357 | |||
1551 | 358 | if context_complete(ctxt): | ||
1552 | 359 | if 'rabbit_ssl_ca' in ctxt: | ||
1553 | 360 | if not self.ssl_dir: | ||
1554 | 361 | log("Charm not setup for ssl support but ssl ca " | ||
1555 | 362 | "found", level=INFO) | ||
1556 | 363 | break | ||
1557 | 364 | |||
1558 | 365 | ca_path = os.path.join( | ||
1559 | 366 | self.ssl_dir, 'rabbit-client-ca.pem') | ||
1560 | 367 | with open(ca_path, 'w') as fh: | ||
1561 | 368 | fh.write(b64decode(ctxt['rabbit_ssl_ca'])) | ||
1562 | 369 | ctxt['rabbit_ssl_ca'] = ca_path | ||
1563 | 370 | |||
1564 | 371 | # Sufficient information found = break out! | ||
1565 | 372 | break | ||
1566 | 373 | |||
1567 | 374 | # Used for active/active rabbitmq >= grizzly | ||
1568 | 375 | if (('clustered' not in ctxt or ha_vip_only) and | ||
1569 | 376 | len(related_units(rid)) > 1): | ||
1570 | 377 | rabbitmq_hosts = [] | ||
1571 | 378 | for unit in related_units(rid): | ||
1572 | 379 | host = relation_get('private-address', rid=rid, unit=unit) | ||
1573 | 380 | host = format_ipv6_addr(host) or host | ||
1574 | 381 | rabbitmq_hosts.append(host) | ||
1575 | 382 | |||
1576 | 383 | ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) | ||
1577 | 384 | |||
1578 | 385 | if not context_complete(ctxt): | ||
1579 | 386 | return {} | ||
1580 | 387 | |||
1581 | 388 | return ctxt | ||
1582 | 389 | |||
1583 | 390 | |||
1584 | 391 | class CephContext(OSContextGenerator): | ||
1585 | 392 | """Generates context for /etc/ceph/ceph.conf templates.""" | ||
1586 | 393 | interfaces = ['ceph'] | ||
1587 | 394 | |||
1588 | 395 | def __call__(self): | ||
1589 | 396 | if not relation_ids('ceph'): | ||
1590 | 397 | return {} | ||
1591 | 398 | |||
1592 | 399 | log('Generating template context for ceph', level=DEBUG) | ||
1593 | 400 | mon_hosts = [] | ||
1594 | 401 | auth = None | ||
1595 | 402 | key = None | ||
1596 | 403 | use_syslog = str(config('use-syslog')).lower() | ||
1597 | 404 | for rid in relation_ids('ceph'): | ||
1598 | 405 | for unit in related_units(rid): | ||
1599 | 406 | auth = relation_get('auth', rid=rid, unit=unit) | ||
1600 | 407 | key = relation_get('key', rid=rid, unit=unit) | ||
1601 | 408 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, | ||
1602 | 409 | unit=unit) | ||
1603 | 410 | unit_priv_addr = relation_get('private-address', rid=rid, | ||
1604 | 411 | unit=unit) | ||
1605 | 412 | ceph_addr = ceph_pub_addr or unit_priv_addr | ||
1606 | 413 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr | ||
1607 | 414 | mon_hosts.append(ceph_addr) | ||
1608 | 415 | |||
1609 | 416 | ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), | ||
1610 | 417 | 'auth': auth, | ||
1611 | 418 | 'key': key, | ||
1612 | 419 | 'use_syslog': use_syslog} | ||
1613 | 420 | |||
1614 | 421 | if not os.path.isdir('/etc/ceph'): | ||
1615 | 422 | os.mkdir('/etc/ceph') | ||
1616 | 423 | |||
1617 | 424 | if not context_complete(ctxt): | ||
1618 | 425 | return {} | ||
1619 | 426 | |||
1620 | 427 | ensure_packages(['ceph-common']) | ||
1621 | 428 | return ctxt | ||
1622 | 429 | |||
1623 | 430 | |||
1624 | 431 | class HAProxyContext(OSContextGenerator): | ||
1625 | 432 | """Provides half a context for the haproxy template, which describes | ||
1626 | 433 | all peers to be included in the cluster. Each charm needs to include | ||
1627 | 434 | its own context generator that describes the port mapping. | ||
1628 | 435 | """ | ||
1629 | 436 | interfaces = ['cluster'] | ||
1630 | 437 | |||
1631 | 438 | def __init__(self, singlenode_mode=False): | ||
1632 | 439 | self.singlenode_mode = singlenode_mode | ||
1633 | 440 | |||
1634 | 441 | def __call__(self): | ||
1635 | 442 | if not relation_ids('cluster') and not self.singlenode_mode: | ||
1636 | 443 | return {} | ||
1637 | 444 | |||
1638 | 445 | if config('prefer-ipv6'): | ||
1639 | 446 | addr = get_ipv6_addr(exc_list=[config('vip')])[0] | ||
1640 | 447 | else: | ||
1641 | 448 | addr = get_host_ip(unit_get('private-address')) | ||
1642 | 449 | |||
1643 | 450 | l_unit = local_unit().replace('/', '-') | ||
1644 | 451 | cluster_hosts = {} | ||
1645 | 452 | |||
1646 | 453 | # NOTE(jamespage): build out map of configured network endpoints | ||
1647 | 454 | # and associated backends | ||
1648 | 455 | for addr_type in ADDRESS_TYPES: | ||
1649 | 456 | cfg_opt = 'os-{}-network'.format(addr_type) | ||
1650 | 457 | laddr = get_address_in_network(config(cfg_opt)) | ||
1651 | 458 | if laddr: | ||
1652 | 459 | netmask = get_netmask_for_address(laddr) | ||
1653 | 460 | cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, | ||
1654 | 461 | netmask), | ||
1655 | 462 | 'backends': {l_unit: laddr}} | ||
1656 | 463 | for rid in relation_ids('cluster'): | ||
1657 | 464 | for unit in related_units(rid): | ||
1658 | 465 | _laddr = relation_get('{}-address'.format(addr_type), | ||
1659 | 466 | rid=rid, unit=unit) | ||
1660 | 467 | if _laddr: | ||
1661 | 468 | _unit = unit.replace('/', '-') | ||
1662 | 469 | cluster_hosts[laddr]['backends'][_unit] = _laddr | ||
1663 | 470 | |||
1664 | 471 | # NOTE(jamespage) add backend based on private address - this | ||
1665 | 472 | # will either be the only backend or the fallback if no acls | ||
1666 | 473 | # match in the frontend | ||
1667 | 474 | cluster_hosts[addr] = {} | ||
1668 | 475 | netmask = get_netmask_for_address(addr) | ||
1669 | 476 | cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), | ||
1670 | 477 | 'backends': {l_unit: addr}} | ||
1671 | 478 | for rid in relation_ids('cluster'): | ||
1672 | 479 | for unit in related_units(rid): | ||
1673 | 480 | _laddr = relation_get('private-address', | ||
1674 | 481 | rid=rid, unit=unit) | ||
1675 | 482 | if _laddr: | ||
1676 | 483 | _unit = unit.replace('/', '-') | ||
1677 | 484 | cluster_hosts[addr]['backends'][_unit] = _laddr | ||
1678 | 485 | |||
1679 | 486 | ctxt = { | ||
1680 | 487 | 'frontends': cluster_hosts, | ||
1681 | 488 | 'default_backend': addr | ||
1682 | 489 | } | ||
1683 | 490 | |||
1684 | 491 | if config('haproxy-server-timeout'): | ||
1685 | 492 | ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') | ||
1686 | 493 | |||
1687 | 494 | if config('haproxy-client-timeout'): | ||
1688 | 495 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') | ||
1689 | 496 | |||
1690 | 497 | if config('prefer-ipv6'): | ||
1691 | 498 | ctxt['ipv6'] = True | ||
1692 | 499 | ctxt['local_host'] = 'ip6-localhost' | ||
1693 | 500 | ctxt['haproxy_host'] = '::' | ||
1694 | 501 | ctxt['stat_port'] = ':::8888' | ||
1695 | 502 | else: | ||
1696 | 503 | ctxt['local_host'] = '127.0.0.1' | ||
1697 | 504 | ctxt['haproxy_host'] = '0.0.0.0' | ||
1698 | 505 | ctxt['stat_port'] = ':8888' | ||
1699 | 506 | |||
1700 | 507 | for frontend in cluster_hosts: | ||
1701 | 508 | if (len(cluster_hosts[frontend]['backends']) > 1 or | ||
1702 | 509 | self.singlenode_mode): | ||
1703 | 510 | # Enable haproxy when we have enough peers. | ||
1704 | 511 | log('Ensuring haproxy enabled in /etc/default/haproxy.', | ||
1705 | 512 | level=DEBUG) | ||
1706 | 513 | with open('/etc/default/haproxy', 'w') as out: | ||
1707 | 514 | out.write('ENABLED=1\n') | ||
1708 | 515 | |||
1709 | 516 | return ctxt | ||
1710 | 517 | |||
1711 | 518 | log('HAProxy context is incomplete, this unit has no peers.', | ||
1712 | 519 | level=INFO) | ||
1713 | 520 | return {} | ||
1714 | 521 | |||
1715 | 522 | |||
1716 | 523 | class ImageServiceContext(OSContextGenerator): | ||
1717 | 524 | interfaces = ['image-service'] | ||
1718 | 525 | |||
1719 | 526 | def __call__(self): | ||
1720 | 527 | """Obtains the glance API server from the image-service relation. | ||
1721 | 528 | Useful in nova and cinder (currently). | ||
1722 | 529 | """ | ||
1723 | 530 | log('Generating template context for image-service.', level=DEBUG) | ||
1724 | 531 | rids = relation_ids('image-service') | ||
1725 | 532 | if not rids: | ||
1726 | 533 | return {} | ||
1727 | 534 | |||
1728 | 535 | for rid in rids: | ||
1729 | 536 | for unit in related_units(rid): | ||
1730 | 537 | api_server = relation_get('glance-api-server', | ||
1731 | 538 | rid=rid, unit=unit) | ||
1732 | 539 | if api_server: | ||
1733 | 540 | return {'glance_api_servers': api_server} | ||
1734 | 541 | |||
1735 | 542 | log("ImageService context is incomplete. Missing required relation " | ||
1736 | 543 | "data.", level=INFO) | ||
1737 | 544 | return {} | ||
1738 | 545 | |||
1739 | 546 | |||
1740 | 547 | class ApacheSSLContext(OSContextGenerator): | ||
1741 | 548 | """Generates a context for an apache vhost configuration that configures | ||
1742 | 549 | HTTPS reverse proxying for one or many endpoints. Generated context | ||
1743 | 550 | looks something like:: | ||
1744 | 551 | |||
1745 | 552 | { | ||
1746 | 553 | 'namespace': 'cinder', | ||
1747 | 554 | 'private_address': 'iscsi.mycinderhost.com', | ||
1748 | 555 | 'endpoints': [(8776, 8766), (8777, 8767)] | ||
1749 | 556 | } | ||
1750 | 557 | |||
1751 | 558 | The endpoints list consists of tuples mapping external ports | ||
1752 | 559 | to internal ports. | ||
1753 | 560 | """ | ||
1754 | 561 | interfaces = ['https'] | ||
1755 | 562 | |||
1756 | 563 | # charms should inherit this context and set external ports | ||
1757 | 564 | # and service namespace accordingly. | ||
1758 | 565 | external_ports = [] | ||
1759 | 566 | service_namespace = None | ||
1760 | 567 | |||
1761 | 568 | def enable_modules(self): | ||
1762 | 569 | cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] | ||
1763 | 570 | check_call(cmd) | ||
1764 | 571 | |||
1765 | 572 | def configure_cert(self, cn=None): | ||
1766 | 573 | ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) | ||
1767 | 574 | mkdir(path=ssl_dir) | ||
1768 | 575 | cert, key = get_cert(cn) | ||
1769 | 576 | if cn: | ||
1770 | 577 | cert_filename = 'cert_{}'.format(cn) | ||
1771 | 578 | key_filename = 'key_{}'.format(cn) | ||
1772 | 579 | else: | ||
1773 | 580 | cert_filename = 'cert' | ||
1774 | 581 | key_filename = 'key' | ||
1775 | 582 | |||
1776 | 583 | write_file(path=os.path.join(ssl_dir, cert_filename), | ||
1777 | 584 | content=b64decode(cert)) | ||
1778 | 585 | write_file(path=os.path.join(ssl_dir, key_filename), | ||
1779 | 586 | content=b64decode(key)) | ||
1780 | 587 | |||
1781 | 588 | def configure_ca(self): | ||
1782 | 589 | ca_cert = get_ca_cert() | ||
1783 | 590 | if ca_cert: | ||
1784 | 591 | install_ca_cert(b64decode(ca_cert)) | ||
1785 | 592 | |||
1786 | 593 | def canonical_names(self): | ||
1787 | 594 | """Figure out which canonical names clients will use to access this service. | ||
1788 | 595 | """ | ||
1789 | 596 | cns = [] | ||
1790 | 597 | for r_id in relation_ids('identity-service'): | ||
1791 | 598 | for unit in related_units(r_id): | ||
1792 | 599 | rdata = relation_get(rid=r_id, unit=unit) | ||
1793 | 600 | for k in rdata: | ||
1794 | 601 | if k.startswith('ssl_key_'): | ||
1795 | 602 | cns.append(k[len('ssl_key_'):]) | ||
1796 | 603 | |||
1797 | 604 | return sorted(list(set(cns))) | ||
1798 | 605 | |||
1799 | 606 | def get_network_addresses(self): | ||
1800 | 607 | """For each network configured, return corresponding address and vip | ||
1801 | 608 | (if available). | ||
1802 | 609 | |||
1803 | 610 | Returns a list of tuples of the form: | ||
1804 | 611 | |||
1805 | 612 | [(address_in_net_a, vip_in_net_a), | ||
1806 | 613 | (address_in_net_b, vip_in_net_b), | ||
1807 | 614 | ...] | ||
1808 | 615 | |||
1809 | 616 | or, if no vip(s) available: | ||
1810 | 617 | |||
1811 | 618 | [(address_in_net_a, address_in_net_a), | ||
1812 | 619 | (address_in_net_b, address_in_net_b), | ||
1813 | 620 | ...] | ||
1814 | 621 | """ | ||
1815 | 622 | addresses = [] | ||
1816 | 623 | if config('vip'): | ||
1817 | 624 | vips = config('vip').split() | ||
1818 | 625 | else: | ||
1819 | 626 | vips = [] | ||
1820 | 627 | |||
1821 | 628 | for net_type in ['os-internal-network', 'os-admin-network', | ||
1822 | 629 | 'os-public-network']: | ||
1823 | 630 | addr = get_address_in_network(config(net_type), | ||
1824 | 631 | unit_get('private-address')) | ||
1825 | 632 | if len(vips) > 1 and is_clustered(): | ||
1826 | 633 | if not config(net_type): | ||
1827 | 634 | log("Multiple networks configured but net_type " | ||
1828 | 635 | "is None (%s)." % net_type, level=WARNING) | ||
1829 | 636 | continue | ||
1830 | 637 | |||
1831 | 638 | for vip in vips: | ||
1832 | 639 | if is_address_in_network(config(net_type), vip): | ||
1833 | 640 | addresses.append((addr, vip)) | ||
1834 | 641 | break | ||
1835 | 642 | |||
1836 | 643 | elif is_clustered() and config('vip'): | ||
1837 | 644 | addresses.append((addr, config('vip'))) | ||
1838 | 645 | else: | ||
1839 | 646 | addresses.append((addr, addr)) | ||
1840 | 647 | |||
1841 | 648 | return sorted(addresses) | ||
1842 | 649 | |||
1843 | 650 | def __call__(self): | ||
1844 | 651 | if isinstance(self.external_ports, six.string_types): | ||
1845 | 652 | self.external_ports = [self.external_ports] | ||
1846 | 653 | |||
1847 | 654 | if not self.external_ports or not https(): | ||
1848 | 655 | return {} | ||
1849 | 656 | |||
1850 | 657 | self.configure_ca() | ||
1851 | 658 | self.enable_modules() | ||
1852 | 659 | |||
1853 | 660 | ctxt = {'namespace': self.service_namespace, | ||
1854 | 661 | 'endpoints': [], | ||
1855 | 662 | 'ext_ports': []} | ||
1856 | 663 | |||
1857 | 664 | for cn in self.canonical_names(): | ||
1858 | 665 | self.configure_cert(cn) | ||
1859 | 666 | |||
1860 | 667 | addresses = self.get_network_addresses() | ||
1861 | 668 | for address, endpoint in sorted(set(addresses)): | ||
1862 | 669 | for api_port in self.external_ports: | ||
1863 | 670 | ext_port = determine_apache_port(api_port, | ||
1864 | 671 | singlenode_mode=True) | ||
1865 | 672 | int_port = determine_api_port(api_port, singlenode_mode=True) | ||
1866 | 673 | portmap = (address, endpoint, int(ext_port), int(int_port)) | ||
1867 | 674 | ctxt['endpoints'].append(portmap) | ||
1868 | 675 | ctxt['ext_ports'].append(int(ext_port)) | ||
1869 | 676 | |||
1870 | 677 | ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) | ||
1871 | 678 | return ctxt | ||
1872 | 679 | |||
1873 | 680 | |||
1874 | 681 | class NeutronContext(OSContextGenerator): | ||
1875 | 682 | interfaces = [] | ||
1876 | 683 | |||
1877 | 684 | @property | ||
1878 | 685 | def plugin(self): | ||
1879 | 686 | return None | ||
1880 | 687 | |||
1881 | 688 | @property | ||
1882 | 689 | def network_manager(self): | ||
1883 | 690 | return None | ||
1884 | 691 | |||
1885 | 692 | @property | ||
1886 | 693 | def packages(self): | ||
1887 | 694 | return neutron_plugin_attribute(self.plugin, 'packages', | ||
1888 | 695 | self.network_manager) | ||
1889 | 696 | |||
1890 | 697 | @property | ||
1891 | 698 | def neutron_security_groups(self): | ||
1892 | 699 | return None | ||
1893 | 700 | |||
1894 | 701 | def _ensure_packages(self): | ||
1895 | 702 | for pkgs in self.packages: | ||
1896 | 703 | ensure_packages(pkgs) | ||
1897 | 704 | |||
1898 | 705 | def _save_flag_file(self): | ||
1899 | 706 | if self.network_manager == 'quantum': | ||
1900 | 707 | _file = '/etc/nova/quantum_plugin.conf' | ||
1901 | 708 | else: | ||
1902 | 709 | _file = '/etc/nova/neutron_plugin.conf' | ||
1903 | 710 | |||
1904 | 711 | with open(_file, 'wb') as out: | ||
1905 | 712 | out.write(self.plugin + '\n') | ||
1906 | 713 | |||
1907 | 714 | def ovs_ctxt(self): | ||
1908 | 715 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1909 | 716 | self.network_manager) | ||
1910 | 717 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
1911 | 718 | self.network_manager) | ||
1912 | 719 | ovs_ctxt = {'core_plugin': driver, | ||
1913 | 720 | 'neutron_plugin': 'ovs', | ||
1914 | 721 | 'neutron_security_groups': self.neutron_security_groups, | ||
1915 | 722 | 'local_ip': unit_private_ip(), | ||
1916 | 723 | 'config': config} | ||
1917 | 724 | |||
1918 | 725 | return ovs_ctxt | ||
1919 | 726 | |||
1920 | 727 | def nvp_ctxt(self): | ||
1921 | 728 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1922 | 729 | self.network_manager) | ||
1923 | 730 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
1924 | 731 | self.network_manager) | ||
1925 | 732 | nvp_ctxt = {'core_plugin': driver, | ||
1926 | 733 | 'neutron_plugin': 'nvp', | ||
1927 | 734 | 'neutron_security_groups': self.neutron_security_groups, | ||
1928 | 735 | 'local_ip': unit_private_ip(), | ||
1929 | 736 | 'config': config} | ||
1930 | 737 | |||
1931 | 738 | return nvp_ctxt | ||
1932 | 739 | |||
1933 | 740 | def n1kv_ctxt(self): | ||
1934 | 741 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1935 | 742 | self.network_manager) | ||
1936 | 743 | n1kv_config = neutron_plugin_attribute(self.plugin, 'config', | ||
1937 | 744 | self.network_manager) | ||
1938 | 745 | n1kv_user_config_flags = config('n1kv-config-flags') | ||
1939 | 746 | restrict_policy_profiles = config('n1kv-restrict-policy-profiles') | ||
1940 | 747 | n1kv_ctxt = {'core_plugin': driver, | ||
1941 | 748 | 'neutron_plugin': 'n1kv', | ||
1942 | 749 | 'neutron_security_groups': self.neutron_security_groups, | ||
1943 | 750 | 'local_ip': unit_private_ip(), | ||
1944 | 751 | 'config': n1kv_config, | ||
1945 | 752 | 'vsm_ip': config('n1kv-vsm-ip'), | ||
1946 | 753 | 'vsm_username': config('n1kv-vsm-username'), | ||
1947 | 754 | 'vsm_password': config('n1kv-vsm-password'), | ||
1948 | 755 | 'restrict_policy_profiles': restrict_policy_profiles} | ||
1949 | 756 | |||
1950 | 757 | if n1kv_user_config_flags: | ||
1951 | 758 | flags = config_flags_parser(n1kv_user_config_flags) | ||
1952 | 759 | n1kv_ctxt['user_config_flags'] = flags | ||
1953 | 760 | |||
1954 | 761 | return n1kv_ctxt | ||
1955 | 762 | |||
1956 | 763 | def calico_ctxt(self): | ||
1957 | 764 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1958 | 765 | self.network_manager) | ||
1959 | 766 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
1960 | 767 | self.network_manager) | ||
1961 | 768 | calico_ctxt = {'core_plugin': driver, | ||
1962 | 769 | 'neutron_plugin': 'Calico', | ||
1963 | 770 | 'neutron_security_groups': self.neutron_security_groups, | ||
1964 | 771 | 'local_ip': unit_private_ip(), | ||
1965 | 772 | 'config': config} | ||
1966 | 773 | |||
1967 | 774 | return calico_ctxt | ||
1968 | 775 | |||
1969 | 776 | def neutron_ctxt(self): | ||
1970 | 777 | if https(): | ||
1971 | 778 | proto = 'https' | ||
1972 | 779 | else: | ||
1973 | 780 | proto = 'http' | ||
1974 | 781 | |||
1975 | 782 | if is_clustered(): | ||
1976 | 783 | host = config('vip') | ||
1977 | 784 | else: | ||
1978 | 785 | host = unit_get('private-address') | ||
1979 | 786 | |||
1980 | 787 | ctxt = {'network_manager': self.network_manager, | ||
1981 | 788 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} | ||
1982 | 789 | return ctxt | ||
1983 | 790 | |||
1984 | 791 | def __call__(self): | ||
1985 | 792 | self._ensure_packages() | ||
1986 | 793 | |||
1987 | 794 | if self.network_manager not in ['quantum', 'neutron']: | ||
1988 | 795 | return {} | ||
1989 | 796 | |||
1990 | 797 | if not self.plugin: | ||
1991 | 798 | return {} | ||
1992 | 799 | |||
1993 | 800 | ctxt = self.neutron_ctxt() | ||
1994 | 801 | |||
1995 | 802 | if self.plugin == 'ovs': | ||
1996 | 803 | ctxt.update(self.ovs_ctxt()) | ||
1997 | 804 | elif self.plugin in ['nvp', 'nsx']: | ||
1998 | 805 | ctxt.update(self.nvp_ctxt()) | ||
1999 | 806 | elif self.plugin == 'n1kv': | ||
2000 | 807 | ctxt.update(self.n1kv_ctxt()) | ||
2001 | 808 | elif self.plugin == 'Calico': | ||
2002 | 809 | ctxt.update(self.calico_ctxt()) | ||
2003 | 810 | |||
2004 | 811 | alchemy_flags = config('neutron-alchemy-flags') | ||
2005 | 812 | if alchemy_flags: | ||
2006 | 813 | flags = config_flags_parser(alchemy_flags) | ||
2007 | 814 | ctxt['neutron_alchemy_flags'] = flags | ||
2008 | 815 | |||
2009 | 816 | self._save_flag_file() | ||
2010 | 817 | return ctxt | ||
2011 | 818 | |||
2012 | 819 | |||
2013 | 820 | class OSConfigFlagContext(OSContextGenerator): | ||
2014 | 821 | """Provides support for user-defined config flags. | ||
2015 | 822 | |||
2016 | 823 | Users can define a comma-separated list of key=value pairs | ||
2017 | 824 | in the charm configuration and apply them at any point in | ||
2018 | 825 | any file by using a template flag. | ||
2019 | 826 | |||
2020 | 827 | Sometimes users might want config flags inserted within a | ||
2021 | 828 | specific section so this class allows users to specify the | ||
2022 | 829 | template flag name, allowing for multiple template flags | ||
2023 | 830 | (sections) within the same context. | ||
2024 | 831 | |||
2025 | 832 | NOTE: the value of config-flags may be a comma-separated list of | ||
2026 | 833 | key=value pairs and some Openstack config files support | ||
2027 | 834 | comma-separated lists as values. | ||
2028 | 835 | """ | ||
2029 | 836 | |||
2030 | 837 | def __init__(self, charm_flag='config-flags', | ||
2031 | 838 | template_flag='user_config_flags'): | ||
2032 | 839 | """ | ||
2033 | 840 | :param charm_flag: config flags in charm configuration. | ||
2034 | 841 | :param template_flag: insert point for user-defined flags in template | ||
2035 | 842 | file. | ||
2036 | 843 | """ | ||
2037 | 844 | super(OSConfigFlagContext, self).__init__() | ||
2038 | 845 | self._charm_flag = charm_flag | ||
2039 | 846 | self._template_flag = template_flag | ||
2040 | 847 | |||
2041 | 848 | def __call__(self): | ||
2042 | 849 | config_flags = config(self._charm_flag) | ||
2043 | 850 | if not config_flags: | ||
2044 | 851 | return {} | ||
2045 | 852 | |||
2046 | 853 | return {self._template_flag: | ||
2047 | 854 | config_flags_parser(config_flags)} | ||
2048 | 855 | |||
2049 | 856 | |||
2050 | 857 | class SubordinateConfigContext(OSContextGenerator): | ||
2051 | 858 | |||
2052 | 859 | """ | ||
2053 | 860 | Responsible for inspecting relations to subordinates that | ||
2054 | 861 | may be exporting required config via a json blob. | ||
2055 | 862 | |||
2056 | 863 | The subordinate interface allows subordinates to export their | ||
2057 | 864 | configuration requirements to the principal for multiple config | ||
2058 | 865 | files and multiple services. For example, a subordinate that has interfaces | ||
2059 | 866 | to both glance and nova may export the following yaml blob as json:: | ||
2060 | 867 | |||
2061 | 868 | glance: | ||
2062 | 869 | /etc/glance/glance-api.conf: | ||
2063 | 870 | sections: | ||
2064 | 871 | DEFAULT: | ||
2065 | 872 | - [key1, value1] | ||
2066 | 873 | /etc/glance/glance-registry.conf: | ||
2067 | 874 | MYSECTION: | ||
2068 | 875 | - [key2, value2] | ||
2069 | 876 | nova: | ||
2070 | 877 | /etc/nova/nova.conf: | ||
2071 | 878 | sections: | ||
2072 | 879 | DEFAULT: | ||
2073 | 880 | - [key3, value3] | ||
2074 | 881 | |||
2075 | 882 | |||
2076 | 883 | It is then up to the principal charms to subscribe this context to | ||
2077 | 884 | the service+config file it is interested in. Configuration data will | ||
2078 | 885 | be available in the template context, in glance's case, as:: | ||
2079 | 886 | |||
2080 | 887 | ctxt = { | ||
2081 | 888 | ... other context ... | ||
2082 | 889 | 'subordinate_config': { | ||
2083 | 890 | 'DEFAULT': { | ||
2084 | 891 | 'key1': 'value1', | ||
2085 | 892 | }, | ||
2086 | 893 | 'MYSECTION': { | ||
2087 | 894 | 'key2': 'value2', | ||
2088 | 895 | }, | ||
2089 | 896 | } | ||
2090 | 897 | } | ||
2091 | 898 | """ | ||
2092 | 899 | |||
2093 | 900 | def __init__(self, service, config_file, interface): | ||
2094 | 901 | """ | ||
2095 | 902 | :param service : Service name key to query in any subordinate | ||
2096 | 903 | data found | ||
2097 | 904 | :param config_file : Service's config file to query sections | ||
2098 | 905 | :param interface : Subordinate interface to inspect | ||
2099 | 906 | """ | ||
2100 | 907 | self.service = service | ||
2101 | 908 | self.config_file = config_file | ||
2102 | 909 | self.interface = interface | ||
2103 | 910 | |||
2104 | 911 | def __call__(self): | ||
2105 | 912 | ctxt = {'sections': {}} | ||
2106 | 913 | for rid in relation_ids(self.interface): | ||
2107 | 914 | for unit in related_units(rid): | ||
2108 | 915 | sub_config = relation_get('subordinate_configuration', | ||
2109 | 916 | rid=rid, unit=unit) | ||
2110 | 917 | if sub_config and sub_config != '': | ||
2111 | 918 | try: | ||
2112 | 919 | sub_config = json.loads(sub_config) | ||
2113 | 920 | except: | ||
2114 | 921 | log('Could not parse JSON from subordinate_config ' | ||
2115 | 922 | 'setting from %s' % rid, level=ERROR) | ||
2116 | 923 | continue | ||
2117 | 924 | |||
2118 | 925 | if self.service not in sub_config: | ||
2119 | 926 | log('Found subordinate_config on %s but it contained' | ||
2120 | 927 | ' nothing for %s service' % (rid, self.service), | ||
2121 | 928 | level=INFO) | ||
2122 | 929 | continue | ||
2123 | 930 | |||
2124 | 931 | sub_config = sub_config[self.service] | ||
2125 | 932 | if self.config_file not in sub_config: | ||
2126 | 933 | log('Found subordinate_config on %s but it contained' | ||
2127 | 934 | ' nothing for %s' % (rid, self.config_file), | ||
2128 | 935 | level=INFO) | ||
2129 | 936 | continue | ||
2130 | 937 | |||
2131 | 938 | sub_config = sub_config[self.config_file] | ||
2132 | 939 | for k, v in six.iteritems(sub_config): | ||
2133 | 940 | if k == 'sections': | ||
2134 | 941 | for section, config_dict in six.iteritems(v): | ||
2135 | 942 | log("adding section '%s'" % (section), | ||
2136 | 943 | level=DEBUG) | ||
2137 | 944 | ctxt[k][section] = config_dict | ||
2138 | 945 | else: | ||
2139 | 946 | ctxt[k] = v | ||
2140 | 947 | |||
2141 | 948 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) | ||
2142 | 949 | return ctxt | ||
2143 | 950 | |||
2144 | 951 | |||
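To make the SubordinateConfigContext docstring above concrete, here is a hedged sketch of the relation data a subordinate might publish; glance, the interface name and the option values are examples only, not taken from this diff.

    # Sketch: a subordinate exporting config for its principal to merge
    # into /etc/glance/glance-api.conf via SubordinateConfigContext.
    import json
    from charmhelpers.core.hookenv import relation_set

    sub_config = {
        'glance': {
            '/etc/glance/glance-api.conf': {
                'sections': {
                    'DEFAULT': [['key1', 'value1']],
                },
            },
        },
    }
    relation_set(subordinate_configuration=json.dumps(sub_config))

    # The principal charm would then consume it with something like:
    #   SubordinateConfigContext(service='glance',
    #                            config_file='/etc/glance/glance-api.conf',
    #                            interface='glance-plugin')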
2145 | 952 | class LogLevelContext(OSContextGenerator): | ||
2146 | 953 | |||
2147 | 954 | def __call__(self): | ||
2148 | 955 | ctxt = {} | ||
2149 | 956 | ctxt['debug'] = \ | ||
2150 | 957 | False if config('debug') is None else config('debug') | ||
2151 | 958 | ctxt['verbose'] = \ | ||
2152 | 959 | False if config('verbose') is None else config('verbose') | ||
2153 | 960 | |||
2154 | 961 | return ctxt | ||
2155 | 962 | |||
2156 | 963 | |||
2157 | 964 | class SyslogContext(OSContextGenerator): | ||
2158 | 965 | |||
2159 | 966 | def __call__(self): | ||
2160 | 967 | ctxt = {'use_syslog': config('use-syslog')} | ||
2161 | 968 | return ctxt | ||
2162 | 969 | |||
2163 | 970 | |||
2164 | 971 | class BindHostContext(OSContextGenerator): | ||
2165 | 972 | |||
2166 | 973 | def __call__(self): | ||
2167 | 974 | if config('prefer-ipv6'): | ||
2168 | 975 | return {'bind_host': '::'} | ||
2169 | 976 | else: | ||
2170 | 977 | return {'bind_host': '0.0.0.0'} | ||
2171 | 978 | |||
2172 | 979 | |||
2173 | 980 | class WorkerConfigContext(OSContextGenerator): | ||
2174 | 981 | |||
2175 | 982 | @property | ||
2176 | 983 | def num_cpus(self): | ||
2177 | 984 | try: | ||
2178 | 985 | from psutil import NUM_CPUS | ||
2179 | 986 | except ImportError: | ||
2180 | 987 | apt_install('python-psutil', fatal=True) | ||
2181 | 988 | from psutil import NUM_CPUS | ||
2182 | 989 | |||
2183 | 990 | return NUM_CPUS | ||
2184 | 991 | |||
2185 | 992 | def __call__(self): | ||
2186 | 993 | multiplier = config('worker-multiplier') or 0 | ||
2187 | 994 | ctxt = {"workers": self.num_cpus * multiplier} | ||
2188 | 995 | return ctxt | ||
2189 | 996 | |||
2190 | 997 | |||
2191 | 998 | class ZeroMQContext(OSContextGenerator): | ||
2192 | 999 | interfaces = ['zeromq-configuration'] | ||
2193 | 1000 | |||
2194 | 1001 | def __call__(self): | ||
2195 | 1002 | ctxt = {} | ||
2196 | 1003 | if is_relation_made('zeromq-configuration', 'host'): | ||
2197 | 1004 | for rid in relation_ids('zeromq-configuration'): | ||
2198 | 1005 | for unit in related_units(rid): | ||
2199 | 1006 | ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) | ||
2200 | 1007 | ctxt['zmq_host'] = relation_get('host', unit, rid) | ||
2201 | 1008 | |||
2202 | 1009 | return ctxt | ||
2203 | 1010 | |||
2204 | 1011 | |||
2205 | 1012 | class NotificationDriverContext(OSContextGenerator): | ||
2206 | 1013 | |||
2207 | 1014 | def __init__(self, zmq_relation='zeromq-configuration', | ||
2208 | 1015 | amqp_relation='amqp'): | ||
2209 | 1016 | """ | ||
2210 | 1017 | :param zmq_relation: Name of Zeromq relation to check | ||
2211 | 1018 | """ | ||
2212 | 1019 | self.zmq_relation = zmq_relation | ||
2213 | 1020 | self.amqp_relation = amqp_relation | ||
2214 | 1021 | |||
2215 | 1022 | def __call__(self): | ||
2216 | 1023 | ctxt = {'notifications': 'False'} | ||
2217 | 1024 | if is_relation_made(self.amqp_relation): | ||
2218 | 1025 | ctxt['notifications'] = "True" | ||
2219 | 1026 | |||
2220 | 1027 | return ctxt | ||
2221 | 1028 | |||
2222 | 1029 | |||
2223 | 1030 | class SysctlContext(OSContextGenerator): | ||
2224 | 1031 | """This context checks if the 'sysctl' option exists in configuration | ||
2225 | 1032 | and, if so, creates a file with the loaded contents""" | ||
2226 | 1033 | def __call__(self): | ||
2227 | 1034 | sysctl_dict = config('sysctl') | ||
2228 | 1035 | if sysctl_dict: | ||
2229 | 1036 | sysctl_create(sysctl_dict, | ||
2230 | 1037 | '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) | ||
2231 | 1038 | return {'sysctl': sysctl_dict} | ||
2232 | 0 | 1039 | ||
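For illustration, the behaviour of config_flags_parser() added above is easiest to see with a couple of inputs. This is a sketch only; the option names and values are made up, and the import assumes the charm's hooks/ directory is on the Python path.

    # Sketch: exercising config_flags_parser() from the file added above.
    from charmhelpers.contrib.openstack.context import config_flags_parser

    # Simple key=value pairs.
    flags = config_flags_parser('token_cache_time=300,signing_dir=/var/cache/swift')
    assert flags == {'token_cache_time': '300',
                     'signing_dir': '/var/cache/swift'}

    # Values may themselves contain commas; the parser keeps the list intact
    # and only splits on the final comma before the next key.
    flags = config_flags_parser('memcached_servers=10.0.0.1:11211,10.0.0.2:11211,debug=True')
    assert flags == {'memcached_servers': '10.0.0.1:11211,10.0.0.2:11211',
                     'debug': 'True'}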
2233 | === added file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
2234 | --- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000 | |||
2235 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2015-01-15 16:18:44 +0000 | |||
2236 | @@ -0,0 +1,93 @@ | |||
2237 | 1 | from charmhelpers.core.hookenv import ( | ||
2238 | 2 | config, | ||
2239 | 3 | unit_get, | ||
2240 | 4 | ) | ||
2241 | 5 | from charmhelpers.contrib.network.ip import ( | ||
2242 | 6 | get_address_in_network, | ||
2243 | 7 | is_address_in_network, | ||
2244 | 8 | is_ipv6, | ||
2245 | 9 | get_ipv6_addr, | ||
2246 | 10 | ) | ||
2247 | 11 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | ||
2248 | 12 | |||
2249 | 13 | PUBLIC = 'public' | ||
2250 | 14 | INTERNAL = 'int' | ||
2251 | 15 | ADMIN = 'admin' | ||
2252 | 16 | |||
2253 | 17 | ADDRESS_MAP = { | ||
2254 | 18 | PUBLIC: { | ||
2255 | 19 | 'config': 'os-public-network', | ||
2256 | 20 | 'fallback': 'public-address' | ||
2257 | 21 | }, | ||
2258 | 22 | INTERNAL: { | ||
2259 | 23 | 'config': 'os-internal-network', | ||
2260 | 24 | 'fallback': 'private-address' | ||
2261 | 25 | }, | ||
2262 | 26 | ADMIN: { | ||
2263 | 27 | 'config': 'os-admin-network', | ||
2264 | 28 | 'fallback': 'private-address' | ||
2265 | 29 | } | ||
2266 | 30 | } | ||
2267 | 31 | |||
2268 | 32 | |||
2269 | 33 | def canonical_url(configs, endpoint_type=PUBLIC): | ||
2270 | 34 | """Returns the correct HTTP URL to this host given the state of HTTPS | ||
2271 | 35 | configuration, hacluster and charm configuration. | ||
2272 | 36 | |||
2273 | 37 | :param configs: OSTemplateRenderer config templating object to inspect | ||
2274 | 38 | for a complete https context. | ||
2275 | 39 | :param endpoint_type: str endpoint type to resolve. | ||
2276 | 40 | :returns: str base URL for services on the current service unit. | ||
2277 | 41 | """ | ||
2278 | 42 | scheme = 'http' | ||
2279 | 43 | if 'https' in configs.complete_contexts(): | ||
2280 | 44 | scheme = 'https' | ||
2281 | 45 | address = resolve_address(endpoint_type) | ||
2282 | 46 | if is_ipv6(address): | ||
2283 | 47 | address = "[{}]".format(address) | ||
2284 | 48 | return '%s://%s' % (scheme, address) | ||
2285 | 49 | |||
2286 | 50 | |||
2287 | 51 | def resolve_address(endpoint_type=PUBLIC): | ||
2288 | 52 | """Return unit address depending on net config. | ||
2289 | 53 | |||
2290 | 54 | If unit is clustered with vip(s) and has net splits defined, return vip on | ||
2291 | 55 | correct network. If clustered with no nets defined, return primary vip. | ||
2292 | 56 | |||
2293 | 57 | If not clustered, return unit address ensuring address is on configured net | ||
2294 | 58 | split if one is configured. | ||
2295 | 59 | |||
2296 | 60 | :param endpoint_type: Network endpoint type | ||
2297 | 61 | """ | ||
2298 | 62 | resolved_address = None | ||
2299 | 63 | vips = config('vip') | ||
2300 | 64 | if vips: | ||
2301 | 65 | vips = vips.split() | ||
2302 | 66 | |||
2303 | 67 | net_type = ADDRESS_MAP[endpoint_type]['config'] | ||
2304 | 68 | net_addr = config(net_type) | ||
2305 | 69 | net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] | ||
2306 | 70 | clustered = is_clustered() | ||
2307 | 71 | if clustered: | ||
2308 | 72 | if not net_addr: | ||
2309 | 73 | # If no net-splits defined, we expect a single vip | ||
2310 | 74 | resolved_address = vips[0] | ||
2311 | 75 | else: | ||
2312 | 76 | for vip in vips: | ||
2313 | 77 | if is_address_in_network(net_addr, vip): | ||
2314 | 78 | resolved_address = vip | ||
2315 | 79 | break | ||
2316 | 80 | else: | ||
2317 | 81 | if config('prefer-ipv6'): | ||
2318 | 82 | fallback_addr = get_ipv6_addr(exc_list=vips)[0] | ||
2319 | 83 | else: | ||
2320 | 84 | fallback_addr = unit_get(net_fallback) | ||
2321 | 85 | |||
2322 | 86 | resolved_address = get_address_in_network(net_addr, fallback_addr) | ||
2323 | 87 | |||
2324 | 88 | if resolved_address is None: | ||
2325 | 89 | raise ValueError("Unable to resolve a suitable IP address based on " | ||
2326 | 90 | "charm state and configuration. (net_type=%s, " | ||
2327 | 91 | "clustered=%s)" % (net_type, clustered)) | ||
2328 | 92 | |||
2329 | 93 | return resolved_address | ||
2330 | 0 | 94 | ||
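For illustration, a hypothetical hook excerpt showing how the canonical_url()/resolve_address() helpers above are typically consumed. The endpoint_urls() function, its configs argument (an object exposing complete_contexts()) and the '/swift/v1' suffix are assumptions, not part of this diff.

    # Sketch: building endpoint URLs that honour os-*-network splits and
    # the VIP when the unit is clustered.
    from charmhelpers.contrib.openstack.ip import canonical_url, PUBLIC, INTERNAL, ADMIN

    def endpoint_urls(configs, port=80):
        urls = {}
        for ep_type in (PUBLIC, INTERNAL, ADMIN):
            base = canonical_url(configs, ep_type)   # e.g. 'https://10.5.100.1'
            urls[ep_type] = '{}:{}/swift/v1'.format(base, port)
        return urls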
2331 | === added file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
2332 | --- hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000 | |||
2333 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-01-15 16:18:44 +0000 | |||
2334 | @@ -0,0 +1,223 @@ | |||
2335 | 1 | # Various utilities for dealing with Neutron and the renaming from Quantum. | ||
2336 | 2 | |||
2337 | 3 | from subprocess import check_output | ||
2338 | 4 | |||
2339 | 5 | from charmhelpers.core.hookenv import ( | ||
2340 | 6 | config, | ||
2341 | 7 | log, | ||
2342 | 8 | ERROR, | ||
2343 | 9 | ) | ||
2344 | 10 | |||
2345 | 11 | from charmhelpers.contrib.openstack.utils import os_release | ||
2346 | 12 | |||
2347 | 13 | |||
2348 | 14 | def headers_package(): | ||
2349 | 15 | """Return the linux-headers package for the running kernel, needed | ||
2350 | 16 | when building DKMS packages""" | ||
2351 | 17 | kver = check_output(['uname', '-r']).decode('UTF-8').strip() | ||
2352 | 18 | return 'linux-headers-%s' % kver | ||
2353 | 19 | |||
2354 | 20 | QUANTUM_CONF_DIR = '/etc/quantum' | ||
2355 | 21 | |||
2356 | 22 | |||
2357 | 23 | def kernel_version(): | ||
2358 | 24 | """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """ | ||
2359 | 25 | kver = check_output(['uname', '-r']).decode('UTF-8').strip() | ||
2360 | 26 | kver = kver.split('.') | ||
2361 | 27 | return (int(kver[0]), int(kver[1])) | ||
2362 | 28 | |||
2363 | 29 | |||
2364 | 30 | def determine_dkms_package(): | ||
2365 | 31 | """ Determine which DKMS package should be used based on kernel version """ | ||
2366 | 32 | # NOTE: 3.13 kernels have native support for GRE and VXLAN | ||
2367 | 33 | if kernel_version() >= (3, 13): | ||
2368 | 34 | return [] | ||
2369 | 35 | else: | ||
2370 | 36 | return ['openvswitch-datapath-dkms'] | ||
2371 | 37 | |||
2372 | 38 | |||
2373 | 39 | # legacy | ||
2374 | 40 | |||
2375 | 41 | |||
2376 | 42 | def quantum_plugins(): | ||
2377 | 43 | from charmhelpers.contrib.openstack import context | ||
2378 | 44 | return { | ||
2379 | 45 | 'ovs': { | ||
2380 | 46 | 'config': '/etc/quantum/plugins/openvswitch/' | ||
2381 | 47 | 'ovs_quantum_plugin.ini', | ||
2382 | 48 | 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' | ||
2383 | 49 | 'OVSQuantumPluginV2', | ||
2384 | 50 | 'contexts': [ | ||
2385 | 51 | context.SharedDBContext(user=config('neutron-database-user'), | ||
2386 | 52 | database=config('neutron-database'), | ||
2387 | 53 | relation_prefix='neutron', | ||
2388 | 54 | ssl_dir=QUANTUM_CONF_DIR)], | ||
2389 | 55 | 'services': ['quantum-plugin-openvswitch-agent'], | ||
2390 | 56 | 'packages': [[headers_package()] + determine_dkms_package(), | ||
2391 | 57 | ['quantum-plugin-openvswitch-agent']], | ||
2392 | 58 | 'server_packages': ['quantum-server', | ||
2393 | 59 | 'quantum-plugin-openvswitch'], | ||
2394 | 60 | 'server_services': ['quantum-server'] | ||
2395 | 61 | }, | ||
2396 | 62 | 'nvp': { | ||
2397 | 63 | 'config': '/etc/quantum/plugins/nicira/nvp.ini', | ||
2398 | 64 | 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' | ||
2399 | 65 | 'QuantumPlugin.NvpPluginV2', | ||
2400 | 66 | 'contexts': [ | ||
2401 | 67 | context.SharedDBContext(user=config('neutron-database-user'), | ||
2402 | 68 | database=config('neutron-database'), | ||
2403 | 69 | relation_prefix='neutron', | ||
2404 | 70 | ssl_dir=QUANTUM_CONF_DIR)], | ||
2405 | 71 | 'services': [], | ||
2406 | 72 | 'packages': [], | ||
2407 | 73 | 'server_packages': ['quantum-server', | ||
2408 | 74 | 'quantum-plugin-nicira'], | ||
2409 | 75 | 'server_services': ['quantum-server'] | ||
2410 | 76 | } | ||
2411 | 77 | } | ||
2412 | 78 | |||
2413 | 79 | NEUTRON_CONF_DIR = '/etc/neutron' | ||
2414 | 80 | |||
2415 | 81 | |||
2416 | 82 | def neutron_plugins(): | ||
2417 | 83 | from charmhelpers.contrib.openstack import context | ||
2418 | 84 | release = os_release('nova-common') | ||
2419 | 85 | plugins = { | ||
2420 | 86 | 'ovs': { | ||
2421 | 87 | 'config': '/etc/neutron/plugins/openvswitch/' | ||
2422 | 88 | 'ovs_neutron_plugin.ini', | ||
2423 | 89 | 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' | ||
2424 | 90 | 'OVSNeutronPluginV2', | ||
2425 | 91 | 'contexts': [ | ||
2426 | 92 | context.SharedDBContext(user=config('neutron-database-user'), | ||
2427 | 93 | database=config('neutron-database'), | ||
2428 | 94 | relation_prefix='neutron', | ||
2429 | 95 | ssl_dir=NEUTRON_CONF_DIR)], | ||
2430 | 96 | 'services': ['neutron-plugin-openvswitch-agent'], | ||
2431 | 97 | 'packages': [[headers_package()] + determine_dkms_package(), | ||
2432 | 98 | ['neutron-plugin-openvswitch-agent']], | ||
2433 | 99 | 'server_packages': ['neutron-server', | ||
2434 | 100 | 'neutron-plugin-openvswitch'], | ||
2435 | 101 | 'server_services': ['neutron-server'] | ||
2436 | 102 | }, | ||
2437 | 103 | 'nvp': { | ||
2438 | 104 | 'config': '/etc/neutron/plugins/nicira/nvp.ini', | ||
2439 | 105 | 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' | ||
2440 | 106 | 'NeutronPlugin.NvpPluginV2', | ||
2441 | 107 | 'contexts': [ | ||
2442 | 108 | context.SharedDBContext(user=config('neutron-database-user'), | ||
2443 | 109 | database=config('neutron-database'), | ||
2444 | 110 | relation_prefix='neutron', | ||
2445 | 111 | ssl_dir=NEUTRON_CONF_DIR)], | ||
2446 | 112 | 'services': [], | ||
2447 | 113 | 'packages': [], | ||
2448 | 114 | 'server_packages': ['neutron-server', | ||
2449 | 115 | 'neutron-plugin-nicira'], | ||
2450 | 116 | 'server_services': ['neutron-server'] | ||
2451 | 117 | }, | ||
2452 | 118 | 'nsx': { | ||
2453 | 119 | 'config': '/etc/neutron/plugins/vmware/nsx.ini', | ||
2454 | 120 | 'driver': 'vmware', | ||
2455 | 121 | 'contexts': [ | ||
2456 | 122 | context.SharedDBContext(user=config('neutron-database-user'), | ||
2457 | 123 | database=config('neutron-database'), | ||
2458 | 124 | relation_prefix='neutron', | ||
2459 | 125 | ssl_dir=NEUTRON_CONF_DIR)], | ||
2460 | 126 | 'services': [], | ||
2461 | 127 | 'packages': [], | ||
2462 | 128 | 'server_packages': ['neutron-server', | ||
2463 | 129 | 'neutron-plugin-vmware'], | ||
2464 | 130 | 'server_services': ['neutron-server'] | ||
2465 | 131 | }, | ||
2466 | 132 | 'n1kv': { | ||
2467 | 133 | 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', | ||
2468 | 134 | 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', | ||
2469 | 135 | 'contexts': [ | ||
2470 | 136 | context.SharedDBContext(user=config('neutron-database-user'), | ||
2471 | 137 | database=config('neutron-database'), | ||
2472 | 138 | relation_prefix='neutron', | ||
2473 | 139 | ssl_dir=NEUTRON_CONF_DIR)], | ||
2474 | 140 | 'services': [], | ||
2475 | 141 | 'packages': [[headers_package()] + determine_dkms_package(), | ||
2476 | 142 | ['neutron-plugin-cisco']], | ||
2477 | 143 | 'server_packages': ['neutron-server', | ||
2478 | 144 | 'neutron-plugin-cisco'], | ||
2479 | 145 | 'server_services': ['neutron-server'] | ||
2480 | 146 | }, | ||
2481 | 147 | 'Calico': { | ||
2482 | 148 | 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', | ||
2483 | 149 | 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', | ||
2484 | 150 | 'contexts': [ | ||
2485 | 151 | context.SharedDBContext(user=config('neutron-database-user'), | ||
2486 | 152 | database=config('neutron-database'), | ||
2487 | 153 | relation_prefix='neutron', | ||
2488 | 154 | ssl_dir=NEUTRON_CONF_DIR)], | ||
2489 | 155 | 'services': ['calico-felix', | ||
2490 | 156 | 'bird', | ||
2491 | 157 | 'neutron-dhcp-agent', | ||
2492 | 158 | 'nova-api-metadata'], | ||
2493 | 159 | 'packages': [[headers_package()] + determine_dkms_package(), | ||
2494 | 160 | ['calico-compute', | ||
2495 | 161 | 'bird', | ||
2496 | 162 | 'neutron-dhcp-agent', | ||
2497 | 163 | 'nova-api-metadata']], | ||
2498 | 164 | 'server_packages': ['neutron-server', 'calico-control'], | ||
2499 | 165 | 'server_services': ['neutron-server'] | ||
2500 | 166 | } | ||
2501 | 167 | } | ||
2502 | 168 | if release >= 'icehouse': | ||
2503 | 169 | # NOTE: patch in ml2 plugin for icehouse onwards | ||
2504 | 170 | plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' | ||
2505 | 171 | plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' | ||
2506 | 172 | plugins['ovs']['server_packages'] = ['neutron-server', | ||
2507 | 173 | 'neutron-plugin-ml2'] | ||
2508 | 174 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards | ||
2509 | 175 | plugins['nvp'] = plugins['nsx'] | ||
2510 | 176 | return plugins | ||
2511 | 177 | |||
2512 | 178 | |||
2513 | 179 | def neutron_plugin_attribute(plugin, attr, net_manager=None): | ||
2514 | 180 | manager = net_manager or network_manager() | ||
2515 | 181 | if manager == 'quantum': | ||
2516 | 182 | plugins = quantum_plugins() | ||
2517 | 183 | elif manager == 'neutron': | ||
2518 | 184 | plugins = neutron_plugins() | ||
2519 | 185 | else: | ||
2520 | 186 | log("Network manager '%s' does not support plugins." % (manager), | ||
2521 | 187 | level=ERROR) | ||
2522 | 188 | raise Exception | ||
2523 | 189 | |||
2524 | 190 | try: | ||
2525 | 191 | _plugin = plugins[plugin] | ||
2526 | 192 | except KeyError: | ||
2527 | 193 | log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) | ||
2528 | 194 | raise Exception | ||
2529 | 195 | |||
2530 | 196 | try: | ||
2531 | 197 | return _plugin[attr] | ||
2532 | 198 | except KeyError: | ||
2533 | 199 | return None | ||
2534 | 200 | |||
2535 | 201 | |||
2536 | 202 | def network_manager(): | ||
2537 | 203 | ''' | ||
2538 | 204 | Deals with the renaming of Quantum to Neutron in H and any situations | ||
2539 | 205 | that require compatibility (e.g. deploying H with network-manager=quantum, | ||
2540 | 206 | upgrading from G). | ||
2541 | 207 | ''' | ||
2542 | 208 | release = os_release('nova-common') | ||
2543 | 209 | manager = config('network-manager').lower() | ||
2544 | 210 | |||
2545 | 211 | if manager not in ['quantum', 'neutron']: | ||
2546 | 212 | return manager | ||
2547 | 213 | |||
2548 | 214 | if release in ['essex']: | ||
2549 | 215 | # E does not support neutron | ||
2550 | 216 | log('Neutron networking not supported in Essex.', level=ERROR) | ||
2551 | 217 | raise Exception | ||
2552 | 218 | elif release in ['folsom', 'grizzly']: | ||
2553 | 219 | # neutron is named quantum in F and G | ||
2554 | 220 | return 'quantum' | ||
2555 | 221 | else: | ||
2556 | 222 | # ensure accurate naming for all releases post-H | ||
2557 | 223 | return 'neutron' | ||
2558 | 0 | 224 | ||
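As a rough sketch of how the plugin map above is consumed (illustrative only, and it must run inside a hook since the contexts read charm config; the 'ovs' key and the icehouse behaviour follow from the code shown above):

    from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

    # Look up attributes for the OVS plugin; on icehouse and later the ml2
    # override patched in above is what callers see.
    pkgs = neutron_plugin_attribute('ovs', 'server_packages', net_manager='neutron')
    # -> ['neutron-server', 'neutron-plugin-ml2'] on icehouse and later
    cfg = neutron_plugin_attribute('ovs', 'config', net_manager='neutron')
    # -> '/etc/neutron/plugins/ml2/ml2_conf.ini' on icehouse and later
    missing = neutron_plugin_attribute('ovs', 'no-such-attr', net_manager='neutron')
    # -> None (unknown attributes return None rather than raising)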
2559 | === added directory 'hooks/charmhelpers/contrib/openstack/templates' | |||
2560 | === added file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py' | |||
2561 | --- hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000 | |||
2562 | +++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 2015-01-15 16:18:44 +0000 | |||
2563 | @@ -0,0 +1,2 @@ | |||
2564 | 1 | # dummy __init__.py to fool syncer into thinking this is a syncable python | ||
2565 | 2 | # module | ||
2566 | 0 | 3 | ||
2567 | === added file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' | |||
2568 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000 | |||
2569 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-01-15 16:18:44 +0000 | |||
2570 | @@ -0,0 +1,15 @@ | |||
2571 | 1 | ############################################################################### | ||
2572 | 2 | # [ WARNING ] | ||
2573 | 3 | # ceph configuration file maintained by Juju | ||
2574 | 4 | # local changes may be overwritten. | ||
2575 | 5 | ############################################################################### | ||
2576 | 6 | [global] | ||
2577 | 7 | {% if auth -%} | ||
2578 | 8 | auth_supported = {{ auth }} | ||
2579 | 9 | keyring = /etc/ceph/$cluster.$name.keyring | ||
2580 | 10 | mon host = {{ mon_hosts }} | ||
2581 | 11 | {% endif -%} | ||
2582 | 12 | log to syslog = {{ use_syslog }} | ||
2583 | 13 | err to syslog = {{ use_syslog }} | ||
2584 | 14 | clog to syslog = {{ use_syslog }} | ||
2585 | 15 | |||
2586 | 0 | 16 | ||
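For reference, the ceph.conf template above is rendered from a context shaped roughly like the following (a hedged sketch; the key names are the template variables, the values are hypothetical):

    ctxt = {
        'auth': 'cephx',
        'mon_hosts': '10.5.0.1:6789 10.5.0.2:6789 10.5.0.3:6789',
        'use_syslog': 'false',
    }
    # With 'auth' set, the rendered file gains the auth_supported, keyring and
    # 'mon host' lines; the syslog settings are always emitted.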
2587 | === added file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
2588 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000 | |||
2589 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-01-15 16:18:44 +0000 | |||
2590 | @@ -0,0 +1,58 @@ | |||
2591 | 1 | global | ||
2592 | 2 | log {{ local_host }} local0 | ||
2593 | 3 | log {{ local_host }} local1 notice | ||
2594 | 4 | maxconn 20000 | ||
2595 | 5 | user haproxy | ||
2596 | 6 | group haproxy | ||
2597 | 7 | spread-checks 0 | ||
2598 | 8 | |||
2599 | 9 | defaults | ||
2600 | 10 | log global | ||
2601 | 11 | mode tcp | ||
2602 | 12 | option tcplog | ||
2603 | 13 | option dontlognull | ||
2604 | 14 | retries 3 | ||
2605 | 15 | timeout queue 1000 | ||
2606 | 16 | timeout connect 1000 | ||
2607 | 17 | {% if haproxy_client_timeout -%} | ||
2608 | 18 | timeout client {{ haproxy_client_timeout }} | ||
2609 | 19 | {% else -%} | ||
2610 | 20 | timeout client 30000 | ||
2611 | 21 | {% endif -%} | ||
2612 | 22 | |||
2613 | 23 | {% if haproxy_server_timeout -%} | ||
2614 | 24 | timeout server {{ haproxy_server_timeout }} | ||
2615 | 25 | {% else -%} | ||
2616 | 26 | timeout server 30000 | ||
2617 | 27 | {% endif -%} | ||
2618 | 28 | |||
2619 | 29 | listen stats {{ stat_port }} | ||
2620 | 30 | mode http | ||
2621 | 31 | stats enable | ||
2622 | 32 | stats hide-version | ||
2623 | 33 | stats realm Haproxy\ Statistics | ||
2624 | 34 | stats uri / | ||
2625 | 35 | stats auth admin:password | ||
2626 | 36 | |||
2627 | 37 | {% if frontends -%} | ||
2628 | 38 | {% for service, ports in service_ports.items() -%} | ||
2629 | 39 | frontend tcp-in_{{ service }} | ||
2630 | 40 | bind *:{{ ports[0] }} | ||
2631 | 41 | {% if ipv6 -%} | ||
2632 | 42 | bind :::{{ ports[0] }} | ||
2633 | 43 | {% endif -%} | ||
2634 | 44 | {% for frontend in frontends -%} | ||
2635 | 45 | acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} | ||
2636 | 46 | use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} | ||
2637 | 47 | {% endfor -%} | ||
2638 | 48 | default_backend {{ service }}_{{ default_backend }} | ||
2639 | 49 | |||
2640 | 50 | {% for frontend in frontends -%} | ||
2641 | 51 | backend {{ service }}_{{ frontend }} | ||
2642 | 52 | balance leastconn | ||
2643 | 53 | {% for unit, address in frontends[frontend]['backends'].items() -%} | ||
2644 | 54 | server {{ unit }} {{ address }}:{{ ports[1] }} check | ||
2645 | 55 | {% endfor %} | ||
2646 | 56 | {% endfor -%} | ||
2647 | 57 | {% endfor -%} | ||
2648 | 58 | {% endif -%} | ||
2649 | 0 | 59 | ||
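The haproxy.cfg template above is normally fed by HAProxyContext; as a hedged sketch, the keys it dereferences correspond to a context dict along these lines (service name, ports and addresses are hypothetical):

    ctxt = {
        'local_host': '127.0.0.1',
        'stat_port': ':8888',
        'haproxy_client_timeout': None,   # template falls back to 30000
        'haproxy_server_timeout': None,
        'ipv6': False,
        'default_backend': 'eth0',
        # [frontend port, backend port] per service
        'service_ports': {'cephradosgw-server': [80, 70]},
        'frontends': {
            'eth0': {
                'network': '10.5.0.0/24',
                'backends': {'ceph-radosgw-0': '10.5.0.10',
                             'ceph-radosgw-1': '10.5.0.11'},
            },
        },
    }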
2650 | === added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend' | |||
2651 | --- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 1970-01-01 00:00:00 +0000 | |||
2652 | +++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2015-01-15 16:18:44 +0000 | |||
2653 | @@ -0,0 +1,24 @@ | |||
2654 | 1 | {% if endpoints -%} | ||
2655 | 2 | {% for ext_port in ext_ports -%} | ||
2656 | 3 | Listen {{ ext_port }} | ||
2657 | 4 | {% endfor -%} | ||
2658 | 5 | {% for address, endpoint, ext, int in endpoints -%} | ||
2659 | 6 | <VirtualHost {{ address }}:{{ ext }}> | ||
2660 | 7 | ServerName {{ endpoint }} | ||
2661 | 8 | SSLEngine on | ||
2662 | 9 | SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} | ||
2663 | 10 | SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} | ||
2664 | 11 | ProxyPass / http://localhost:{{ int }}/ | ||
2665 | 12 | ProxyPassReverse / http://localhost:{{ int }}/ | ||
2666 | 13 | ProxyPreserveHost on | ||
2667 | 14 | </VirtualHost> | ||
2668 | 15 | {% endfor -%} | ||
2669 | 16 | <Proxy *> | ||
2670 | 17 | Order deny,allow | ||
2671 | 18 | Allow from all | ||
2672 | 19 | </Proxy> | ||
2673 | 20 | <Location /> | ||
2674 | 21 | Order allow,deny | ||
2675 | 22 | Allow from all | ||
2676 | 23 | </Location> | ||
2677 | 24 | {% endif -%} | ||
2678 | 0 | 25 | ||
2679 | === added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf' | |||
2680 | --- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 1970-01-01 00:00:00 +0000 | |||
2681 | +++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2015-01-15 16:18:44 +0000 | |||
2682 | @@ -0,0 +1,24 @@ | |||
2683 | 1 | {% if endpoints -%} | ||
2684 | 2 | {% for ext_port in ext_ports -%} | ||
2685 | 3 | Listen {{ ext_port }} | ||
2686 | 4 | {% endfor -%} | ||
2687 | 5 | {% for address, endpoint, ext, int in endpoints -%} | ||
2688 | 6 | <VirtualHost {{ address }}:{{ ext }}> | ||
2689 | 7 | ServerName {{ endpoint }} | ||
2690 | 8 | SSLEngine on | ||
2691 | 9 | SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} | ||
2692 | 10 | SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} | ||
2693 | 11 | ProxyPass / http://localhost:{{ int }}/ | ||
2694 | 12 | ProxyPassReverse / http://localhost:{{ int }}/ | ||
2695 | 13 | ProxyPreserveHost on | ||
2696 | 14 | </VirtualHost> | ||
2697 | 15 | {% endfor -%} | ||
2698 | 16 | <Proxy *> | ||
2699 | 17 | Order deny,allow | ||
2700 | 18 | Allow from all | ||
2701 | 19 | </Proxy> | ||
2702 | 20 | <Location /> | ||
2703 | 21 | Order allow,deny | ||
2704 | 22 | Allow from all | ||
2705 | 23 | </Location> | ||
2706 | 24 | {% endif -%} | ||
2707 | 0 | 25 | ||
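Both copies of the openstack_https_frontend template above unpack the same context; a minimal sketch of its shape (namespace, addresses and ports are hypothetical):

    ctxt = {
        'namespace': 'ceph-radosgw',
        'ext_ports': [443],
        # each endpoints entry is unpacked as (address, servername, ext, int),
        # matching the 'for address, endpoint, ext, int in endpoints' loop above
        'endpoints': [('10.5.0.10', '10.5.0.10', 443, 70)],
    }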
2708 | === added file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
2709 | --- hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000 | |||
2710 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2015-01-15 16:18:44 +0000 | |||
2711 | @@ -0,0 +1,279 @@ | |||
2712 | 1 | import os | ||
2713 | 2 | |||
2714 | 3 | import six | ||
2715 | 4 | |||
2716 | 5 | from charmhelpers.fetch import apt_install | ||
2717 | 6 | from charmhelpers.core.hookenv import ( | ||
2718 | 7 | log, | ||
2719 | 8 | ERROR, | ||
2720 | 9 | INFO | ||
2721 | 10 | ) | ||
2722 | 11 | from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES | ||
2723 | 12 | |||
2724 | 13 | try: | ||
2725 | 14 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | ||
2726 | 15 | except ImportError: | ||
2727 | 16 | # python-jinja2 may not be installed yet, or we're running unittests. | ||
2728 | 17 | FileSystemLoader = ChoiceLoader = Environment = exceptions = None | ||
2729 | 18 | |||
2730 | 19 | |||
2731 | 20 | class OSConfigException(Exception): | ||
2732 | 21 | pass | ||
2733 | 22 | |||
2734 | 23 | |||
2735 | 24 | def get_loader(templates_dir, os_release): | ||
2736 | 25 | """ | ||
2737 | 26 | Create a jinja2.ChoiceLoader containing template dirs up to | ||
2738 | 27 | and including os_release. If a release's template directory | ||
2739 | 28 | is missing at templates_dir, it will be omitted from the loader. | ||
2740 | 29 | templates_dir is added to the bottom of the search list as a base | ||
2741 | 30 | loading dir. | ||
2742 | 31 | |||
2743 | 32 | A charm may also ship a templates dir with this module | ||
2744 | 33 | and it will be appended to the bottom of the search list, eg:: | ||
2745 | 34 | |||
2746 | 35 | hooks/charmhelpers/contrib/openstack/templates | ||
2747 | 36 | |||
2748 | 37 | :param templates_dir (str): Base template directory containing release | ||
2749 | 38 | sub-directories. | ||
2750 | 39 | :param os_release (str): OpenStack release codename to construct template | ||
2751 | 40 | loader. | ||
2752 | 41 | :returns: jinja2.ChoiceLoader constructed with a list of | ||
2753 | 42 | jinja2.FilesystemLoaders, ordered in descending | ||
2754 | 43 | order by OpenStack release. | ||
2755 | 44 | """ | ||
2756 | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | ||
2757 | 46 | for rel in six.itervalues(OPENSTACK_CODENAMES)] | ||
2758 | 47 | |||
2759 | 48 | if not os.path.isdir(templates_dir): | ||
2760 | 49 | log('Templates directory not found @ %s.' % templates_dir, | ||
2761 | 50 | level=ERROR) | ||
2762 | 51 | raise OSConfigException | ||
2763 | 52 | |||
2764 | 53 | # the bottom contains templates_dir and possibly a common templates dir | ||
2765 | 54 | # shipped with the helper. | ||
2766 | 55 | loaders = [FileSystemLoader(templates_dir)] | ||
2767 | 56 | helper_templates = os.path.join(os.path.dirname(__file__), 'templates') | ||
2768 | 57 | if os.path.isdir(helper_templates): | ||
2769 | 58 | loaders.append(FileSystemLoader(helper_templates)) | ||
2770 | 59 | |||
2771 | 60 | for rel, tmpl_dir in tmpl_dirs: | ||
2772 | 61 | if os.path.isdir(tmpl_dir): | ||
2773 | 62 | loaders.insert(0, FileSystemLoader(tmpl_dir)) | ||
2774 | 63 | if rel == os_release: | ||
2775 | 64 | break | ||
2776 | 65 | log('Creating choice loader with dirs: %s' % | ||
2777 | 66 | [l.searchpath for l in loaders], level=INFO) | ||
2778 | 67 | return ChoiceLoader(loaders) | ||
2779 | 68 | |||
2780 | 69 | |||
2781 | 70 | class OSConfigTemplate(object): | ||
2782 | 71 | """ | ||
2783 | 72 | Associates a config file template with a list of context generators. | ||
2784 | 73 | Responsible for constructing a template context based on those generators. | ||
2785 | 74 | """ | ||
2786 | 75 | def __init__(self, config_file, contexts): | ||
2787 | 76 | self.config_file = config_file | ||
2788 | 77 | |||
2789 | 78 | if hasattr(contexts, '__call__'): | ||
2790 | 79 | self.contexts = [contexts] | ||
2791 | 80 | else: | ||
2792 | 81 | self.contexts = contexts | ||
2793 | 82 | |||
2794 | 83 | self._complete_contexts = [] | ||
2795 | 84 | |||
2796 | 85 | def context(self): | ||
2797 | 86 | ctxt = {} | ||
2798 | 87 | for context in self.contexts: | ||
2799 | 88 | _ctxt = context() | ||
2800 | 89 | if _ctxt: | ||
2801 | 90 | ctxt.update(_ctxt) | ||
2802 | 91 | # track interfaces for every complete context. | ||
2803 | 92 | [self._complete_contexts.append(interface) | ||
2804 | 93 | for interface in context.interfaces | ||
2805 | 94 | if interface not in self._complete_contexts] | ||
2806 | 95 | return ctxt | ||
2807 | 96 | |||
2808 | 97 | def complete_contexts(self): | ||
2809 | 98 | ''' | ||
2810 | 99 | Return a list of interfaces that have satisfied contexts. | ||
2811 | 100 | ''' | ||
2812 | 101 | if self._complete_contexts: | ||
2813 | 102 | return self._complete_contexts | ||
2814 | 103 | self.context() | ||
2815 | 104 | return self._complete_contexts | ||
2816 | 105 | |||
2817 | 106 | |||
2818 | 107 | class OSConfigRenderer(object): | ||
2819 | 108 | """ | ||
2820 | 109 | This class provides a common templating system to be used by OpenStack | ||
2821 | 110 | charms. It is intended to help charms share common code and templates, | ||
2822 | 111 | and ease the burden of managing config templates across multiple OpenStack | ||
2823 | 112 | releases. | ||
2824 | 113 | |||
2825 | 114 | Basic usage:: | ||
2826 | 115 | |||
2827 | 116 | # import some common context generators from charmhelpers | ||
2828 | 117 | from charmhelpers.contrib.openstack import context | ||
2829 | 118 | |||
2830 | 119 | # Create a renderer object for a specific OS release. | ||
2831 | 120 | configs = OSConfigRenderer(templates_dir='/tmp/templates', | ||
2832 | 121 | openstack_release='folsom') | ||
2833 | 122 | # register some config files with context generators. | ||
2834 | 123 | configs.register(config_file='/etc/nova/nova.conf', | ||
2835 | 124 | contexts=[context.SharedDBContext(), | ||
2836 | 125 | context.AMQPContext()]) | ||
2837 | 126 | configs.register(config_file='/etc/nova/api-paste.ini', | ||
2838 | 127 | contexts=[context.IdentityServiceContext()]) | ||
2839 | 128 | configs.register(config_file='/etc/haproxy/haproxy.conf', | ||
2840 | 129 | contexts=[context.HAProxyContext()]) | ||
2841 | 130 | # write out a single config | ||
2842 | 131 | configs.write('/etc/nova/nova.conf') | ||
2843 | 132 | # write out all registered configs | ||
2844 | 133 | configs.write_all() | ||
2845 | 134 | |||
2846 | 135 | **OpenStack Releases and template loading** | ||
2847 | 136 | |||
2848 | 137 | When the object is instantiated, it is associated with a specific OS | ||
2849 | 138 | release. This dictates how the template loader will be constructed. | ||
2850 | 139 | |||
2851 | 140 | The constructed loader attempts to load the template from several places | ||
2852 | 141 | in the following order: | ||
2853 | 142 | - from the most recent OS release-specific template dir (if one exists) | ||
2854 | 143 | - the base templates_dir | ||
2855 | 144 | - a template directory shipped in the charm with this helper file. | ||
2856 | 145 | |||
2857 | 146 | For the example above, '/tmp/templates' contains the following structure:: | ||
2858 | 147 | |||
2859 | 148 | /tmp/templates/nova.conf | ||
2860 | 149 | /tmp/templates/api-paste.ini | ||
2861 | 150 | /tmp/templates/grizzly/api-paste.ini | ||
2862 | 151 | /tmp/templates/havana/api-paste.ini | ||
2863 | 152 | |||
2864 | 153 | Since it was registered with the grizzly release, it first searches | ||
2865 | 154 | the grizzly directory for nova.conf, then the templates dir. | ||
2866 | 155 | |||
2867 | 156 | When writing api-paste.ini, it will find the template in the grizzly | ||
2868 | 157 | directory. | ||
2869 | 158 | |||
2870 | 159 | If the object were created with folsom, it would fall back to the | ||
2871 | 160 | base templates dir for its api-paste.ini template. | ||
2872 | 161 | |||
2873 | 162 | This system should help manage changes in config files through | ||
2874 | 163 | openstack releases, allowing charms to fall back to the most recently | ||
2875 | 164 | updated config template for a given release | ||
2876 | 165 | updated config template for a given release. | ||
2877 | 166 | The haproxy.conf, since it is not shipped in the templates dir, will | ||
2878 | 167 | be loaded from the module directory's template directory, eg | ||
2879 | 168 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows | ||
2880 | 169 | us to ship common templates (haproxy, apache) with the helpers. | ||
2881 | 170 | |||
2882 | 171 | **Context generators** | ||
2883 | 172 | |||
2884 | 173 | Context generators are used to generate template contexts during hook | ||
2885 | 174 | execution. Doing so may require inspecting service relations, charm | ||
2886 | 175 | config, etc. When registered, a config file is associated with a list | ||
2887 | 176 | of generators. When a template is rendered and written, all context | ||
2888 | 177 | generates are called in a chain to generate the context dictionary | ||
2889 | 178 | generators are called in a chain to generate the context dictionary | ||
2890 | 179 | """ | ||
2891 | 180 | def __init__(self, templates_dir, openstack_release): | ||
2892 | 181 | if not os.path.isdir(templates_dir): | ||
2893 | 182 | log('Could not locate templates dir %s' % templates_dir, | ||
2894 | 183 | level=ERROR) | ||
2895 | 184 | raise OSConfigException | ||
2896 | 185 | |||
2897 | 186 | self.templates_dir = templates_dir | ||
2898 | 187 | self.openstack_release = openstack_release | ||
2899 | 188 | self.templates = {} | ||
2900 | 189 | self._tmpl_env = None | ||
2901 | 190 | |||
2902 | 191 | if None in [Environment, ChoiceLoader, FileSystemLoader]: | ||
2903 | 192 | # if this code is running, the object is created pre-install hook. | ||
2904 | 193 | # jinja2 shouldn't get touched until the module is reloaded on next | ||
2905 | 194 | # hook execution, with proper jinja2 bits successfully imported. | ||
2906 | 195 | apt_install('python-jinja2') | ||
2907 | 196 | |||
2908 | 197 | def register(self, config_file, contexts): | ||
2909 | 198 | """ | ||
2910 | 199 | Register a config file with a list of context generators to be called | ||
2911 | 200 | during rendering. | ||
2912 | 201 | """ | ||
2913 | 202 | self.templates[config_file] = OSConfigTemplate(config_file=config_file, | ||
2914 | 203 | contexts=contexts) | ||
2915 | 204 | log('Registered config file: %s' % config_file, level=INFO) | ||
2916 | 205 | |||
2917 | 206 | def _get_tmpl_env(self): | ||
2918 | 207 | if not self._tmpl_env: | ||
2919 | 208 | loader = get_loader(self.templates_dir, self.openstack_release) | ||
2920 | 209 | self._tmpl_env = Environment(loader=loader) | ||
2921 | 210 | |||
2922 | 211 | def _get_template(self, template): | ||
2923 | 212 | self._get_tmpl_env() | ||
2924 | 213 | template = self._tmpl_env.get_template(template) | ||
2925 | 214 | log('Loaded template from %s' % template.filename, level=INFO) | ||
2926 | 215 | return template | ||
2927 | 216 | |||
2928 | 217 | def render(self, config_file): | ||
2929 | 218 | if config_file not in self.templates: | ||
2930 | 219 | log('Config not registered: %s' % config_file, level=ERROR) | ||
2931 | 220 | raise OSConfigException | ||
2932 | 221 | ctxt = self.templates[config_file].context() | ||
2933 | 222 | |||
2934 | 223 | _tmpl = os.path.basename(config_file) | ||
2935 | 224 | try: | ||
2936 | 225 | template = self._get_template(_tmpl) | ||
2937 | 226 | except exceptions.TemplateNotFound: | ||
2938 | 227 | # if no template is found with basename, try looking for it | ||
2939 | 228 | # using a munged full path, eg: | ||
2940 | 229 | # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf | ||
2941 | 230 | _tmpl = '_'.join(config_file.split('/')[1:]) | ||
2942 | 231 | try: | ||
2943 | 232 | template = self._get_template(_tmpl) | ||
2944 | 233 | except exceptions.TemplateNotFound as e: | ||
2945 | 234 | log('Could not load template from %s by %s or %s.' % | ||
2946 | 235 | (self.templates_dir, os.path.basename(config_file), _tmpl), | ||
2947 | 236 | level=ERROR) | ||
2948 | 237 | raise e | ||
2949 | 238 | |||
2950 | 239 | log('Rendering from template: %s' % _tmpl, level=INFO) | ||
2951 | 240 | return template.render(ctxt) | ||
2952 | 241 | |||
2953 | 242 | def write(self, config_file): | ||
2954 | 243 | """ | ||
2955 | 244 | Write a single config file, raises if config file is not registered. | ||
2956 | 245 | """ | ||
2957 | 246 | if config_file not in self.templates: | ||
2958 | 247 | log('Config not registered: %s' % config_file, level=ERROR) | ||
2959 | 248 | raise OSConfigException | ||
2960 | 249 | |||
2961 | 250 | _out = self.render(config_file) | ||
2962 | 251 | |||
2963 | 252 | with open(config_file, 'wb') as out: | ||
2964 | 253 | out.write(_out) | ||
2965 | 254 | |||
2966 | 255 | log('Wrote template %s.' % config_file, level=INFO) | ||
2967 | 256 | |||
2968 | 257 | def write_all(self): | ||
2969 | 258 | """ | ||
2970 | 259 | Write out all registered config files. | ||
2971 | 260 | """ | ||
2972 | 261 | [self.write(k) for k in six.iterkeys(self.templates)] | ||
2973 | 262 | |||
2974 | 263 | def set_release(self, openstack_release): | ||
2975 | 264 | """ | ||
2976 | 265 | Resets the template environment and generates a new template loader | ||
2977 | 266 | based on a the new openstack release. | ||
2978 | 267 | """ | ||
2979 | 268 | self._tmpl_env = None | ||
2980 | 269 | self.openstack_release = openstack_release | ||
2981 | 270 | self._get_tmpl_env() | ||
2982 | 271 | |||
2983 | 272 | def complete_contexts(self): | ||
2984 | 273 | ''' | ||
2985 | 274 | Returns a list of context interfaces that yield a complete context. | ||
2986 | 275 | ''' | ||
2987 | 276 | interfaces = [] | ||
2988 | 277 | [interfaces.extend(i.complete_contexts()) | ||
2989 | 278 | for i in six.itervalues(self.templates)] | ||
2990 | 279 | return interfaces | ||
2991 | 0 | 280 | ||
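To make the loader ordering described in get_loader() and the OSConfigRenderer docstring concrete (a sketch assuming only a 'grizzly' subdirectory exists under /tmp/templates):

    loader = get_loader('/tmp/templates', 'grizzly')
    # Resulting search order:
    #   1. /tmp/templates/grizzly                         (release-specific overrides)
    #   2. /tmp/templates                                 (charm's base templates)
    #   3. hooks/charmhelpers/contrib/openstack/templates (common templates shipped
    #                                                      with the helper)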
2992 | === added file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
2993 | --- hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000 | |||
2994 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-01-15 16:18:44 +0000 | |||
2995 | @@ -0,0 +1,625 @@ | |||
2996 | 1 | #!/usr/bin/python | ||
2997 | 2 | |||
2998 | 3 | # Common python helper functions used for OpenStack charms. | ||
2999 | 4 | from collections import OrderedDict | ||
3000 | 5 | from functools import wraps | ||
3001 | 6 | |||
3002 | 7 | import subprocess | ||
3003 | 8 | import json | ||
3004 | 9 | import os | ||
3005 | 10 | import socket | ||
3006 | 11 | import sys | ||
3007 | 12 | |||
3008 | 13 | import six | ||
3009 | 14 | import yaml | ||
3010 | 15 | |||
3011 | 16 | from charmhelpers.core.hookenv import ( | ||
3012 | 17 | config, | ||
3013 | 18 | log as juju_log, | ||
3014 | 19 | charm_dir, | ||
3015 | 20 | INFO, | ||
3016 | 21 | relation_ids, | ||
3017 | 22 | relation_set | ||
3018 | 23 | ) | ||
3019 | 24 | |||
3020 | 25 | from charmhelpers.contrib.storage.linux.lvm import ( | ||
3021 | 26 | deactivate_lvm_volume_group, | ||
3022 | 27 | is_lvm_physical_volume, | ||
3023 | 28 | remove_lvm_physical_volume, | ||
3024 | 29 | ) | ||
3025 | 30 | |||
3026 | 31 | from charmhelpers.contrib.network.ip import ( | ||
3027 | 32 | get_ipv6_addr | ||
3028 | 33 | ) | ||
3029 | 34 | |||
3030 | 35 | from charmhelpers.core.host import lsb_release, mounts, umount | ||
3031 | 36 | from charmhelpers.fetch import apt_install, apt_cache, install_remote | ||
3032 | 37 | from charmhelpers.contrib.python.packages import pip_install | ||
3033 | 38 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | ||
3034 | 39 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device | ||
3035 | 40 | |||
3036 | 41 | CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" | ||
3037 | 42 | CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' | ||
3038 | 43 | |||
3039 | 44 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' | ||
3040 | 45 | 'restricted main multiverse universe') | ||
3041 | 46 | |||
3042 | 47 | |||
3043 | 48 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ | ||
3044 | 49 | ('oneiric', 'diablo'), | ||
3045 | 50 | ('precise', 'essex'), | ||
3046 | 51 | ('quantal', 'folsom'), | ||
3047 | 52 | ('raring', 'grizzly'), | ||
3048 | 53 | ('saucy', 'havana'), | ||
3049 | 54 | ('trusty', 'icehouse'), | ||
3050 | 55 | ('utopic', 'juno'), | ||
3051 | 56 | ('vivid', 'kilo'), | ||
3052 | 57 | ]) | ||
3053 | 58 | |||
3054 | 59 | |||
3055 | 60 | OPENSTACK_CODENAMES = OrderedDict([ | ||
3056 | 61 | ('2011.2', 'diablo'), | ||
3057 | 62 | ('2012.1', 'essex'), | ||
3058 | 63 | ('2012.2', 'folsom'), | ||
3059 | 64 | ('2013.1', 'grizzly'), | ||
3060 | 65 | ('2013.2', 'havana'), | ||
3061 | 66 | ('2014.1', 'icehouse'), | ||
3062 | 67 | ('2014.2', 'juno'), | ||
3063 | 68 | ('2015.1', 'kilo'), | ||
3064 | 69 | ]) | ||
3065 | 70 | |||
3066 | 71 | # The ugly duckling | ||
3067 | 72 | SWIFT_CODENAMES = OrderedDict([ | ||
3068 | 73 | ('1.4.3', 'diablo'), | ||
3069 | 74 | ('1.4.8', 'essex'), | ||
3070 | 75 | ('1.7.4', 'folsom'), | ||
3071 | 76 | ('1.8.0', 'grizzly'), | ||
3072 | 77 | ('1.7.7', 'grizzly'), | ||
3073 | 78 | ('1.7.6', 'grizzly'), | ||
3074 | 79 | ('1.10.0', 'havana'), | ||
3075 | 80 | ('1.9.1', 'havana'), | ||
3076 | 81 | ('1.9.0', 'havana'), | ||
3077 | 82 | ('1.13.1', 'icehouse'), | ||
3078 | 83 | ('1.13.0', 'icehouse'), | ||
3079 | 84 | ('1.12.0', 'icehouse'), | ||
3080 | 85 | ('1.11.0', 'icehouse'), | ||
3081 | 86 | ('2.0.0', 'juno'), | ||
3082 | 87 | ('2.1.0', 'juno'), | ||
3083 | 88 | ('2.2.0', 'juno'), | ||
3084 | 89 | ('2.2.1', 'kilo'), | ||
3085 | 90 | ]) | ||
3086 | 91 | |||
3087 | 92 | DEFAULT_LOOPBACK_SIZE = '5G' | ||
3088 | 93 | |||
3089 | 94 | |||
3090 | 95 | def error_out(msg): | ||
3091 | 96 | juju_log("FATAL ERROR: %s" % msg, level='ERROR') | ||
3092 | 97 | sys.exit(1) | ||
3093 | 98 | |||
3094 | 99 | |||
3095 | 100 | def get_os_codename_install_source(src): | ||
3096 | 101 | '''Derive OpenStack release codename from a given installation source.''' | ||
3097 | 102 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | ||
3098 | 103 | rel = '' | ||
3099 | 104 | if src is None: | ||
3100 | 105 | return rel | ||
3101 | 106 | if src in ['distro', 'distro-proposed']: | ||
3102 | 107 | try: | ||
3103 | 108 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] | ||
3104 | 109 | except KeyError: | ||
3105 | 110 | e = 'Could not derive openstack release for '\ | ||
3106 | 111 | 'this Ubuntu release: %s' % ubuntu_rel | ||
3107 | 112 | error_out(e) | ||
3108 | 113 | return rel | ||
3109 | 114 | |||
3110 | 115 | if src.startswith('cloud:'): | ||
3111 | 116 | ca_rel = src.split(':')[1] | ||
3112 | 117 | ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] | ||
3113 | 118 | return ca_rel | ||
3114 | 119 | |||
3115 | 120 | # Best guess match based on deb string provided | ||
3116 | 121 | if src.startswith('deb') or src.startswith('ppa'): | ||
3117 | 122 | for k, v in six.iteritems(OPENSTACK_CODENAMES): | ||
3118 | 123 | if v in src: | ||
3119 | 124 | return v | ||
3120 | 125 | |||
3121 | 126 | |||
3122 | 127 | def get_os_version_install_source(src): | ||
3123 | 128 | codename = get_os_codename_install_source(src) | ||
3124 | 129 | return get_os_version_codename(codename) | ||
3125 | 130 | |||
3126 | 131 | |||
3127 | 132 | def get_os_codename_version(vers): | ||
3128 | 133 | '''Determine OpenStack codename from version number.''' | ||
3129 | 134 | try: | ||
3130 | 135 | return OPENSTACK_CODENAMES[vers] | ||
3131 | 136 | except KeyError: | ||
3132 | 137 | e = 'Could not determine OpenStack codename for version %s' % vers | ||
3133 | 138 | error_out(e) | ||
3134 | 139 | |||
3135 | 140 | |||
3136 | 141 | def get_os_version_codename(codename): | ||
3137 | 142 | '''Determine OpenStack version number from codename.''' | ||
3138 | 143 | for k, v in six.iteritems(OPENSTACK_CODENAMES): | ||
3139 | 144 | if v == codename: | ||
3140 | 145 | return k | ||
3141 | 146 | e = 'Could not derive OpenStack version for '\ | ||
3142 | 147 | 'codename: %s' % codename | ||
3143 | 148 | error_out(e) | ||
3144 | 149 | |||
3145 | 150 | |||
3146 | 151 | def get_os_codename_package(package, fatal=True): | ||
3147 | 152 | '''Derive OpenStack release codename from an installed package.''' | ||
3148 | 153 | import apt_pkg as apt | ||
3149 | 154 | |||
3150 | 155 | cache = apt_cache() | ||
3151 | 156 | |||
3152 | 157 | try: | ||
3153 | 158 | pkg = cache[package] | ||
3154 | 159 | except: | ||
3155 | 160 | if not fatal: | ||
3156 | 161 | return None | ||
3157 | 162 | # the package is unknown to the current apt cache. | ||
3158 | 163 | e = 'Could not determine version of package with no installation '\ | ||
3159 | 164 | 'candidate: %s' % package | ||
3160 | 165 | error_out(e) | ||
3161 | 166 | |||
3162 | 167 | if not pkg.current_ver: | ||
3163 | 168 | if not fatal: | ||
3164 | 169 | return None | ||
3165 | 170 | # package is known, but no version is currently installed. | ||
3166 | 171 | e = 'Could not determine version of uninstalled package: %s' % package | ||
3167 | 172 | error_out(e) | ||
3168 | 173 | |||
3169 | 174 | vers = apt.upstream_version(pkg.current_ver.ver_str) | ||
3170 | 175 | |||
3171 | 176 | try: | ||
3172 | 177 | if 'swift' in pkg.name: | ||
3173 | 178 | swift_vers = vers[:5] | ||
3174 | 179 | if swift_vers not in SWIFT_CODENAMES: | ||
3175 | 180 | # Deal with 1.10.0 upward | ||
3176 | 181 | swift_vers = vers[:6] | ||
3177 | 182 | return SWIFT_CODENAMES[swift_vers] | ||
3178 | 183 | else: | ||
3179 | 184 | vers = vers[:6] | ||
3180 | 185 | return OPENSTACK_CODENAMES[vers] | ||
3181 | 186 | except KeyError: | ||
3182 | 187 | e = 'Could not determine OpenStack codename for version %s' % vers | ||
3183 | 188 | error_out(e) | ||
3184 | 189 | |||
3185 | 190 | |||
3186 | 191 | def get_os_version_package(pkg, fatal=True): | ||
3187 | 192 | '''Derive OpenStack version number from an installed package.''' | ||
3188 | 193 | codename = get_os_codename_package(pkg, fatal=fatal) | ||
3189 | 194 | |||
3190 | 195 | if not codename: | ||
3191 | 196 | return None | ||
3192 | 197 | |||
3193 | 198 | if 'swift' in pkg: | ||
3194 | 199 | vers_map = SWIFT_CODENAMES | ||
3195 | 200 | else: | ||
3196 | 201 | vers_map = OPENSTACK_CODENAMES | ||
3197 | 202 | |||
3198 | 203 | for version, cname in six.iteritems(vers_map): | ||
3199 | 204 | if cname == codename: | ||
3200 | 205 | return version | ||
3201 | 206 | # e = "Could not determine OpenStack version for package: %s" % pkg | ||
3202 | 207 | # error_out(e) | ||
3203 | 208 | |||
3204 | 209 | |||
3205 | 210 | os_rel = None | ||
3206 | 211 | |||
3207 | 212 | |||
3208 | 213 | def os_release(package, base='essex'): | ||
3209 | 214 | ''' | ||
3210 | 215 | Returns OpenStack release codename from a cached global. | ||
3211 | 216 | If the codename can not be determined from either an installed package or | ||
3212 | 217 | the installation source, the earliest release supported by the charm should | ||
3213 | 218 | be returned. | ||
3214 | 219 | ''' | ||
3215 | 220 | global os_rel | ||
3216 | 221 | if os_rel: | ||
3217 | 222 | return os_rel | ||
3218 | 223 | os_rel = (get_os_codename_package(package, fatal=False) or | ||
3219 | 224 | get_os_codename_install_source(config('openstack-origin')) or | ||
3220 | 225 | base) | ||
3221 | 226 | return os_rel | ||
3222 | 227 | |||
3223 | 228 | |||
3224 | 229 | def import_key(keyid): | ||
3225 | 230 | cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ | ||
3226 | 231 | "--recv-keys %s" % keyid | ||
3227 | 232 | try: | ||
3228 | 233 | subprocess.check_call(cmd.split(' ')) | ||
3229 | 234 | except subprocess.CalledProcessError: | ||
3230 | 235 | error_out("Error importing repo key %s" % keyid) | ||
3231 | 236 | |||
3232 | 237 | |||
3233 | 238 | def configure_installation_source(rel): | ||
3234 | 239 | '''Configure apt installation source.''' | ||
3235 | 240 | if rel == 'distro': | ||
3236 | 241 | return | ||
3237 | 242 | elif rel == 'distro-proposed': | ||
3238 | 243 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | ||
3239 | 244 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | ||
3240 | 245 | f.write(DISTRO_PROPOSED % ubuntu_rel) | ||
3241 | 246 | elif rel[:4] == "ppa:": | ||
3242 | 247 | src = rel | ||
3243 | 248 | subprocess.check_call(["add-apt-repository", "-y", src]) | ||
3244 | 249 | elif rel[:3] == "deb": | ||
3245 | 250 | l = len(rel.split('|')) | ||
3246 | 251 | if l == 2: | ||
3247 | 252 | src, key = rel.split('|') | ||
3248 | 253 | juju_log("Importing PPA key from keyserver for %s" % src) | ||
3249 | 254 | import_key(key) | ||
3250 | 255 | elif l == 1: | ||
3251 | 256 | src = rel | ||
3252 | 257 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | ||
3253 | 258 | f.write(src) | ||
3254 | 259 | elif rel[:6] == 'cloud:': | ||
3255 | 260 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | ||
3256 | 261 | rel = rel.split(':')[1] | ||
3257 | 262 | u_rel = rel.split('-')[0] | ||
3258 | 263 | ca_rel = rel.split('-')[1] | ||
3259 | 264 | |||
3260 | 265 | if u_rel != ubuntu_rel: | ||
3261 | 266 | e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ | ||
3262 | 267 | 'version (%s)' % (ca_rel, ubuntu_rel) | ||
3263 | 268 | error_out(e) | ||
3264 | 269 | |||
3265 | 270 | if 'staging' in ca_rel: | ||
3266 | 271 | # staging is just a regular PPA. | ||
3267 | 272 | os_rel = ca_rel.split('/')[0] | ||
3268 | 273 | ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel | ||
3269 | 274 | cmd = 'add-apt-repository -y %s' % ppa | ||
3270 | 275 | subprocess.check_call(cmd.split(' ')) | ||
3271 | 276 | return | ||
3272 | 277 | |||
3273 | 278 | # map charm config options to actual archive pockets. | ||
3274 | 279 | pockets = { | ||
3275 | 280 | 'folsom': 'precise-updates/folsom', | ||
3276 | 281 | 'folsom/updates': 'precise-updates/folsom', | ||
3277 | 282 | 'folsom/proposed': 'precise-proposed/folsom', | ||
3278 | 283 | 'grizzly': 'precise-updates/grizzly', | ||
3279 | 284 | 'grizzly/updates': 'precise-updates/grizzly', | ||
3280 | 285 | 'grizzly/proposed': 'precise-proposed/grizzly', | ||
3281 | 286 | 'havana': 'precise-updates/havana', | ||
3282 | 287 | 'havana/updates': 'precise-updates/havana', | ||
3283 | 288 | 'havana/proposed': 'precise-proposed/havana', | ||
3284 | 289 | 'icehouse': 'precise-updates/icehouse', | ||
3285 | 290 | 'icehouse/updates': 'precise-updates/icehouse', | ||
3286 | 291 | 'icehouse/proposed': 'precise-proposed/icehouse', | ||
3287 | 292 | 'juno': 'trusty-updates/juno', | ||
3288 | 293 | 'juno/updates': 'trusty-updates/juno', | ||
3289 | 294 | 'juno/proposed': 'trusty-proposed/juno', | ||
3290 | 295 | 'kilo': 'trusty-updates/kilo', | ||
3291 | 296 | 'kilo/updates': 'trusty-updates/kilo', | ||
3292 | 297 | 'kilo/proposed': 'trusty-proposed/kilo', | ||
3293 | 298 | } | ||
3294 | 299 | |||
3295 | 300 | try: | ||
3296 | 301 | pocket = pockets[ca_rel] | ||
3297 | 302 | except KeyError: | ||
3298 | 303 | e = 'Invalid Cloud Archive release specified: %s' % rel | ||
3299 | 304 | error_out(e) | ||
3300 | 305 | |||
3301 | 306 | src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) | ||
3302 | 307 | apt_install('ubuntu-cloud-keyring', fatal=True) | ||
3303 | 308 | |||
3304 | 309 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: | ||
3305 | 310 | f.write(src) | ||
3306 | 311 | else: | ||
3307 | 312 | error_out("Invalid openstack-release specified: %s" % rel) | ||
3308 | 313 | |||
3309 | 314 | |||
3310 | 315 | def save_script_rc(script_path="scripts/scriptrc", **env_vars): | ||
3311 | 316 | """ | ||
3312 | 317 | Write an rc file in the charm-delivered directory containing | ||
3313 | 318 | exported environment variables provided by env_vars. Any charm scripts run | ||
3314 | 319 | outside the juju hook environment can source this scriptrc to obtain | ||
3315 | 320 | updated config information necessary to perform health checks or | ||
3316 | 321 | service changes. | ||
3317 | 322 | """ | ||
3318 | 323 | juju_rc_path = "%s/%s" % (charm_dir(), script_path) | ||
3319 | 324 | if not os.path.exists(os.path.dirname(juju_rc_path)): | ||
3320 | 325 | os.mkdir(os.path.dirname(juju_rc_path)) | ||
3321 | 326 | with open(juju_rc_path, 'wb') as rc_script: | ||
3322 | 327 | rc_script.write( | ||
3323 | 328 | "#!/bin/bash\n") | ||
3324 | 329 | [rc_script.write('export %s=%s\n' % (u, p)) | ||
3325 | 330 | for u, p in six.iteritems(env_vars) if u != "script_path"] | ||
3326 | 331 | |||
3327 | 332 | |||
3328 | 333 | def openstack_upgrade_available(package): | ||
3329 | 334 | """ | ||
3330 | 335 | Determines if an OpenStack upgrade is available from installation | ||
3331 | 336 | source, based on version of installed package. | ||
3332 | 337 | |||
3333 | 338 | :param package: str: Name of installed package. | ||
3334 | 339 | |||
3335 | 340 | :returns: bool: True if configured installation source offers | ||
3336 | 341 | a newer version of package. | ||
3337 | 342 | |||
3338 | 343 | """ | ||
3339 | 344 | |||
3340 | 345 | import apt_pkg as apt | ||
3341 | 346 | src = config('openstack-origin') | ||
3342 | 347 | cur_vers = get_os_version_package(package) | ||
3343 | 348 | available_vers = get_os_version_install_source(src) | ||
3344 | 349 | apt.init() | ||
3345 | 350 | return apt.version_compare(available_vers, cur_vers) == 1 | ||
3346 | 351 | |||
3347 | 352 | |||
3348 | 353 | def ensure_block_device(block_device): | ||
3349 | 354 | ''' | ||
3350 | 355 | Confirm block_device, create as loopback if necessary. | ||
3351 | 356 | |||
3352 | 357 | :param block_device: str: Full path of block device to ensure. | ||
3353 | 358 | |||
3354 | 359 | :returns: str: Full path of ensured block device. | ||
3355 | 360 | ''' | ||
3356 | 361 | _none = ['None', 'none', None] | ||
3357 | 362 | if (block_device in _none): | ||
3358 | 363 | error_out('prepare_storage(): Missing required input: block_device=%s.' | ||
3359 | 364 | % block_device) | ||
3360 | 365 | |||
3361 | 366 | if block_device.startswith('/dev/'): | ||
3362 | 367 | bdev = block_device | ||
3363 | 368 | elif block_device.startswith('/'): | ||
3364 | 369 | _bd = block_device.split('|') | ||
3365 | 370 | if len(_bd) == 2: | ||
3366 | 371 | bdev, size = _bd | ||
3367 | 372 | else: | ||
3368 | 373 | bdev = block_device | ||
3369 | 374 | size = DEFAULT_LOOPBACK_SIZE | ||
3370 | 375 | bdev = ensure_loopback_device(bdev, size) | ||
3371 | 376 | else: | ||
3372 | 377 | bdev = '/dev/%s' % block_device | ||
3373 | 378 | |||
3374 | 379 | if not is_block_device(bdev): | ||
3375 | 380 | error_out('Failed to locate valid block device at %s' % bdev) | ||
3376 | 381 | |||
3377 | 382 | return bdev | ||
3378 | 383 | |||
3379 | 384 | |||
3380 | 385 | def clean_storage(block_device): | ||
3381 | 386 | ''' | ||
3382 | 387 | Ensures a block device is clean. That is: | ||
3383 | 388 | - unmounted | ||
3384 | 389 | - any lvm volume groups are deactivated | ||
3385 | 390 | - any lvm physical device signatures removed | ||
3386 | 391 | - partition table wiped | ||
3387 | 392 | |||
3388 | 393 | :param block_device: str: Full path to block device to clean. | ||
3389 | 394 | ''' | ||
3390 | 395 | for mp, d in mounts(): | ||
3391 | 396 | if d == block_device: | ||
3392 | 397 | juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % | ||
3393 | 398 | (d, mp), level=INFO) | ||
3394 | 399 | umount(mp, persist=True) | ||
3395 | 400 | |||
3396 | 401 | if is_lvm_physical_volume(block_device): | ||
3397 | 402 | deactivate_lvm_volume_group(block_device) | ||
3398 | 403 | remove_lvm_physical_volume(block_device) | ||
3399 | 404 | else: | ||
3400 | 405 | zap_disk(block_device) | ||
3401 | 406 | |||
3402 | 407 | |||
3403 | 408 | def is_ip(address): | ||
3404 | 409 | """ | ||
3405 | 410 | Returns True if address is a valid IP address. | ||
3406 | 411 | """ | ||
3407 | 412 | try: | ||
3408 | 413 | # Test to see if already an IPv4 address | ||
3409 | 414 | socket.inet_aton(address) | ||
3410 | 415 | return True | ||
3411 | 416 | except socket.error: | ||
3412 | 417 | return False | ||
3413 | 418 | |||
3414 | 419 | |||
3415 | 420 | def ns_query(address): | ||
3416 | 421 | try: | ||
3417 | 422 | import dns.resolver | ||
3418 | 423 | except ImportError: | ||
3419 | 424 | apt_install('python-dnspython') | ||
3420 | 425 | import dns.resolver | ||
3421 | 426 | |||
3422 | 427 | if isinstance(address, dns.name.Name): | ||
3423 | 428 | rtype = 'PTR' | ||
3424 | 429 | elif isinstance(address, six.string_types): | ||
3425 | 430 | rtype = 'A' | ||
3426 | 431 | else: | ||
3427 | 432 | return None | ||
3428 | 433 | |||
3429 | 434 | answers = dns.resolver.query(address, rtype) | ||
3430 | 435 | if answers: | ||
3431 | 436 | return str(answers[0]) | ||
3432 | 437 | return None | ||
3433 | 438 | |||
3434 | 439 | |||
3435 | 440 | def get_host_ip(hostname): | ||
3436 | 441 | """ | ||
3437 | 442 | Resolves the IP for a given hostname, or returns | ||
3438 | 443 | the input if it is already an IP. | ||
3439 | 444 | """ | ||
3440 | 445 | if is_ip(hostname): | ||
3441 | 446 | return hostname | ||
3442 | 447 | |||
3443 | 448 | return ns_query(hostname) | ||
3444 | 449 | |||
3445 | 450 | |||
3446 | 451 | def get_hostname(address, fqdn=True): | ||
3447 | 452 | """ | ||
3448 | 453 | Resolves hostname for given IP, or returns the input | ||
3449 | 454 | if it is already a hostname. | ||
3450 | 455 | """ | ||
3451 | 456 | if is_ip(address): | ||
3452 | 457 | try: | ||
3453 | 458 | import dns.reversename | ||
3454 | 459 | except ImportError: | ||
3455 | 460 | apt_install('python-dnspython') | ||
3456 | 461 | import dns.reversename | ||
3457 | 462 | |||
3458 | 463 | rev = dns.reversename.from_address(address) | ||
3459 | 464 | result = ns_query(rev) | ||
3460 | 465 | if not result: | ||
3461 | 466 | return None | ||
3462 | 467 | else: | ||
3463 | 468 | result = address | ||
3464 | 469 | |||
3465 | 470 | if fqdn: | ||
3466 | 471 | # strip trailing . | ||
3467 | 472 | if result.endswith('.'): | ||
3468 | 473 | return result[:-1] | ||
3469 | 474 | else: | ||
3470 | 475 | return result | ||
3471 | 476 | else: | ||
3472 | 477 | return result.split('.')[0] | ||
3473 | 478 | |||
3474 | 479 | |||
3475 | 480 | def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): | ||
3476 | 481 | mm_map = {} | ||
3477 | 482 | if os.path.isfile(mm_file): | ||
3478 | 483 | with open(mm_file, 'r') as f: | ||
3479 | 484 | mm_map = json.load(f) | ||
3480 | 485 | return mm_map | ||
3481 | 486 | |||
3482 | 487 | |||
3483 | 488 | def sync_db_with_multi_ipv6_addresses(database, database_user, | ||
3484 | 489 | relation_prefix=None): | ||
3485 | 490 | hosts = get_ipv6_addr(dynamic_only=False) | ||
3486 | 491 | |||
3487 | 492 | kwargs = {'database': database, | ||
3488 | 493 | 'username': database_user, | ||
3489 | 494 | 'hostname': json.dumps(hosts)} | ||
3490 | 495 | |||
3491 | 496 | if relation_prefix: | ||
3492 | 497 | for key in list(kwargs.keys()): | ||
3493 | 498 | kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] | ||
3494 | 499 | del kwargs[key] | ||
3495 | 500 | |||
3496 | 501 | for rid in relation_ids('shared-db'): | ||
3497 | 502 | relation_set(relation_id=rid, **kwargs) | ||
3498 | 503 | |||
3499 | 504 | |||
3500 | 505 | def os_requires_version(ostack_release, pkg): | ||
3501 | 506 | """ | ||
3502 | 507 | Decorator for hook to specify minimum supported release | ||
3503 | 508 | """ | ||
3504 | 509 | def wrap(f): | ||
3505 | 510 | @wraps(f) | ||
3506 | 511 | def wrapped_f(*args): | ||
3507 | 512 | if os_release(pkg) < ostack_release: | ||
3508 | 513 | raise Exception("This hook is not supported on releases" | ||
3509 | 514 | " before %s" % ostack_release) | ||
3510 | 515 | f(*args) | ||
3511 | 516 | return wrapped_f | ||
3512 | 517 | return wrap | ||
3513 | 518 | |||
3514 | 519 | |||
3515 | 520 | def git_install_requested(): | ||
3516 | 521 | """Returns true if openstack-origin-git is specified.""" | ||
3517 | 522 | return config('openstack-origin-git') != "None" | ||
3518 | 523 | |||
3519 | 524 | |||
3520 | 525 | requirements_dir = None | ||
3521 | 526 | |||
3522 | 527 | |||
3523 | 528 | def git_clone_and_install(file_name, core_project): | ||
3524 | 529 | """Clone/install all OpenStack repos specified in yaml config file.""" | ||
3525 | 530 | global requirements_dir | ||
3526 | 531 | |||
3527 | 532 | if file_name == "None": | ||
3528 | 533 | return | ||
3529 | 534 | |||
3530 | 535 | yaml_file = os.path.join(charm_dir(), file_name) | ||
3531 | 536 | |||
3532 | 537 | # clone/install the requirements project first | ||
3533 | 538 | installed = _git_clone_and_install_subset(yaml_file, | ||
3534 | 539 | whitelist=['requirements']) | ||
3535 | 540 | if 'requirements' not in installed: | ||
3536 | 541 | error_out('requirements git repository must be specified') | ||
3537 | 542 | |||
3538 | 543 | # clone/install all other projects except requirements and the core project | ||
3539 | 544 | blacklist = ['requirements', core_project] | ||
3540 | 545 | _git_clone_and_install_subset(yaml_file, blacklist=blacklist, | ||
3541 | 546 | update_requirements=True) | ||
3542 | 547 | |||
3543 | 548 | # clone/install the core project | ||
3544 | 549 | whitelist = [core_project] | ||
3545 | 550 | installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist, | ||
3546 | 551 | update_requirements=True) | ||
3547 | 552 | if core_project not in installed: | ||
3548 | 553 | error_out('{} git repository must be specified'.format(core_project)) | ||
3549 | 554 | |||
3550 | 555 | |||
3551 | 556 | def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[], | ||
3552 | 557 | update_requirements=False): | ||
3553 | 558 | """Clone/install subset of OpenStack repos specified in yaml config file.""" | ||
3554 | 559 | global requirements_dir | ||
3555 | 560 | installed = [] | ||
3556 | 561 | |||
3557 | 562 | with open(yaml_file, 'r') as fd: | ||
3558 | 563 | projects = yaml.load(fd) | ||
3559 | 564 | for proj, val in projects.items(): | ||
3560 | 565 | # The project subset is chosen based on the following 3 rules: | ||
3561 | 566 | # 1) If project is in blacklist, we don't clone/install it, period. | ||
3562 | 567 | # 2) If whitelist is empty, we clone/install everything else. | ||
3563 | 568 | # 3) If whitelist is not empty, we clone/install everything in the | ||
3564 | 569 | # whitelist. | ||
3565 | 570 | if proj in blacklist: | ||
3566 | 571 | continue | ||
3567 | 572 | if whitelist and proj not in whitelist: | ||
3568 | 573 | continue | ||
3569 | 574 | repo = val['repository'] | ||
3570 | 575 | branch = val['branch'] | ||
3571 | 576 | repo_dir = _git_clone_and_install_single(repo, branch, | ||
3572 | 577 | update_requirements) | ||
3573 | 578 | if proj == 'requirements': | ||
3574 | 579 | requirements_dir = repo_dir | ||
3575 | 580 | installed.append(proj) | ||
3576 | 581 | return installed | ||
3577 | 582 | |||
3578 | 583 | |||
3579 | 584 | def _git_clone_and_install_single(repo, branch, update_requirements=False): | ||
3580 | 585 | """Clone and install a single git repository.""" | ||
3581 | 586 | dest_parent_dir = "/mnt/openstack-git/" | ||
3582 | 587 | dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo)) | ||
3583 | 588 | |||
3584 | 589 | if not os.path.exists(dest_parent_dir): | ||
3585 | 590 | juju_log('Host dir not mounted at {}. ' | ||
3586 | 591 | 'Creating directory there instead.'.format(dest_parent_dir)) | ||
3587 | 592 | os.mkdir(dest_parent_dir) | ||
3588 | 593 | |||
3589 | 594 | if not os.path.exists(dest_dir): | ||
3590 | 595 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) | ||
3591 | 596 | repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch) | ||
3592 | 597 | else: | ||
3593 | 598 | repo_dir = dest_dir | ||
3594 | 599 | |||
3595 | 600 | if update_requirements: | ||
3596 | 601 | if not requirements_dir: | ||
3597 | 602 | error_out('requirements repo must be cloned before ' | ||
3598 | 603 | 'updating from global requirements.') | ||
3599 | 604 | _git_update_requirements(repo_dir, requirements_dir) | ||
3600 | 605 | |||
3601 | 606 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) | ||
3602 | 607 | pip_install(repo_dir) | ||
3603 | 608 | |||
3604 | 609 | return repo_dir | ||
3605 | 610 | |||
3606 | 611 | |||
3607 | 612 | def _git_update_requirements(package_dir, reqs_dir): | ||
3608 | 613 | """Update from global requirements. | ||
3609 | 614 | |||
3610 | 615 | Update an OpenStack git directory's requirements.txt and | ||
3611 | 616 | test-requirements.txt from global-requirements.txt.""" | ||
3612 | 617 | orig_dir = os.getcwd() | ||
3613 | 618 | os.chdir(reqs_dir) | ||
3614 | 619 | cmd = "python update.py {}".format(package_dir) | ||
3615 | 620 | try: | ||
3616 | 621 | subprocess.check_call(cmd.split(' ')) | ||
3617 | 622 | except subprocess.CalledProcessError: | ||
3618 | 623 | package = os.path.basename(package_dir) | ||
3619 | 624 | error_out("Error updating {} from global-requirements.txt".format(package)) | ||
3620 | 625 | os.chdir(orig_dir) | ||
3621 | 0 | 626 | ||
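A few hedged examples of how the install-source helpers above behave; the results follow directly from UBUNTU_OPENSTACK_RELEASE, OPENSTACK_CODENAMES and the 'cloud:' parsing shown in the file:

    get_os_codename_install_source('distro')             # 'icehouse' on a trusty host
    get_os_codename_install_source('cloud:trusty-juno')  # 'juno'
    get_os_version_codename('juno')                       # '2014.2'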
3622 | === added directory 'hooks/charmhelpers/contrib/python' | |||
3623 | === added file 'hooks/charmhelpers/contrib/python/__init__.py' | |||
3624 | === added file 'hooks/charmhelpers/contrib/python/packages.py' | |||
3625 | --- hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000 | |||
3626 | +++ hooks/charmhelpers/contrib/python/packages.py 2015-01-15 16:18:44 +0000 | |||
3627 | @@ -0,0 +1,77 @@ | |||
3628 | 1 | #!/usr/bin/env python | ||
3629 | 2 | # coding: utf-8 | ||
3630 | 3 | |||
3631 | 4 | __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" | ||
3632 | 5 | |||
3633 | 6 | from charmhelpers.fetch import apt_install, apt_update | ||
3634 | 7 | from charmhelpers.core.hookenv import log | ||
3635 | 8 | |||
3636 | 9 | try: | ||
3637 | 10 | from pip import main as pip_execute | ||
3638 | 11 | except ImportError: | ||
3639 | 12 | apt_update() | ||
3640 | 13 | apt_install('python-pip') | ||
3641 | 14 | from pip import main as pip_execute | ||
3642 | 15 | |||
3643 | 16 | |||
3644 | 17 | def parse_options(given, available): | ||
3645 | 18 | """Given a set of options, check if available""" | ||
3646 | 19 | for key, value in sorted(given.items()): | ||
3647 | 20 | if key in available: | ||
3648 | 21 | yield "--{0}={1}".format(key, value) | ||
3649 | 22 | |||
3650 | 23 | |||
3651 | 24 | def pip_install_requirements(requirements, **options): | ||
3652 | 25 | """Install a requirements file """ | ||
3653 | 26 | command = ["install"] | ||
3654 | 27 | |||
3655 | 28 | available_options = ('proxy', 'src', 'log', ) | ||
3656 | 29 | for option in parse_options(options, available_options): | ||
3657 | 30 | command.append(option) | ||
3658 | 31 | |||
3659 | 32 | command.append("-r {0}".format(requirements)) | ||
3660 | 33 | log("Installing from file: {} with options: {}".format(requirements, | ||
3661 | 34 | command)) | ||
3662 | 35 | pip_execute(command) | ||
3663 | 36 | |||
3664 | 37 | |||
3665 | 38 | def pip_install(package, fatal=False, **options): | ||
3666 | 39 | """Install a python package""" | ||
3667 | 40 | command = ["install"] | ||
3668 | 41 | |||
3669 | 42 | available_options = ('proxy', 'src', 'log', "index-url", ) | ||
3670 | 43 | for option in parse_options(options, available_options): | ||
3671 | 44 | command.append(option) | ||
3672 | 45 | |||
3673 | 46 | if isinstance(package, list): | ||
3674 | 47 | command.extend(package) | ||
3675 | 48 | else: | ||
3676 | 49 | command.append(package) | ||
3677 | 50 | |||
3678 | 51 | log("Installing {} package with options: {}".format(package, | ||
3679 | 52 | command)) | ||
3680 | 53 | pip_execute(command) | ||
3681 | 54 | |||
3682 | 55 | |||
3683 | 56 | def pip_uninstall(package, **options): | ||
3684 | 57 | """Uninstall a python package""" | ||
3685 | 58 | command = ["uninstall", "-q", "-y"] | ||
3686 | 59 | |||
3687 | 60 | available_options = ('proxy', 'log', ) | ||
3688 | 61 | for option in parse_options(options, available_options): | ||
3689 | 62 | command.append(option) | ||
3690 | 63 | |||
3691 | 64 | if isinstance(package, list): | ||
3692 | 65 | command.extend(package) | ||
3693 | 66 | else: | ||
3694 | 67 | command.append(package) | ||
3695 | 68 | |||
3696 | 69 | log("Uninstalling {} package with options: {}".format(package, | ||
3697 | 70 | command)) | ||
3698 | 71 | pip_execute(command) | ||
3699 | 72 | |||
3700 | 73 | |||
3701 | 74 | def pip_list(): | ||
3702 | 75 | """Returns the list of current python installed packages | ||
3703 | 76 | """Returns the list of currently installed python packages | ||
3704 | 77 | return pip_execute(["list"]) | ||
3705 | 0 | 78 | ||
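A minimal usage sketch for the pip helpers above (the package name and proxy URL are hypothetical):

    # Whitelisted keyword options become --key=value flags placed before the
    # package argument, i.e. `pip install --proxy=... dnspython`.
    pip_install('dnspython', proxy='http://squid.internal:3128')
    pip_uninstall('dnspython')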
3706 | === added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
3707 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000 | |||
3708 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-01-15 16:18:44 +0000 | |||
3709 | @@ -0,0 +1,428 @@ | |||
3710 | 1 | # | ||
3711 | 2 | # Copyright 2012 Canonical Ltd. | ||
3712 | 3 | # | ||
3713 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
3714 | 5 | # | ||
3715 | 6 | # Authors: | ||
3716 | 7 | # James Page <james.page@ubuntu.com> | ||
3717 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
3718 | 9 | # | ||
3719 | 10 | |||
3720 | 11 | import os | ||
3721 | 12 | import shutil | ||
3722 | 13 | import json | ||
3723 | 14 | import time | ||
3724 | 15 | |||
3725 | 16 | from subprocess import ( | ||
3726 | 17 | check_call, | ||
3727 | 18 | check_output, | ||
3728 | 19 | CalledProcessError, | ||
3729 | 20 | ) | ||
3730 | 21 | from charmhelpers.core.hookenv import ( | ||
3731 | 22 | relation_get, | ||
3732 | 23 | relation_ids, | ||
3733 | 24 | related_units, | ||
3734 | 25 | log, | ||
3735 | 26 | DEBUG, | ||
3736 | 27 | INFO, | ||
3737 | 28 | WARNING, | ||
3738 | 29 | ERROR, | ||
3739 | 30 | ) | ||
3740 | 31 | from charmhelpers.core.host import ( | ||
3741 | 32 | mount, | ||
3742 | 33 | mounts, | ||
3743 | 34 | service_start, | ||
3744 | 35 | service_stop, | ||
3745 | 36 | service_running, | ||
3746 | 37 | umount, | ||
3747 | 38 | ) | ||
3748 | 39 | from charmhelpers.fetch import ( | ||
3749 | 40 | apt_install, | ||
3750 | 41 | ) | ||
3751 | 42 | |||
3752 | 43 | KEYRING = '/etc/ceph/ceph.client.{}.keyring' | ||
3753 | 44 | KEYFILE = '/etc/ceph/ceph.client.{}.key' | ||
3754 | 45 | |||
3755 | 46 | CEPH_CONF = """[global] | ||
3756 | 47 | auth supported = {auth} | ||
3757 | 48 | keyring = {keyring} | ||
3758 | 49 | mon host = {mon_hosts} | ||
3759 | 50 | log to syslog = {use_syslog} | ||
3760 | 51 | err to syslog = {use_syslog} | ||
3761 | 52 | clog to syslog = {use_syslog} | ||
3762 | 53 | """ | ||
3763 | 54 | |||
3764 | 55 | |||
3765 | 56 | def install(): | ||
3766 | 57 | """Basic Ceph client installation.""" | ||
3767 | 58 | ceph_dir = "/etc/ceph" | ||
3768 | 59 | if not os.path.exists(ceph_dir): | ||
3769 | 60 | os.mkdir(ceph_dir) | ||
3770 | 61 | |||
3771 | 62 | apt_install('ceph-common', fatal=True) | ||
3772 | 63 | |||
3773 | 64 | |||
3774 | 65 | def rbd_exists(service, pool, rbd_img): | ||
3775 | 66 | """Check to see if a RADOS block device exists.""" | ||
3776 | 67 | try: | ||
3777 | 68 | out = check_output(['rbd', 'list', '--id', | ||
3778 | 69 | service, '--pool', pool]).decode('UTF-8') | ||
3779 | 70 | except CalledProcessError: | ||
3780 | 71 | return False | ||
3781 | 72 | |||
3782 | 73 | return rbd_img in out | ||
3783 | 74 | |||
3784 | 75 | |||
3785 | 76 | def create_rbd_image(service, pool, image, sizemb): | ||
3786 | 77 | """Create a new RADOS block device.""" | ||
3787 | 78 | cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, | ||
3788 | 79 | '--pool', pool] | ||
3789 | 80 | check_call(cmd) | ||
3790 | 81 | |||
3791 | 82 | |||
3792 | 83 | def pool_exists(service, name): | ||
3793 | 84 | """Check to see if a RADOS pool already exists.""" | ||
3794 | 85 | try: | ||
3795 | 86 | out = check_output(['rados', '--id', service, | ||
3796 | 87 | 'lspools']).decode('UTF-8') | ||
3797 | 88 | except CalledProcessError: | ||
3798 | 89 | return False | ||
3799 | 90 | |||
3800 | 91 | return name in out | ||
3801 | 92 | |||
3802 | 93 | |||
3803 | 94 | def get_osds(service): | ||
3804 | 95 | """Return a list of all Ceph Object Storage Daemons currently in the | ||
3805 | 96 | cluster. | ||
3806 | 97 | """ | ||
3807 | 98 | version = ceph_version() | ||
3808 | 99 | if version and version >= '0.56': | ||
3809 | 100 | return json.loads(check_output(['ceph', '--id', service, | ||
3810 | 101 | 'osd', 'ls', | ||
3811 | 102 | '--format=json']).decode('UTF-8')) | ||
3812 | 103 | |||
3813 | 104 | return None | ||
3814 | 105 | |||
3815 | 106 | |||
3816 | 107 | def create_pool(service, name, replicas=3): | ||
3817 | 108 | """Create a new RADOS pool.""" | ||
3818 | 109 | if pool_exists(service, name): | ||
3819 | 110 | log("Ceph pool {} already exists, skipping creation".format(name), | ||
3820 | 111 | level=WARNING) | ||
3821 | 112 | return | ||
3822 | 113 | |||
3823 | 114 | # Calculate the number of placement groups based | ||
3824 | 115 | # on upstream recommended best practices. | ||
3825 | 116 | osds = get_osds(service) | ||
3826 | 117 | if osds: | ||
3827 | 118 | pgnum = (len(osds) * 100 // replicas) | ||
3828 | 119 | else: | ||
3829 | 120 | # NOTE(james-page): Default to 200 for older ceph versions | ||
3830 | 121 | # which don't support OSD query from cli | ||
3831 | 122 | pgnum = 200 | ||
3832 | 123 | |||
3833 | 124 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] | ||
3834 | 125 | check_call(cmd) | ||
3835 | 126 | |||
3836 | 127 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', | ||
3837 | 128 | str(replicas)] | ||
3838 | 129 | check_call(cmd) | ||
3839 | 130 | |||
3840 | 131 | |||
3841 | 132 | def delete_pool(service, name): | ||
3842 | 133 | """Delete a RADOS pool from ceph.""" | ||
3843 | 134 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, | ||
3844 | 135 | '--yes-i-really-really-mean-it'] | ||
3845 | 136 | check_call(cmd) | ||
3846 | 137 | |||
3847 | 138 | |||
3848 | 139 | def _keyfile_path(service): | ||
3849 | 140 | return KEYFILE.format(service) | ||
3850 | 141 | |||
3851 | 142 | |||
3852 | 143 | def _keyring_path(service): | ||
3853 | 144 | return KEYRING.format(service) | ||
3854 | 145 | |||
3855 | 146 | |||
3856 | 147 | def create_keyring(service, key): | ||
3857 | 148 | """Create a new Ceph keyring containing key.""" | ||
3858 | 149 | keyring = _keyring_path(service) | ||
3859 | 150 | if os.path.exists(keyring): | ||
3860 | 151 | log('Ceph keyring exists at %s.' % keyring, level=WARNING) | ||
3861 | 152 | return | ||
3862 | 153 | |||
3863 | 154 | cmd = ['ceph-authtool', keyring, '--create-keyring', | ||
3864 | 155 | '--name=client.{}'.format(service), '--add-key={}'.format(key)] | ||
3865 | 156 | check_call(cmd) | ||
3866 | 157 | log('Created new ceph keyring at %s.' % keyring, level=DEBUG) | ||
3867 | 158 | |||
3868 | 159 | |||
3869 | 160 | def delete_keyring(service): | ||
3870 | 161 | """Delete an existing Ceph keyring.""" | ||
3871 | 162 | keyring = _keyring_path(service) | ||
3872 | 163 | if not os.path.exists(keyring): | ||
3873 | 164 | log('Keyring does not exist at %s' % keyring, level=WARNING) | ||
3874 | 165 | return | ||
3875 | 166 | |||
3876 | 167 | os.remove(keyring) | ||
3877 | 168 | log('Deleted ring at %s.' % keyring, level=INFO) | ||
3878 | 169 | |||
3879 | 170 | |||
3880 | 171 | def create_key_file(service, key): | ||
3881 | 172 | """Create a file containing key.""" | ||
3882 | 173 | keyfile = _keyfile_path(service) | ||
3883 | 174 | if os.path.exists(keyfile): | ||
3884 | 175 | log('Keyfile exists at %s.' % keyfile, level=WARNING) | ||
3885 | 176 | return | ||
3886 | 177 | |||
3887 | 178 | with open(keyfile, 'w') as fd: | ||
3888 | 179 | fd.write(key) | ||
3889 | 180 | |||
3890 | 181 | log('Created new keyfile at %s.' % keyfile, level=INFO) | ||
3891 | 182 | |||
3892 | 183 | |||
3893 | 184 | def get_ceph_nodes(): | ||
3894 | 185 | """Query named relation 'ceph' to determine current nodes.""" | ||
3895 | 186 | hosts = [] | ||
3896 | 187 | for r_id in relation_ids('ceph'): | ||
3897 | 188 | for unit in related_units(r_id): | ||
3898 | 189 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | ||
3899 | 190 | |||
3900 | 191 | return hosts | ||
3901 | 192 | |||
3902 | 193 | |||
3903 | 194 | def configure(service, key, auth, use_syslog): | ||
3904 | 195 | """Perform basic configuration of Ceph.""" | ||
3905 | 196 | create_keyring(service, key) | ||
3906 | 197 | create_key_file(service, key) | ||
3907 | 198 | hosts = get_ceph_nodes() | ||
3908 | 199 | with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: | ||
3909 | 200 | ceph_conf.write(CEPH_CONF.format(auth=auth, | ||
3910 | 201 | keyring=_keyring_path(service), | ||
3911 | 202 | mon_hosts=",".join(map(str, hosts)), | ||
3912 | 203 | use_syslog=use_syslog)) | ||
3913 | 204 | modprobe('rbd') | ||
3914 | 205 | |||
3915 | 206 | |||
3916 | 207 | def image_mapped(name): | ||
3917 | 208 | """Determine whether a RADOS block device is mapped locally.""" | ||
3918 | 209 | try: | ||
3919 | 210 | out = check_output(['rbd', 'showmapped']).decode('UTF-8') | ||
3920 | 211 | except CalledProcessError: | ||
3921 | 212 | return False | ||
3922 | 213 | |||
3923 | 214 | return name in out | ||
3924 | 215 | |||
3925 | 216 | |||
3926 | 217 | def map_block_storage(service, pool, image): | ||
3927 | 218 | """Map a RADOS block device for local use.""" | ||
3928 | 219 | cmd = [ | ||
3929 | 220 | 'rbd', | ||
3930 | 221 | 'map', | ||
3931 | 222 | '{}/{}'.format(pool, image), | ||
3932 | 223 | '--user', | ||
3933 | 224 | service, | ||
3934 | 225 | '--secret', | ||
3935 | 226 | _keyfile_path(service), | ||
3936 | 227 | ] | ||
3937 | 228 | check_call(cmd) | ||
3938 | 229 | |||
3939 | 230 | |||
3940 | 231 | def filesystem_mounted(fs): | ||
3941 | 232 | """Determine whether a filesytems is already mounted.""" | ||
3942 | 233 | return fs in [f for f, m in mounts()] | ||
3943 | 234 | |||
3944 | 235 | |||
3945 | 236 | def make_filesystem(blk_device, fstype='ext4', timeout=10): | ||
3946 | 237 | """Make a new filesystem on the specified block device.""" | ||
3947 | 238 | count = 0 | ||
3948 | 239 | e_noent = os.errno.ENOENT | ||
3949 | 240 | while not os.path.exists(blk_device): | ||
3950 | 241 | if count >= timeout: | ||
3951 | 242 | log('Gave up waiting on block device %s' % blk_device, | ||
3952 | 243 | level=ERROR) | ||
3953 | 244 | raise IOError(e_noent, os.strerror(e_noent), blk_device) | ||
3954 | 245 | |||
3955 | 246 | log('Waiting for block device %s to appear' % blk_device, | ||
3956 | 247 | level=DEBUG) | ||
3957 | 248 | count += 1 | ||
3958 | 249 | time.sleep(1) | ||
3959 | 250 | else: | ||
3960 | 251 | log('Formatting block device %s as filesystem %s.' % | ||
3961 | 252 | (blk_device, fstype), level=INFO) | ||
3962 | 253 | check_call(['mkfs', '-t', fstype, blk_device]) | ||
3963 | 254 | |||
3964 | 255 | |||
3965 | 256 | def place_data_on_block_device(blk_device, data_src_dst): | ||
3966 | 257 | """Migrate data in data_src_dst to blk_device and then remount.""" | ||
3967 | 258 | # mount block device into /mnt | ||
3968 | 259 | mount(blk_device, '/mnt') | ||
3969 | 260 | # copy data to /mnt | ||
3970 | 261 | copy_files(data_src_dst, '/mnt') | ||
3971 | 262 | # umount block device | ||
3972 | 263 | umount('/mnt') | ||
3973 | 264 | # Grab user/group ID's from original source | ||
3974 | 265 | _dir = os.stat(data_src_dst) | ||
3975 | 266 | uid = _dir.st_uid | ||
3976 | 267 | gid = _dir.st_gid | ||
3977 | 268 | # re-mount where the data should originally be | ||
3978 | 269 | # TODO: persist is currently a NO-OP in core.host | ||
3979 | 270 | mount(blk_device, data_src_dst, persist=True) | ||
3980 | 271 | # ensure original ownership of new mount. | ||
3981 | 272 | os.chown(data_src_dst, uid, gid) | ||
3982 | 273 | |||
3983 | 274 | |||
3984 | 275 | # TODO: re-use | ||
3985 | 276 | def modprobe(module): | ||
3986 | 277 | """Load a kernel module and configure for auto-load on reboot.""" | ||
3987 | 278 | log('Loading kernel module', level=INFO) | ||
3988 | 279 | cmd = ['modprobe', module] | ||
3989 | 280 | check_call(cmd) | ||
3990 | 281 | with open('/etc/modules', 'r+') as modules: | ||
3991 | 282 | if module not in modules.read(): | ||
3992 | 283 | modules.write(module) | ||
3993 | 284 | |||
3994 | 285 | |||
3995 | 286 | def copy_files(src, dst, symlinks=False, ignore=None): | ||
3996 | 287 | """Copy files from src to dst.""" | ||
3997 | 288 | for item in os.listdir(src): | ||
3998 | 289 | s = os.path.join(src, item) | ||
3999 | 290 | d = os.path.join(dst, item) | ||
4000 | 291 | if os.path.isdir(s): | ||
4001 | 292 | shutil.copytree(s, d, symlinks, ignore) | ||
4002 | 293 | else: | ||
4003 | 294 | shutil.copy2(s, d) | ||
4004 | 295 | |||
4005 | 296 | |||
4006 | 297 | def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, | ||
4007 | 298 | blk_device, fstype, system_services=[], | ||
4008 | 299 | replicas=3): | ||
4009 | 300 | """NOTE: This function must only be called from a single service unit for | ||
4010 | 301 | the same rbd_img, otherwise data loss will occur. | ||
4011 | 302 | |||
4012 | 303 | Ensures given pool and RBD image exists, is mapped to a block device, | ||
4013 | 304 | and the device is formatted and mounted at the given mount_point. | ||
4014 | 305 | |||
4015 | 306 | If formatting a device for the first time, data existing at mount_point | ||
4016 | 307 | will be migrated to the RBD device before being re-mounted. | ||
4017 | 308 | |||
4018 | 309 | All services listed in system_services will be stopped prior to data | ||
4019 | 310 | migration and restarted when complete. | ||
4020 | 311 | """ | ||
4021 | 312 | # Ensure pool, RBD image, RBD mappings are in place. | ||
4022 | 313 | if not pool_exists(service, pool): | ||
4023 | 314 | log('Creating new pool {}.'.format(pool), level=INFO) | ||
4024 | 315 | create_pool(service, pool, replicas=replicas) | ||
4025 | 316 | |||
4026 | 317 | if not rbd_exists(service, pool, rbd_img): | ||
4027 | 318 | log('Creating RBD image ({}).'.format(rbd_img), level=INFO) | ||
4028 | 319 | create_rbd_image(service, pool, rbd_img, sizemb) | ||
4029 | 320 | |||
4030 | 321 | if not image_mapped(rbd_img): | ||
4031 | 322 | log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), | ||
4032 | 323 | level=INFO) | ||
4033 | 324 | map_block_storage(service, pool, rbd_img) | ||
4034 | 325 | |||
4035 | 326 | # make file system | ||
4036 | 327 | # TODO: What happens if for whatever reason this is run again and | ||
4037 | 328 | # the data is already in the rbd device and/or is mounted?? | ||
4038 | 329 | # When it is mounted already, it will fail to make the fs | ||
4039 | 330 | # XXX: This is really sketchy! Need to at least add an fstab entry | ||
4040 | 331 | # otherwise this hook will blow away existing data if it's executed | ||
4041 | 332 | # after a reboot. | ||
4042 | 333 | if not filesystem_mounted(mount_point): | ||
4043 | 334 | make_filesystem(blk_device, fstype) | ||
4044 | 335 | |||
4045 | 336 | for svc in system_services: | ||
4046 | 337 | if service_running(svc): | ||
4047 | 338 | log('Stopping service {} prior to migrating data.' | ||
4048 | 339 | .format(svc), level=DEBUG) | ||
4049 | 340 | service_stop(svc) | ||
4050 | 341 | |||
4051 | 342 | place_data_on_block_device(blk_device, mount_point) | ||
4052 | 343 | |||
4053 | 344 | for svc in system_services: | ||
4054 | 345 | log('Starting service {} after migrating data.' | ||
4055 | 346 | .format(svc), level=DEBUG) | ||
4056 | 347 | service_start(svc) | ||
4057 | 348 | |||
4058 | 349 | |||
4059 | 350 | def ensure_ceph_keyring(service, user=None, group=None): | ||
4060 | 351 | """Ensures a ceph keyring is created for a named service and optionally | ||
4061 | 352 | ensures user and group ownership. | ||
4062 | 353 | |||
4063 | 354 | Returns False if no ceph key is available in relation state. | ||
4064 | 355 | """ | ||
4065 | 356 | key = None | ||
4066 | 357 | for rid in relation_ids('ceph'): | ||
4067 | 358 | for unit in related_units(rid): | ||
4068 | 359 | key = relation_get('key', rid=rid, unit=unit) | ||
4069 | 360 | if key: | ||
4070 | 361 | break | ||
4071 | 362 | |||
4072 | 363 | if not key: | ||
4073 | 364 | return False | ||
4074 | 365 | |||
4075 | 366 | create_keyring(service=service, key=key) | ||
4076 | 367 | keyring = _keyring_path(service) | ||
4077 | 368 | if user and group: | ||
4078 | 369 | check_call(['chown', '%s.%s' % (user, group), keyring]) | ||
4079 | 370 | |||
4080 | 371 | return True | ||
4081 | 372 | |||
4082 | 373 | |||
4083 | 374 | def ceph_version(): | ||
4084 | 375 | """Retrieve the local version of ceph.""" | ||
4085 | 376 | if os.path.exists('/usr/bin/ceph'): | ||
4086 | 377 | cmd = ['ceph', '-v'] | ||
4087 | 378 | output = check_output(cmd).decode('US-ASCII') | ||
4088 | 379 | output = output.split() | ||
4089 | 380 | if len(output) > 3: | ||
4090 | 381 | return output[2] | ||
4091 | 382 | else: | ||
4092 | 383 | return None | ||
4093 | 384 | else: | ||
4094 | 385 | return None | ||
4095 | 386 | |||
4096 | 387 | |||
4097 | 388 | class CephBrokerRq(object): | ||
4098 | 389 | """Ceph broker request. | ||
4099 | 390 | |||
4100 | 391 | Multiple operations can be added to a request and sent to the Ceph broker | ||
4101 | 392 | to be executed. | ||
4102 | 393 | |||
4103 | 394 | Request is json-encoded for sending over the wire. | ||
4104 | 395 | |||
4105 | 396 | The API is versioned and defaults to version 1. | ||
4106 | 397 | """ | ||
4107 | 398 | def __init__(self, api_version=1): | ||
4108 | 399 | self.api_version = api_version | ||
4109 | 400 | self.ops = [] | ||
4110 | 401 | |||
4111 | 402 | def add_op_create_pool(self, name, replica_count=3): | ||
4112 | 403 | self.ops.append({'op': 'create-pool', 'name': name, | ||
4113 | 404 | 'replicas': replica_count}) | ||
4114 | 405 | |||
4115 | 406 | @property | ||
4116 | 407 | def request(self): | ||
4117 | 408 | return json.dumps({'api-version': self.api_version, 'ops': self.ops}) | ||
4118 | 409 | |||
4119 | 410 | |||
4120 | 411 | class CephBrokerRsp(object): | ||
4121 | 412 | """Ceph broker response. | ||
4122 | 413 | |||
4123 | 414 | Response is json-decoded and contents provided as methods/properties. | ||
4124 | 415 | |||
4125 | 416 | The API is versioned and defaults to version 1. | ||
4126 | 417 | """ | ||
4127 | 418 | def __init__(self, encoded_rsp): | ||
4128 | 419 | self.api_version = None | ||
4129 | 420 | self.rsp = json.loads(encoded_rsp) | ||
4130 | 421 | |||
4131 | 422 | @property | ||
4132 | 423 | def exit_code(self): | ||
4133 | 424 | return self.rsp.get('exit-code') | ||
4134 | 425 | |||
4135 | 426 | @property | ||
4136 | 427 | def exit_msg(self): | ||
4137 | 428 | return self.rsp.get('stderr') | ||
4138 | 0 | 429 | ||
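The broker request/response classes added above give client charms a declarative way to ask the ceph charm for resources. A minimal usage sketch from a hook, assuming a joined 'ceph' relation; the 'broker_req' relation key name is illustrative only:

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq
    from charmhelpers.core.hookenv import relation_ids, relation_set

    # Build a versioned request asking the ceph charm to create a pool
    # with three replicas.
    rq = CephBrokerRq()
    rq.add_op_create_pool(name='objects', replica_count=3)

    # Publish the JSON-encoded request on every joined ceph relation;
    # the remote end's reply can later be parsed with CephBrokerRsp.
    for rid in relation_ids('ceph'):
        relation_set(relation_id=rid, broker_req=rq.request)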
4139 | === added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' | |||
4140 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000 | |||
4141 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-01-15 16:18:44 +0000 | |||
4142 | @@ -0,0 +1,62 @@ | |||
4143 | 1 | import os | ||
4144 | 2 | import re | ||
4145 | 3 | from subprocess import ( | ||
4146 | 4 | check_call, | ||
4147 | 5 | check_output, | ||
4148 | 6 | ) | ||
4149 | 7 | |||
4150 | 8 | import six | ||
4151 | 9 | |||
4152 | 10 | |||
4153 | 11 | ################################################## | ||
4154 | 12 | # loopback device helpers. | ||
4155 | 13 | ################################################## | ||
4156 | 14 | def loopback_devices(): | ||
4157 | 15 | ''' | ||
4158 | 16 | Parse through 'losetup -a' output to determine currently mapped | ||
4159 | 17 | loopback devices. Output is expected to look like: | ||
4160 | 18 | |||
4161 | 19 | /dev/loop0: [0807]:961814 (/tmp/my.img) | ||
4162 | 20 | |||
4163 | 21 | :returns: dict: a dict mapping {loopback_dev: backing_file} | ||
4164 | 22 | ''' | ||
4165 | 23 | loopbacks = {} | ||
4166 | 24 | cmd = ['losetup', '-a'] | ||
4167 | 25 | devs = [d.strip().split(' ') for d in | ||
4168 | 26 | check_output(cmd).splitlines() if d != ''] | ||
4169 | 27 | for dev, _, f in devs: | ||
4170 | 28 | loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] | ||
4171 | 29 | return loopbacks | ||
4172 | 30 | |||
4173 | 31 | |||
4174 | 32 | def create_loopback(file_path): | ||
4175 | 33 | ''' | ||
4176 | 34 | Create a loopback device for a given backing file. | ||
4177 | 35 | |||
4178 | 36 | :returns: str: Full path to new loopback device (eg, /dev/loop0) | ||
4179 | 37 | ''' | ||
4180 | 38 | file_path = os.path.abspath(file_path) | ||
4181 | 39 | check_call(['losetup', '--find', file_path]) | ||
4182 | 40 | for d, f in six.iteritems(loopback_devices()): | ||
4183 | 41 | if f == file_path: | ||
4184 | 42 | return d | ||
4185 | 43 | |||
4186 | 44 | |||
4187 | 45 | def ensure_loopback_device(path, size): | ||
4188 | 46 | ''' | ||
4189 | 47 | Ensure a loopback device exists for a given backing file path and size. | ||
4190 | 48 | If a loopback device is not already mapped to the file, a new one will be created. | ||
4191 | 49 | |||
4192 | 50 | TODO: Confirm size of found loopback device. | ||
4193 | 51 | |||
4194 | 52 | :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) | ||
4195 | 53 | ''' | ||
4196 | 54 | for d, f in six.iteritems(loopback_devices()): | ||
4197 | 55 | if f == path: | ||
4198 | 56 | return d | ||
4199 | 57 | |||
4200 | 58 | if not os.path.exists(path): | ||
4201 | 59 | cmd = ['truncate', '--size', size, path] | ||
4202 | 60 | check_call(cmd) | ||
4203 | 61 | |||
4204 | 62 | return create_loopback(path) | ||
4205 | 0 | 63 | ||
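The loopback helpers are idempotent: ensure_loopback_device() returns the existing mapping when one is present and otherwise creates the backing file and loop device. A short sketch with an assumed path and size:

    from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device

    # Creates a sparse 5G /srv/test.img if needed, maps it with losetup,
    # and returns the device path, e.g. '/dev/loop0'.
    device = ensure_loopback_device('/srv/test.img', '5G')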
4206 | === added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py' | |||
4207 | --- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000 | |||
4208 | +++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2015-01-15 16:18:44 +0000 | |||
4209 | @@ -0,0 +1,89 @@ | |||
4210 | 1 | from subprocess import ( | ||
4211 | 2 | CalledProcessError, | ||
4212 | 3 | check_call, | ||
4213 | 4 | check_output, | ||
4214 | 5 | Popen, | ||
4215 | 6 | PIPE, | ||
4216 | 7 | ) | ||
4217 | 8 | |||
4218 | 9 | |||
4219 | 10 | ################################################## | ||
4220 | 11 | # LVM helpers. | ||
4221 | 12 | ################################################## | ||
4222 | 13 | def deactivate_lvm_volume_group(block_device): | ||
4223 | 14 | ''' | ||
4224 | 15 | Deactivate any volume group associated with an LVM physical volume. | ||
4225 | 16 | |||
4226 | 17 | :param block_device: str: Full path to LVM physical volume | ||
4227 | 18 | ''' | ||
4228 | 19 | vg = list_lvm_volume_group(block_device) | ||
4229 | 20 | if vg: | ||
4230 | 21 | cmd = ['vgchange', '-an', vg] | ||
4231 | 22 | check_call(cmd) | ||
4232 | 23 | |||
4233 | 24 | |||
4234 | 25 | def is_lvm_physical_volume(block_device): | ||
4235 | 26 | ''' | ||
4236 | 27 | Determine whether a block device is initialized as an LVM PV. | ||
4237 | 28 | |||
4238 | 29 | :param block_device: str: Full path of block device to inspect. | ||
4239 | 30 | |||
4240 | 31 | :returns: boolean: True if block device is a PV, False if not. | ||
4241 | 32 | ''' | ||
4242 | 33 | try: | ||
4243 | 34 | check_output(['pvdisplay', block_device]) | ||
4244 | 35 | return True | ||
4245 | 36 | except CalledProcessError: | ||
4246 | 37 | return False | ||
4247 | 38 | |||
4248 | 39 | |||
4249 | 40 | def remove_lvm_physical_volume(block_device): | ||
4250 | 41 | ''' | ||
4251 | 42 | Remove LVM PV signatures from a given block device. | ||
4252 | 43 | |||
4253 | 44 | :param block_device: str: Full path of block device to scrub. | ||
4254 | 45 | ''' | ||
4255 | 46 | p = Popen(['pvremove', '-ff', block_device], | ||
4256 | 47 | stdin=PIPE) | ||
4257 | 48 | p.communicate(input='y\n') | ||
4258 | 49 | |||
4259 | 50 | |||
4260 | 51 | def list_lvm_volume_group(block_device): | ||
4261 | 52 | ''' | ||
4262 | 53 | List LVM volume group associated with a given block device. | ||
4263 | 54 | |||
4264 | 55 | Assumes block device is a valid LVM PV. | ||
4265 | 56 | |||
4266 | 57 | :param block_device: str: Full path of block device to inspect. | ||
4267 | 58 | |||
4268 | 59 | :returns: str: Name of volume group associated with block device or None | ||
4269 | 60 | ''' | ||
4270 | 61 | vg = None | ||
4271 | 62 | pvd = check_output(['pvdisplay', block_device]).splitlines() | ||
4272 | 63 | for l in pvd: | ||
4273 | 64 | l = l.decode('UTF-8') | ||
4274 | 65 | if l.strip().startswith('VG Name'): | ||
4275 | 66 | vg = ' '.join(l.strip().split()[2:]) | ||
4276 | 67 | return vg | ||
4277 | 68 | |||
4278 | 69 | |||
4279 | 70 | def create_lvm_physical_volume(block_device): | ||
4280 | 71 | ''' | ||
4281 | 72 | Initialize a block device as an LVM physical volume. | ||
4282 | 73 | |||
4283 | 74 | :param block_device: str: Full path of block device to initialize. | ||
4284 | 75 | |||
4285 | 76 | ''' | ||
4286 | 77 | check_call(['pvcreate', block_device]) | ||
4287 | 78 | |||
4288 | 79 | |||
4289 | 80 | def create_lvm_volume_group(volume_group, block_device): | ||
4290 | 81 | ''' | ||
4291 | 82 | Create an LVM volume group backed by a given block device. | ||
4292 | 83 | |||
4293 | 84 | Assumes block device has already been initialized as an LVM PV. | ||
4294 | 85 | |||
4295 | 86 | :param volume_group: str: Name of volume group to create. | ||
4296 | 87 | :param block_device: str: Full path of PV-initialized block device. | ||
4297 | 88 | ''' | ||
4298 | 89 | check_call(['vgcreate', volume_group, block_device]) | ||
4299 | 0 | 90 | ||
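The LVM helpers are thin wrappers around pvcreate/vgcreate/pvdisplay. A minimal sketch, with the device and volume group names assumed purely for illustration:

    from charmhelpers.contrib.storage.linux.lvm import (
        create_lvm_physical_volume,
        create_lvm_volume_group,
        is_lvm_physical_volume,
    )

    # Initialise the device as a PV once, then back a volume group with it.
    if not is_lvm_physical_volume('/dev/vdb'):
        create_lvm_physical_volume('/dev/vdb')
        create_lvm_volume_group('data-vg', '/dev/vdb')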
4300 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
4301 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-09-17 14:11:53 +0000 | |||
4302 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-01-15 16:18:44 +0000 | |||
4303 | @@ -30,7 +30,8 @@ | |||
4304 | 30 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up | 30 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up |
4305 | 31 | call(['sgdisk', '--zap-all', '--mbrtogpt', | 31 | call(['sgdisk', '--zap-all', '--mbrtogpt', |
4306 | 32 | '--clear', block_device]) | 32 | '--clear', block_device]) |
4308 | 33 | dev_end = check_output(['blockdev', '--getsz', block_device]) | 33 | dev_end = check_output(['blockdev', '--getsz', |
4309 | 34 | block_device]).decode('UTF-8') | ||
4310 | 34 | gpt_end = int(dev_end.split()[0]) - 100 | 35 | gpt_end = int(dev_end.split()[0]) - 100 |
4311 | 35 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), | 36 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), |
4312 | 36 | 'bs=1M', 'count=1']) | 37 | 'bs=1M', 'count=1']) |
4313 | @@ -47,7 +48,7 @@ | |||
4314 | 47 | it doesn't. | 48 | it doesn't. |
4315 | 48 | ''' | 49 | ''' |
4316 | 49 | is_partition = bool(re.search(r".*[0-9]+\b", device)) | 50 | is_partition = bool(re.search(r".*[0-9]+\b", device)) |
4318 | 50 | out = check_output(['mount']) | 51 | out = check_output(['mount']).decode('UTF-8') |
4319 | 51 | if is_partition: | 52 | if is_partition: |
4320 | 52 | return bool(re.search(device + r"\b", out)) | 53 | return bool(re.search(device + r"\b", out)) |
4321 | 53 | return bool(re.search(device + r"[0-9]+\b", out)) | 54 | return bool(re.search(device + r"[0-9]+\b", out)) |
4322 | 54 | 55 | ||
4323 | === added file 'hooks/charmhelpers/core/decorators.py' | |||
4324 | --- hooks/charmhelpers/core/decorators.py 1970-01-01 00:00:00 +0000 | |||
4325 | +++ hooks/charmhelpers/core/decorators.py 2015-01-15 16:18:44 +0000 | |||
4326 | @@ -0,0 +1,41 @@ | |||
4327 | 1 | # | ||
4328 | 2 | # Copyright 2014 Canonical Ltd. | ||
4329 | 3 | # | ||
4330 | 4 | # Authors: | ||
4331 | 5 | # Edward Hope-Morley <opentastic@gmail.com> | ||
4332 | 6 | # | ||
4333 | 7 | |||
4334 | 8 | import time | ||
4335 | 9 | |||
4336 | 10 | from charmhelpers.core.hookenv import ( | ||
4337 | 11 | log, | ||
4338 | 12 | INFO, | ||
4339 | 13 | ) | ||
4340 | 14 | |||
4341 | 15 | |||
4342 | 16 | def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): | ||
4343 | 17 | """If the decorated function raises exception exc_type, allow num_retries | ||
4344 | 18 | retry attempts before raising the exception. | ||
4345 | 19 | """ | ||
4346 | 20 | def _retry_on_exception_inner_1(f): | ||
4347 | 21 | def _retry_on_exception_inner_2(*args, **kwargs): | ||
4348 | 22 | retries = num_retries | ||
4349 | 23 | multiplier = 1 | ||
4350 | 24 | while True: | ||
4351 | 25 | try: | ||
4352 | 26 | return f(*args, **kwargs) | ||
4353 | 27 | except exc_type: | ||
4354 | 28 | if not retries: | ||
4355 | 29 | raise | ||
4356 | 30 | |||
4357 | 31 | delay = base_delay * multiplier | ||
4358 | 32 | multiplier += 1 | ||
4359 | 33 | log("Retrying '%s' %d more times (delay=%s)" % | ||
4360 | 34 | (f.__name__, retries, delay), level=INFO) | ||
4361 | 35 | retries -= 1 | ||
4362 | 36 | if delay: | ||
4363 | 37 | time.sleep(delay) | ||
4364 | 38 | |||
4365 | 39 | return _retry_on_exception_inner_2 | ||
4366 | 40 | |||
4367 | 41 | return _retry_on_exception_inner_1 | ||
4368 | 0 | 42 | ||
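retry_on_exception() retries with a linearly growing delay (base_delay, 2*base_delay, ...) and re-raises once the retries are exhausted. A sketch guarding a flaky ceph CLI call, assuming the command is available on the unit:

    from subprocess import CalledProcessError, check_output

    from charmhelpers.core.decorators import retry_on_exception

    # Retry up to 3 times, sleeping 3s, 6s and then 9s between attempts.
    @retry_on_exception(3, base_delay=3, exc_type=CalledProcessError)
    def ceph_quorum_status():
        return check_output(['ceph', 'quorum_status']).decode('UTF-8')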
4369 | === modified file 'hooks/charmhelpers/core/fstab.py' | |||
4370 | --- hooks/charmhelpers/core/fstab.py 2014-07-24 09:43:27 +0000 | |||
4371 | +++ hooks/charmhelpers/core/fstab.py 2015-01-15 16:18:44 +0000 | |||
4372 | @@ -3,10 +3,11 @@ | |||
4373 | 3 | 3 | ||
4374 | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' |
4375 | 5 | 5 | ||
4376 | 6 | import io | ||
4377 | 6 | import os | 7 | import os |
4378 | 7 | 8 | ||
4379 | 8 | 9 | ||
4381 | 9 | class Fstab(file): | 10 | class Fstab(io.FileIO): |
4382 | 10 | """This class extends file in order to implement a file reader/writer | 11 | """This class extends file in order to implement a file reader/writer |
4383 | 11 | for file `/etc/fstab` | 12 | for file `/etc/fstab` |
4384 | 12 | """ | 13 | """ |
4385 | @@ -24,8 +25,8 @@ | |||
4386 | 24 | options = "defaults" | 25 | options = "defaults" |
4387 | 25 | 26 | ||
4388 | 26 | self.options = options | 27 | self.options = options |
4391 | 27 | self.d = d | 28 | self.d = int(d) |
4392 | 28 | self.p = p | 29 | self.p = int(p) |
4393 | 29 | 30 | ||
4394 | 30 | def __eq__(self, o): | 31 | def __eq__(self, o): |
4395 | 31 | return str(self) == str(o) | 32 | return str(self) == str(o) |
4396 | @@ -45,7 +46,7 @@ | |||
4397 | 45 | self._path = path | 46 | self._path = path |
4398 | 46 | else: | 47 | else: |
4399 | 47 | self._path = self.DEFAULT_PATH | 48 | self._path = self.DEFAULT_PATH |
4401 | 48 | file.__init__(self, self._path, 'r+') | 49 | super(Fstab, self).__init__(self._path, 'rb+') |
4402 | 49 | 50 | ||
4403 | 50 | def _hydrate_entry(self, line): | 51 | def _hydrate_entry(self, line): |
4404 | 51 | # NOTE: use split with no arguments to split on any | 52 | # NOTE: use split with no arguments to split on any |
4405 | @@ -58,8 +59,9 @@ | |||
4406 | 58 | def entries(self): | 59 | def entries(self): |
4407 | 59 | self.seek(0) | 60 | self.seek(0) |
4408 | 60 | for line in self.readlines(): | 61 | for line in self.readlines(): |
4409 | 62 | line = line.decode('us-ascii') | ||
4410 | 61 | try: | 63 | try: |
4412 | 62 | if not line.startswith("#"): | 64 | if line.strip() and not line.startswith("#"): |
4413 | 63 | yield self._hydrate_entry(line) | 65 | yield self._hydrate_entry(line) |
4414 | 64 | except ValueError: | 66 | except ValueError: |
4415 | 65 | pass | 67 | pass |
4416 | @@ -75,14 +77,14 @@ | |||
4417 | 75 | if self.get_entry_by_attr('device', entry.device): | 77 | if self.get_entry_by_attr('device', entry.device): |
4418 | 76 | return False | 78 | return False |
4419 | 77 | 79 | ||
4421 | 78 | self.write(str(entry) + '\n') | 80 | self.write((str(entry) + '\n').encode('us-ascii')) |
4422 | 79 | self.truncate() | 81 | self.truncate() |
4423 | 80 | return entry | 82 | return entry |
4424 | 81 | 83 | ||
4425 | 82 | def remove_entry(self, entry): | 84 | def remove_entry(self, entry): |
4426 | 83 | self.seek(0) | 85 | self.seek(0) |
4427 | 84 | 86 | ||
4429 | 85 | lines = self.readlines() | 87 | lines = [l.decode('us-ascii') for l in self.readlines()] |
4430 | 86 | 88 | ||
4431 | 87 | found = False | 89 | found = False |
4432 | 88 | for index, line in enumerate(lines): | 90 | for index, line in enumerate(lines): |
4433 | @@ -97,7 +99,7 @@ | |||
4434 | 97 | lines.remove(line) | 99 | lines.remove(line) |
4435 | 98 | 100 | ||
4436 | 99 | self.seek(0) | 101 | self.seek(0) |
4438 | 100 | self.write(''.join(lines)) | 102 | self.write(''.join(lines).encode('us-ascii')) |
4439 | 101 | self.truncate() | 103 | self.truncate() |
4440 | 102 | return True | 104 | return True |
4441 | 103 | 105 | ||
4442 | 104 | 106 | ||
4443 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
4444 | --- hooks/charmhelpers/core/hookenv.py 2014-10-21 07:28:36 +0000 | |||
4445 | +++ hooks/charmhelpers/core/hookenv.py 2015-01-15 16:18:44 +0000 | |||
4446 | @@ -9,9 +9,14 @@ | |||
4447 | 9 | import yaml | 9 | import yaml |
4448 | 10 | import subprocess | 10 | import subprocess |
4449 | 11 | import sys | 11 | import sys |
4450 | 12 | import UserDict | ||
4451 | 13 | from subprocess import CalledProcessError | 12 | from subprocess import CalledProcessError |
4452 | 14 | 13 | ||
4453 | 14 | import six | ||
4454 | 15 | if not six.PY3: | ||
4455 | 16 | from UserDict import UserDict | ||
4456 | 17 | else: | ||
4457 | 18 | from collections import UserDict | ||
4458 | 19 | |||
4459 | 15 | CRITICAL = "CRITICAL" | 20 | CRITICAL = "CRITICAL" |
4460 | 16 | ERROR = "ERROR" | 21 | ERROR = "ERROR" |
4461 | 17 | WARNING = "WARNING" | 22 | WARNING = "WARNING" |
4462 | @@ -63,16 +68,18 @@ | |||
4463 | 63 | command = ['juju-log'] | 68 | command = ['juju-log'] |
4464 | 64 | if level: | 69 | if level: |
4465 | 65 | command += ['-l', level] | 70 | command += ['-l', level] |
4466 | 71 | if not isinstance(message, six.string_types): | ||
4467 | 72 | message = repr(message) | ||
4468 | 66 | command += [message] | 73 | command += [message] |
4469 | 67 | subprocess.call(command) | 74 | subprocess.call(command) |
4470 | 68 | 75 | ||
4471 | 69 | 76 | ||
4473 | 70 | class Serializable(UserDict.IterableUserDict): | 77 | class Serializable(UserDict): |
4474 | 71 | """Wrapper, an object that can be serialized to yaml or json""" | 78 | """Wrapper, an object that can be serialized to yaml or json""" |
4475 | 72 | 79 | ||
4476 | 73 | def __init__(self, obj): | 80 | def __init__(self, obj): |
4477 | 74 | # wrap the object | 81 | # wrap the object |
4479 | 75 | UserDict.IterableUserDict.__init__(self) | 82 | UserDict.__init__(self) |
4480 | 76 | self.data = obj | 83 | self.data = obj |
4481 | 77 | 84 | ||
4482 | 78 | def __getattr__(self, attr): | 85 | def __getattr__(self, attr): |
4483 | @@ -218,7 +225,7 @@ | |||
4484 | 218 | prev_keys = [] | 225 | prev_keys = [] |
4485 | 219 | if self._prev_dict is not None: | 226 | if self._prev_dict is not None: |
4486 | 220 | prev_keys = self._prev_dict.keys() | 227 | prev_keys = self._prev_dict.keys() |
4488 | 221 | return list(set(prev_keys + dict.keys(self))) | 228 | return list(set(prev_keys + list(dict.keys(self)))) |
4489 | 222 | 229 | ||
4490 | 223 | def load_previous(self, path=None): | 230 | def load_previous(self, path=None): |
4491 | 224 | """Load previous copy of config from disk. | 231 | """Load previous copy of config from disk. |
4492 | @@ -269,7 +276,7 @@ | |||
4493 | 269 | 276 | ||
4494 | 270 | """ | 277 | """ |
4495 | 271 | if self._prev_dict: | 278 | if self._prev_dict: |
4497 | 272 | for k, v in self._prev_dict.iteritems(): | 279 | for k, v in six.iteritems(self._prev_dict): |
4498 | 273 | if k not in self: | 280 | if k not in self: |
4499 | 274 | self[k] = v | 281 | self[k] = v |
4500 | 275 | with open(self.path, 'w') as f: | 282 | with open(self.path, 'w') as f: |
4501 | @@ -284,7 +291,8 @@ | |||
4502 | 284 | config_cmd_line.append(scope) | 291 | config_cmd_line.append(scope) |
4503 | 285 | config_cmd_line.append('--format=json') | 292 | config_cmd_line.append('--format=json') |
4504 | 286 | try: | 293 | try: |
4506 | 287 | config_data = json.loads(subprocess.check_output(config_cmd_line)) | 294 | config_data = json.loads( |
4507 | 295 | subprocess.check_output(config_cmd_line).decode('UTF-8')) | ||
4508 | 288 | if scope is not None: | 296 | if scope is not None: |
4509 | 289 | return config_data | 297 | return config_data |
4510 | 290 | return Config(config_data) | 298 | return Config(config_data) |
4511 | @@ -303,10 +311,10 @@ | |||
4512 | 303 | if unit: | 311 | if unit: |
4513 | 304 | _args.append(unit) | 312 | _args.append(unit) |
4514 | 305 | try: | 313 | try: |
4516 | 306 | return json.loads(subprocess.check_output(_args)) | 314 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
4517 | 307 | except ValueError: | 315 | except ValueError: |
4518 | 308 | return None | 316 | return None |
4520 | 309 | except CalledProcessError, e: | 317 | except CalledProcessError as e: |
4521 | 310 | if e.returncode == 2: | 318 | if e.returncode == 2: |
4522 | 311 | return None | 319 | return None |
4523 | 312 | raise | 320 | raise |
4524 | @@ -318,7 +326,7 @@ | |||
4525 | 318 | relation_cmd_line = ['relation-set'] | 326 | relation_cmd_line = ['relation-set'] |
4526 | 319 | if relation_id is not None: | 327 | if relation_id is not None: |
4527 | 320 | relation_cmd_line.extend(('-r', relation_id)) | 328 | relation_cmd_line.extend(('-r', relation_id)) |
4529 | 321 | for k, v in (relation_settings.items() + kwargs.items()): | 329 | for k, v in (list(relation_settings.items()) + list(kwargs.items())): |
4530 | 322 | if v is None: | 330 | if v is None: |
4531 | 323 | relation_cmd_line.append('{}='.format(k)) | 331 | relation_cmd_line.append('{}='.format(k)) |
4532 | 324 | else: | 332 | else: |
4533 | @@ -335,7 +343,8 @@ | |||
4534 | 335 | relid_cmd_line = ['relation-ids', '--format=json'] | 343 | relid_cmd_line = ['relation-ids', '--format=json'] |
4535 | 336 | if reltype is not None: | 344 | if reltype is not None: |
4536 | 337 | relid_cmd_line.append(reltype) | 345 | relid_cmd_line.append(reltype) |
4538 | 338 | return json.loads(subprocess.check_output(relid_cmd_line)) or [] | 346 | return json.loads( |
4539 | 347 | subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] | ||
4540 | 339 | return [] | 348 | return [] |
4541 | 340 | 349 | ||
4542 | 341 | 350 | ||
4543 | @@ -346,7 +355,8 @@ | |||
4544 | 346 | units_cmd_line = ['relation-list', '--format=json'] | 355 | units_cmd_line = ['relation-list', '--format=json'] |
4545 | 347 | if relid is not None: | 356 | if relid is not None: |
4546 | 348 | units_cmd_line.extend(('-r', relid)) | 357 | units_cmd_line.extend(('-r', relid)) |
4548 | 349 | return json.loads(subprocess.check_output(units_cmd_line)) or [] | 358 | return json.loads( |
4549 | 359 | subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] | ||
4550 | 350 | 360 | ||
4551 | 351 | 361 | ||
4552 | 352 | @cached | 362 | @cached |
4553 | @@ -386,21 +396,31 @@ | |||
4554 | 386 | 396 | ||
4555 | 387 | 397 | ||
4556 | 388 | @cached | 398 | @cached |
4557 | 399 | def metadata(): | ||
4558 | 400 | """Get the current charm metadata.yaml contents as a python object""" | ||
4559 | 401 | with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: | ||
4560 | 402 | return yaml.safe_load(md) | ||
4561 | 403 | |||
4562 | 404 | |||
4563 | 405 | @cached | ||
4564 | 389 | def relation_types(): | 406 | def relation_types(): |
4565 | 390 | """Get a list of relation types supported by this charm""" | 407 | """Get a list of relation types supported by this charm""" |
4566 | 391 | charmdir = os.environ.get('CHARM_DIR', '') | ||
4567 | 392 | mdf = open(os.path.join(charmdir, 'metadata.yaml')) | ||
4568 | 393 | md = yaml.safe_load(mdf) | ||
4569 | 394 | rel_types = [] | 408 | rel_types = [] |
4570 | 409 | md = metadata() | ||
4571 | 395 | for key in ('provides', 'requires', 'peers'): | 410 | for key in ('provides', 'requires', 'peers'): |
4572 | 396 | section = md.get(key) | 411 | section = md.get(key) |
4573 | 397 | if section: | 412 | if section: |
4574 | 398 | rel_types.extend(section.keys()) | 413 | rel_types.extend(section.keys()) |
4575 | 399 | mdf.close() | ||
4576 | 400 | return rel_types | 414 | return rel_types |
4577 | 401 | 415 | ||
4578 | 402 | 416 | ||
4579 | 403 | @cached | 417 | @cached |
4580 | 418 | def charm_name(): | ||
4581 | 419 | """Get the name of the current charm as is specified on metadata.yaml""" | ||
4582 | 420 | return metadata().get('name') | ||
4583 | 421 | |||
4584 | 422 | |||
4585 | 423 | @cached | ||
4586 | 404 | def relations(): | 424 | def relations(): |
4587 | 405 | """Get a nested dictionary of relation data for all related units""" | 425 | """Get a nested dictionary of relation data for all related units""" |
4588 | 406 | rels = {} | 426 | rels = {} |
4589 | @@ -455,7 +475,7 @@ | |||
4590 | 455 | """Get the unit ID for the remote unit""" | 475 | """Get the unit ID for the remote unit""" |
4591 | 456 | _args = ['unit-get', '--format=json', attribute] | 476 | _args = ['unit-get', '--format=json', attribute] |
4592 | 457 | try: | 477 | try: |
4594 | 458 | return json.loads(subprocess.check_output(_args)) | 478 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
4595 | 459 | except ValueError: | 479 | except ValueError: |
4596 | 460 | return None | 480 | return None |
4597 | 461 | 481 | ||
4598 | 462 | 482 | ||
4599 | === modified file 'hooks/charmhelpers/core/host.py' | |||
4600 | --- hooks/charmhelpers/core/host.py 2014-10-21 07:28:36 +0000 | |||
4601 | +++ hooks/charmhelpers/core/host.py 2015-01-15 16:18:44 +0000 | |||
4602 | @@ -14,11 +14,12 @@ | |||
4603 | 14 | import subprocess | 14 | import subprocess |
4604 | 15 | import hashlib | 15 | import hashlib |
4605 | 16 | from contextlib import contextmanager | 16 | from contextlib import contextmanager |
4606 | 17 | |||
4607 | 18 | from collections import OrderedDict | 17 | from collections import OrderedDict |
4608 | 19 | 18 | ||
4611 | 20 | from hookenv import log | 19 | import six |
4612 | 21 | from fstab import Fstab | 20 | |
4613 | 21 | from .hookenv import log | ||
4614 | 22 | from .fstab import Fstab | ||
4615 | 22 | 23 | ||
4616 | 23 | 24 | ||
4617 | 24 | def service_start(service_name): | 25 | def service_start(service_name): |
4618 | @@ -54,7 +55,9 @@ | |||
4619 | 54 | def service_running(service): | 55 | def service_running(service): |
4620 | 55 | """Determine whether a system service is running""" | 56 | """Determine whether a system service is running""" |
4621 | 56 | try: | 57 | try: |
4623 | 57 | output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT) | 58 | output = subprocess.check_output( |
4624 | 59 | ['service', service, 'status'], | ||
4625 | 60 | stderr=subprocess.STDOUT).decode('UTF-8') | ||
4626 | 58 | except subprocess.CalledProcessError: | 61 | except subprocess.CalledProcessError: |
4627 | 59 | return False | 62 | return False |
4628 | 60 | else: | 63 | else: |
4629 | @@ -67,7 +70,9 @@ | |||
4630 | 67 | def service_available(service_name): | 70 | def service_available(service_name): |
4631 | 68 | """Determine whether a system service is available""" | 71 | """Determine whether a system service is available""" |
4632 | 69 | try: | 72 | try: |
4634 | 70 | subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT) | 73 | subprocess.check_output( |
4635 | 74 | ['service', service_name, 'status'], | ||
4636 | 75 | stderr=subprocess.STDOUT).decode('UTF-8') | ||
4637 | 71 | except subprocess.CalledProcessError as e: | 76 | except subprocess.CalledProcessError as e: |
4638 | 72 | return 'unrecognized service' not in e.output | 77 | return 'unrecognized service' not in e.output |
4639 | 73 | else: | 78 | else: |
4640 | @@ -96,6 +101,26 @@ | |||
4641 | 96 | return user_info | 101 | return user_info |
4642 | 97 | 102 | ||
4643 | 98 | 103 | ||
4644 | 104 | def add_group(group_name, system_group=False): | ||
4645 | 105 | """Add a group to the system""" | ||
4646 | 106 | try: | ||
4647 | 107 | group_info = grp.getgrnam(group_name) | ||
4648 | 108 | log('group {0} already exists!'.format(group_name)) | ||
4649 | 109 | except KeyError: | ||
4650 | 110 | log('creating group {0}'.format(group_name)) | ||
4651 | 111 | cmd = ['addgroup'] | ||
4652 | 112 | if system_group: | ||
4653 | 113 | cmd.append('--system') | ||
4654 | 114 | else: | ||
4655 | 115 | cmd.extend([ | ||
4656 | 116 | '--group', | ||
4657 | 117 | ]) | ||
4658 | 118 | cmd.append(group_name) | ||
4659 | 119 | subprocess.check_call(cmd) | ||
4660 | 120 | group_info = grp.getgrnam(group_name) | ||
4661 | 121 | return group_info | ||
4662 | 122 | |||
4663 | 123 | |||
4664 | 99 | def add_user_to_group(username, group): | 124 | def add_user_to_group(username, group): |
4665 | 100 | """Add a user to a group""" | 125 | """Add a user to a group""" |
4666 | 101 | cmd = [ | 126 | cmd = [ |
4667 | @@ -115,7 +140,7 @@ | |||
4668 | 115 | cmd.append(from_path) | 140 | cmd.append(from_path) |
4669 | 116 | cmd.append(to_path) | 141 | cmd.append(to_path) |
4670 | 117 | log(" ".join(cmd)) | 142 | log(" ".join(cmd)) |
4672 | 118 | return subprocess.check_output(cmd).strip() | 143 | return subprocess.check_output(cmd).decode('UTF-8').strip() |
4673 | 119 | 144 | ||
4674 | 120 | 145 | ||
4675 | 121 | def symlink(source, destination): | 146 | def symlink(source, destination): |
4676 | @@ -130,23 +155,26 @@ | |||
4677 | 130 | subprocess.check_call(cmd) | 155 | subprocess.check_call(cmd) |
4678 | 131 | 156 | ||
4679 | 132 | 157 | ||
4681 | 133 | def mkdir(path, owner='root', group='root', perms=0555, force=False): | 158 | def mkdir(path, owner='root', group='root', perms=0o555, force=False): |
4682 | 134 | """Create a directory""" | 159 | """Create a directory""" |
4683 | 135 | log("Making dir {} {}:{} {:o}".format(path, owner, group, | 160 | log("Making dir {} {}:{} {:o}".format(path, owner, group, |
4684 | 136 | perms)) | 161 | perms)) |
4685 | 137 | uid = pwd.getpwnam(owner).pw_uid | 162 | uid = pwd.getpwnam(owner).pw_uid |
4686 | 138 | gid = grp.getgrnam(group).gr_gid | 163 | gid = grp.getgrnam(group).gr_gid |
4687 | 139 | realpath = os.path.abspath(path) | 164 | realpath = os.path.abspath(path) |
4690 | 140 | if os.path.exists(realpath): | 165 | path_exists = os.path.exists(realpath) |
4691 | 141 | if force and not os.path.isdir(realpath): | 166 | if path_exists and force: |
4692 | 167 | if not os.path.isdir(realpath): | ||
4693 | 142 | log("Removing non-directory file {} prior to mkdir()".format(path)) | 168 | log("Removing non-directory file {} prior to mkdir()".format(path)) |
4694 | 143 | os.unlink(realpath) | 169 | os.unlink(realpath) |
4696 | 144 | else: | 170 | os.makedirs(realpath, perms) |
4697 | 171 | os.chown(realpath, uid, gid) | ||
4698 | 172 | elif not path_exists: | ||
4699 | 145 | os.makedirs(realpath, perms) | 173 | os.makedirs(realpath, perms) |
4704 | 146 | os.chown(realpath, uid, gid) | 174 | os.chown(realpath, uid, gid) |
4705 | 147 | 175 | ||
4706 | 148 | 176 | ||
4707 | 149 | def write_file(path, content, owner='root', group='root', perms=0444): | 177 | def write_file(path, content, owner='root', group='root', perms=0o444): |
4708 | 150 | """Create or overwrite a file with the contents of a string""" | 178 | """Create or overwrite a file with the contents of a string""" |
4709 | 151 | log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) | 179 | log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) |
4710 | 152 | uid = pwd.getpwnam(owner).pw_uid | 180 | uid = pwd.getpwnam(owner).pw_uid |
4711 | @@ -177,7 +205,7 @@ | |||
4712 | 177 | cmd_args.extend([device, mountpoint]) | 205 | cmd_args.extend([device, mountpoint]) |
4713 | 178 | try: | 206 | try: |
4714 | 179 | subprocess.check_output(cmd_args) | 207 | subprocess.check_output(cmd_args) |
4716 | 180 | except subprocess.CalledProcessError, e: | 208 | except subprocess.CalledProcessError as e: |
4717 | 181 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) | 209 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) |
4718 | 182 | return False | 210 | return False |
4719 | 183 | 211 | ||
4720 | @@ -191,7 +219,7 @@ | |||
4721 | 191 | cmd_args = ['umount', mountpoint] | 219 | cmd_args = ['umount', mountpoint] |
4722 | 192 | try: | 220 | try: |
4723 | 193 | subprocess.check_output(cmd_args) | 221 | subprocess.check_output(cmd_args) |
4725 | 194 | except subprocess.CalledProcessError, e: | 222 | except subprocess.CalledProcessError as e: |
4726 | 195 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) | 223 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) |
4727 | 196 | return False | 224 | return False |
4728 | 197 | 225 | ||
4729 | @@ -218,8 +246,8 @@ | |||
4730 | 218 | """ | 246 | """ |
4731 | 219 | if os.path.exists(path): | 247 | if os.path.exists(path): |
4732 | 220 | h = getattr(hashlib, hash_type)() | 248 | h = getattr(hashlib, hash_type)() |
4735 | 221 | with open(path, 'r') as source: | 249 | with open(path, 'rb') as source: |
4736 | 222 | h.update(source.read()) # IGNORE:E1101 - it does have update | 250 | h.update(source.read()) |
4737 | 223 | return h.hexdigest() | 251 | return h.hexdigest() |
4738 | 224 | else: | 252 | else: |
4739 | 225 | return None | 253 | return None |
4740 | @@ -297,7 +325,7 @@ | |||
4741 | 297 | if length is None: | 325 | if length is None: |
4742 | 298 | length = random.choice(range(35, 45)) | 326 | length = random.choice(range(35, 45)) |
4743 | 299 | alphanumeric_chars = [ | 327 | alphanumeric_chars = [ |
4745 | 300 | l for l in (string.letters + string.digits) | 328 | l for l in (string.ascii_letters + string.digits) |
4746 | 301 | if l not in 'l0QD1vAEIOUaeiou'] | 329 | if l not in 'l0QD1vAEIOUaeiou'] |
4747 | 302 | random_chars = [ | 330 | random_chars = [ |
4748 | 303 | random.choice(alphanumeric_chars) for _ in range(length)] | 331 | random.choice(alphanumeric_chars) for _ in range(length)] |
4749 | @@ -306,14 +334,14 @@ | |||
4750 | 306 | 334 | ||
4751 | 307 | def list_nics(nic_type): | 335 | def list_nics(nic_type): |
4752 | 308 | '''Return a list of nics of given type(s)''' | 336 | '''Return a list of nics of given type(s)''' |
4754 | 309 | if isinstance(nic_type, basestring): | 337 | if isinstance(nic_type, six.string_types): |
4755 | 310 | int_types = [nic_type] | 338 | int_types = [nic_type] |
4756 | 311 | else: | 339 | else: |
4757 | 312 | int_types = nic_type | 340 | int_types = nic_type |
4758 | 313 | interfaces = [] | 341 | interfaces = [] |
4759 | 314 | for int_type in int_types: | 342 | for int_type in int_types: |
4760 | 315 | cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] | 343 | cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] |
4762 | 316 | ip_output = subprocess.check_output(cmd).split('\n') | 344 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') |
4763 | 317 | ip_output = (line for line in ip_output if line) | 345 | ip_output = (line for line in ip_output if line) |
4764 | 318 | for line in ip_output: | 346 | for line in ip_output: |
4765 | 319 | if line.split()[1].startswith(int_type): | 347 | if line.split()[1].startswith(int_type): |
4766 | @@ -335,7 +363,7 @@ | |||
4767 | 335 | 363 | ||
4768 | 336 | def get_nic_mtu(nic): | 364 | def get_nic_mtu(nic): |
4769 | 337 | cmd = ['ip', 'addr', 'show', nic] | 365 | cmd = ['ip', 'addr', 'show', nic] |
4771 | 338 | ip_output = subprocess.check_output(cmd).split('\n') | 366 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') |
4772 | 339 | mtu = "" | 367 | mtu = "" |
4773 | 340 | for line in ip_output: | 368 | for line in ip_output: |
4774 | 341 | words = line.split() | 369 | words = line.split() |
4775 | @@ -346,7 +374,7 @@ | |||
4776 | 346 | 374 | ||
4777 | 347 | def get_nic_hwaddr(nic): | 375 | def get_nic_hwaddr(nic): |
4778 | 348 | cmd = ['ip', '-o', '-0', 'addr', 'show', nic] | 376 | cmd = ['ip', '-o', '-0', 'addr', 'show', nic] |
4780 | 349 | ip_output = subprocess.check_output(cmd) | 377 | ip_output = subprocess.check_output(cmd).decode('UTF-8') |
4781 | 350 | hwaddr = "" | 378 | hwaddr = "" |
4782 | 351 | words = ip_output.split() | 379 | words = ip_output.split() |
4783 | 352 | if 'link/ether' in words: | 380 | if 'link/ether' in words: |
4784 | @@ -363,8 +391,8 @@ | |||
4785 | 363 | 391 | ||
4786 | 364 | ''' | 392 | ''' |
4787 | 365 | import apt_pkg | 393 | import apt_pkg |
4788 | 366 | from charmhelpers.fetch import apt_cache | ||
4789 | 367 | if not pkgcache: | 394 | if not pkgcache: |
4790 | 395 | from charmhelpers.fetch import apt_cache | ||
4791 | 368 | pkgcache = apt_cache() | 396 | pkgcache = apt_cache() |
4792 | 369 | pkg = pkgcache[package] | 397 | pkg = pkgcache[package] |
4793 | 370 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 398 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
4794 | 371 | 399 | ||
4795 | === modified file 'hooks/charmhelpers/core/services/__init__.py' | |||
4796 | --- hooks/charmhelpers/core/services/__init__.py 2014-09-17 14:11:53 +0000 | |||
4797 | +++ hooks/charmhelpers/core/services/__init__.py 2015-01-15 16:18:44 +0000 | |||
4798 | @@ -1,2 +1,2 @@ | |||
4801 | 1 | from .base import * | 1 | from .base import * # NOQA |
4802 | 2 | from .helpers import * | 2 | from .helpers import * # NOQA |
4803 | 3 | 3 | ||
4804 | === modified file 'hooks/charmhelpers/core/services/helpers.py' | |||
4805 | --- hooks/charmhelpers/core/services/helpers.py 2014-09-27 17:33:59 +0000 | |||
4806 | +++ hooks/charmhelpers/core/services/helpers.py 2015-01-15 16:18:44 +0000 | |||
4807 | @@ -196,7 +196,7 @@ | |||
4808 | 196 | if not os.path.isabs(file_name): | 196 | if not os.path.isabs(file_name): |
4809 | 197 | file_name = os.path.join(hookenv.charm_dir(), file_name) | 197 | file_name = os.path.join(hookenv.charm_dir(), file_name) |
4810 | 198 | with open(file_name, 'w') as file_stream: | 198 | with open(file_name, 'w') as file_stream: |
4812 | 199 | os.fchmod(file_stream.fileno(), 0600) | 199 | os.fchmod(file_stream.fileno(), 0o600) |
4813 | 200 | yaml.dump(config_data, file_stream) | 200 | yaml.dump(config_data, file_stream) |
4814 | 201 | 201 | ||
4815 | 202 | def read_context(self, file_name): | 202 | def read_context(self, file_name): |
4816 | @@ -211,15 +211,19 @@ | |||
4817 | 211 | 211 | ||
4818 | 212 | class TemplateCallback(ManagerCallback): | 212 | class TemplateCallback(ManagerCallback): |
4819 | 213 | """ | 213 | """ |
4823 | 214 | Callback class that will render a Jinja2 template, for use as a ready action. | 214 | Callback class that will render a Jinja2 template, for use as a ready |
4824 | 215 | 215 | action. | |
4825 | 216 | :param str source: The template source file, relative to `$CHARM_DIR/templates` | 216 | |
4826 | 217 | :param str source: The template source file, relative to | ||
4827 | 218 | `$CHARM_DIR/templates` | ||
4828 | 219 | |||
4829 | 217 | :param str target: The target to write the rendered template to | 220 | :param str target: The target to write the rendered template to |
4830 | 218 | :param str owner: The owner of the rendered file | 221 | :param str owner: The owner of the rendered file |
4831 | 219 | :param str group: The group of the rendered file | 222 | :param str group: The group of the rendered file |
4832 | 220 | :param int perms: The permissions of the rendered file | 223 | :param int perms: The permissions of the rendered file |
4833 | 221 | """ | 224 | """ |
4835 | 222 | def __init__(self, source, target, owner='root', group='root', perms=0444): | 225 | def __init__(self, source, target, |
4836 | 226 | owner='root', group='root', perms=0o444): | ||
4837 | 223 | self.source = source | 227 | self.source = source |
4838 | 224 | self.target = target | 228 | self.target = target |
4839 | 225 | self.owner = owner | 229 | self.owner = owner |
4840 | 226 | 230 | ||
4841 | === modified file 'hooks/charmhelpers/core/templating.py' | |||
4842 | --- hooks/charmhelpers/core/templating.py 2014-09-17 14:11:53 +0000 | |||
4843 | +++ hooks/charmhelpers/core/templating.py 2015-01-15 16:18:44 +0000 | |||
4844 | @@ -4,7 +4,8 @@ | |||
4845 | 4 | from charmhelpers.core import hookenv | 4 | from charmhelpers.core import hookenv |
4846 | 5 | 5 | ||
4847 | 6 | 6 | ||
4849 | 7 | def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None): | 7 | def render(source, target, context, owner='root', group='root', |
4850 | 8 | perms=0o444, templates_dir=None): | ||
4851 | 8 | """ | 9 | """ |
4852 | 9 | Render a template. | 10 | Render a template. |
4853 | 10 | 11 | ||
4854 | @@ -47,5 +48,5 @@ | |||
4855 | 47 | level=hookenv.ERROR) | 48 | level=hookenv.ERROR) |
4856 | 48 | raise e | 49 | raise e |
4857 | 49 | content = template.render(context) | 50 | content = template.render(context) |
4859 | 50 | host.mkdir(os.path.dirname(target)) | 51 | host.mkdir(os.path.dirname(target), owner, group) |
4860 | 51 | host.write_file(target, content, owner, group, perms) | 52 | host.write_file(target, content, owner, group, perms) |
4861 | 52 | 53 | ||
4862 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
4863 | --- hooks/charmhelpers/fetch/__init__.py 2014-10-21 07:28:36 +0000 | |||
4864 | +++ hooks/charmhelpers/fetch/__init__.py 2015-01-15 16:18:44 +0000 | |||
4865 | @@ -5,10 +5,6 @@ | |||
4866 | 5 | from charmhelpers.core.host import ( | 5 | from charmhelpers.core.host import ( |
4867 | 6 | lsb_release | 6 | lsb_release |
4868 | 7 | ) | 7 | ) |
4869 | 8 | from urlparse import ( | ||
4870 | 9 | urlparse, | ||
4871 | 10 | urlunparse, | ||
4872 | 11 | ) | ||
4873 | 12 | import subprocess | 8 | import subprocess |
4874 | 13 | from charmhelpers.core.hookenv import ( | 9 | from charmhelpers.core.hookenv import ( |
4875 | 14 | config, | 10 | config, |
4876 | @@ -16,6 +12,12 @@ | |||
4877 | 16 | ) | 12 | ) |
4878 | 17 | import os | 13 | import os |
4879 | 18 | 14 | ||
4880 | 15 | import six | ||
4881 | 16 | if six.PY3: | ||
4882 | 17 | from urllib.parse import urlparse, urlunparse | ||
4883 | 18 | else: | ||
4884 | 19 | from urlparse import urlparse, urlunparse | ||
4885 | 20 | |||
4886 | 19 | 21 | ||
4887 | 20 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive | 22 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive |
4888 | 21 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main | 23 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
4889 | @@ -62,9 +64,16 @@ | |||
4890 | 62 | 'trusty-juno/updates': 'trusty-updates/juno', | 64 | 'trusty-juno/updates': 'trusty-updates/juno', |
4891 | 63 | 'trusty-updates/juno': 'trusty-updates/juno', | 65 | 'trusty-updates/juno': 'trusty-updates/juno', |
4892 | 64 | 'juno/proposed': 'trusty-proposed/juno', | 66 | 'juno/proposed': 'trusty-proposed/juno', |
4893 | 65 | 'juno/proposed': 'trusty-proposed/juno', | ||
4894 | 66 | 'trusty-juno/proposed': 'trusty-proposed/juno', | 67 | 'trusty-juno/proposed': 'trusty-proposed/juno', |
4895 | 67 | 'trusty-proposed/juno': 'trusty-proposed/juno', | 68 | 'trusty-proposed/juno': 'trusty-proposed/juno', |
4896 | 69 | # Kilo | ||
4897 | 70 | 'kilo': 'trusty-updates/kilo', | ||
4898 | 71 | 'trusty-kilo': 'trusty-updates/kilo', | ||
4899 | 72 | 'trusty-kilo/updates': 'trusty-updates/kilo', | ||
4900 | 73 | 'trusty-updates/kilo': 'trusty-updates/kilo', | ||
4901 | 74 | 'kilo/proposed': 'trusty-proposed/kilo', | ||
4902 | 75 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', | ||
4903 | 76 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', | ||
4904 | 68 | } | 77 | } |
4905 | 69 | 78 | ||
4906 | 70 | # The order of this list is very important. Handlers should be listed in from | 79 | # The order of this list is very important. Handlers should be listed in from |
4907 | @@ -149,7 +158,7 @@ | |||
4908 | 149 | cmd = ['apt-get', '--assume-yes'] | 158 | cmd = ['apt-get', '--assume-yes'] |
4909 | 150 | cmd.extend(options) | 159 | cmd.extend(options) |
4910 | 151 | cmd.append('install') | 160 | cmd.append('install') |
4912 | 152 | if isinstance(packages, basestring): | 161 | if isinstance(packages, six.string_types): |
4913 | 153 | cmd.append(packages) | 162 | cmd.append(packages) |
4914 | 154 | else: | 163 | else: |
4915 | 155 | cmd.extend(packages) | 164 | cmd.extend(packages) |
4916 | @@ -182,7 +191,7 @@ | |||
4917 | 182 | def apt_purge(packages, fatal=False): | 191 | def apt_purge(packages, fatal=False): |
4918 | 183 | """Purge one or more packages""" | 192 | """Purge one or more packages""" |
4919 | 184 | cmd = ['apt-get', '--assume-yes', 'purge'] | 193 | cmd = ['apt-get', '--assume-yes', 'purge'] |
4921 | 185 | if isinstance(packages, basestring): | 194 | if isinstance(packages, six.string_types): |
4922 | 186 | cmd.append(packages) | 195 | cmd.append(packages) |
4923 | 187 | else: | 196 | else: |
4924 | 188 | cmd.extend(packages) | 197 | cmd.extend(packages) |
4925 | @@ -193,7 +202,7 @@ | |||
4926 | 193 | def apt_hold(packages, fatal=False): | 202 | def apt_hold(packages, fatal=False): |
4927 | 194 | """Hold one or more packages""" | 203 | """Hold one or more packages""" |
4928 | 195 | cmd = ['apt-mark', 'hold'] | 204 | cmd = ['apt-mark', 'hold'] |
4930 | 196 | if isinstance(packages, basestring): | 205 | if isinstance(packages, six.string_types): |
4931 | 197 | cmd.append(packages) | 206 | cmd.append(packages) |
4932 | 198 | else: | 207 | else: |
4933 | 199 | cmd.extend(packages) | 208 | cmd.extend(packages) |
4934 | @@ -256,11 +265,11 @@ | |||
4935 | 256 | elif source == 'distro': | 265 | elif source == 'distro': |
4936 | 257 | pass | 266 | pass |
4937 | 258 | else: | 267 | else: |
4939 | 259 | raise SourceConfigError("Unknown source: {!r}".format(source)) | 268 | log("Unknown source: {!r}".format(source)) |
4940 | 260 | 269 | ||
4941 | 261 | if key: | 270 | if key: |
4942 | 262 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: | 271 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: |
4944 | 263 | with NamedTemporaryFile() as key_file: | 272 | with NamedTemporaryFile('w+') as key_file: |
4945 | 264 | key_file.write(key) | 273 | key_file.write(key) |
4946 | 265 | key_file.flush() | 274 | key_file.flush() |
4947 | 266 | key_file.seek(0) | 275 | key_file.seek(0) |
4948 | @@ -297,14 +306,14 @@ | |||
4949 | 297 | sources = safe_load((config(sources_var) or '').strip()) or [] | 306 | sources = safe_load((config(sources_var) or '').strip()) or [] |
4950 | 298 | keys = safe_load((config(keys_var) or '').strip()) or None | 307 | keys = safe_load((config(keys_var) or '').strip()) or None |
4951 | 299 | 308 | ||
4953 | 300 | if isinstance(sources, basestring): | 309 | if isinstance(sources, six.string_types): |
4954 | 301 | sources = [sources] | 310 | sources = [sources] |
4955 | 302 | 311 | ||
4956 | 303 | if keys is None: | 312 | if keys is None: |
4957 | 304 | for source in sources: | 313 | for source in sources: |
4958 | 305 | add_source(source, None) | 314 | add_source(source, None) |
4959 | 306 | else: | 315 | else: |
4961 | 307 | if isinstance(keys, basestring): | 316 | if isinstance(keys, six.string_types): |
4962 | 308 | keys = [keys] | 317 | keys = [keys] |
4963 | 309 | 318 | ||
4964 | 310 | if len(sources) != len(keys): | 319 | if len(sources) != len(keys): |
4965 | @@ -401,7 +410,7 @@ | |||
4966 | 401 | while result is None or result == APT_NO_LOCK: | 410 | while result is None or result == APT_NO_LOCK: |
4967 | 402 | try: | 411 | try: |
4968 | 403 | result = subprocess.check_call(cmd, env=env) | 412 | result = subprocess.check_call(cmd, env=env) |
4970 | 404 | except subprocess.CalledProcessError, e: | 413 | except subprocess.CalledProcessError as e: |
4971 | 405 | retry_count = retry_count + 1 | 414 | retry_count = retry_count + 1 |
4972 | 406 | if retry_count > APT_NO_LOCK_RETRY_COUNT: | 415 | if retry_count > APT_NO_LOCK_RETRY_COUNT: |
4973 | 407 | raise | 416 | raise |
4974 | 408 | 417 | ||
4975 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
4976 | --- hooks/charmhelpers/fetch/archiveurl.py 2014-09-27 17:33:59 +0000 | |||
4977 | +++ hooks/charmhelpers/fetch/archiveurl.py 2015-01-15 16:18:44 +0000 | |||
4978 | @@ -1,8 +1,23 @@ | |||
4979 | 1 | import os | 1 | import os |
4980 | 2 | import urllib2 | ||
4981 | 3 | from urllib import urlretrieve | ||
4982 | 4 | import urlparse | ||
4983 | 5 | import hashlib | 2 | import hashlib |
4984 | 3 | import re | ||
4985 | 4 | |||
4986 | 5 | import six | ||
4987 | 6 | if six.PY3: | ||
4988 | 7 | from urllib.request import ( | ||
4989 | 8 | build_opener, install_opener, urlopen, urlretrieve, | ||
4990 | 9 | HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, | ||
4991 | 10 | ) | ||
4992 | 11 | from urllib.parse import urlparse, urlunparse, parse_qs | ||
4993 | 12 | from urllib.error import URLError | ||
4994 | 13 | else: | ||
4995 | 14 | from urllib import urlretrieve | ||
4996 | 15 | from urllib2 import ( | ||
4997 | 16 | build_opener, install_opener, urlopen, | ||
4998 | 17 | HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, | ||
4999 | 18 | URLError | ||
5000 | 19 | ) |
The diff has been truncated for viewing.