Merge lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2 into lp:charms/etherpad-lite
- Precise Pangolin (12.04)
- lp1247636-fix-try2
- Merge into trunk
Proposed by
Erik B. Andersen
Status: | Merged |
---|---|
Merged at revision: | 14 |
Proposed branch: | lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2 |
Merge into: | lp:charms/etherpad-lite |
Diff against target: |
4665 lines (+588/-3619) 28 files modified
charm-helpers.yaml (+6/-0) hooks/charmhelpers/contrib/charmhelpers/IMPORT (+0/-4) hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-183) hooks/charmhelpers/contrib/charmsupport/IMPORT (+0/-14) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-217) hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-156) hooks/charmhelpers/contrib/hahelpers/IMPORT (+0/-7) hooks/charmhelpers/contrib/hahelpers/apache_utils.py (+0/-196) hooks/charmhelpers/contrib/hahelpers/ceph_utils.py (+0/-256) hooks/charmhelpers/contrib/hahelpers/cluster_utils.py (+0/-130) hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py (+0/-55) hooks/charmhelpers/contrib/hahelpers/utils.py (+0/-332) hooks/charmhelpers/contrib/jujugui/IMPORT (+0/-4) hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602) hooks/charmhelpers/contrib/openstack/IMPORT (+0/-9) hooks/charmhelpers/contrib/openstack/nova/essex (+0/-43) hooks/charmhelpers/contrib/openstack/nova/folsom (+0/-81) hooks/charmhelpers/contrib/openstack/nova/nova-common (+0/-147) hooks/charmhelpers/contrib/openstack/openstack-common (+0/-781) hooks/charmhelpers/contrib/openstack/openstack_utils.py (+0/-228) hooks/charmhelpers/core/hookenv.py (+153/-45) hooks/charmhelpers/core/host.py (+133/-74) hooks/charmhelpers/fetch/__init__.py (+194/-12) hooks/charmhelpers/fetch/archiveurl.py (+48/-0) hooks/charmhelpers/fetch/bzrurl.py (+49/-0) hooks/charmhelpers/payload/__init__.py (+0/-1) hooks/charmhelpers/payload/execd.py (+0/-40) hooks/hooks.py (+5/-2) |
To merge this branch: | bzr merge lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Marco Ceppi (community) | Approve | ||
Review via email:
|
Commit message
Description of the change
Updates hooks.py and adds charm-helpers.yaml so that the charm-helpers sync tool can be used.
A charm-helpers sync was then performed, which pulls in a version that should fix bug LP #1247636.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file 'charm-helpers.yaml' | |||
2 | --- charm-helpers.yaml 1970-01-01 00:00:00 +0000 | |||
3 | +++ charm-helpers.yaml 2013-11-05 18:43:49 +0000 | |||
4 | @@ -0,0 +1,6 @@ | |||
5 | 1 | destination: "hooks/charmhelpers" | ||
6 | 2 | branch: "lp:charm-helpers" | ||
7 | 3 | include: | ||
8 | 4 | - core | ||
9 | 5 | - fetch | ||
10 | 6 | |||
11 | 0 | 7 | ||
12 | === removed directory 'hooks/charmhelpers/contrib' | |||
13 | === removed file 'hooks/charmhelpers/contrib/__init__.py' | |||
14 | === removed directory 'hooks/charmhelpers/contrib/charmhelpers' | |||
15 | === removed file 'hooks/charmhelpers/contrib/charmhelpers/IMPORT' | |||
16 | --- hooks/charmhelpers/contrib/charmhelpers/IMPORT 2013-06-07 09:39:50 +0000 | |||
17 | +++ hooks/charmhelpers/contrib/charmhelpers/IMPORT 1970-01-01 00:00:00 +0000 | |||
18 | @@ -1,4 +0,0 @@ | |||
19 | 1 | Source lp:charm-tools/trunk | ||
20 | 2 | |||
21 | 3 | charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py | ||
22 | 4 | charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py | ||
23 | 5 | 0 | ||
24 | === removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py' | |||
25 | --- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2013-06-07 09:39:50 +0000 | |||
26 | +++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000 | |||
27 | @@ -1,183 +0,0 @@ | |||
28 | 1 | # Copyright 2012 Canonical Ltd. This software is licensed under the | ||
29 | 2 | # GNU Affero General Public License version 3 (see the file LICENSE). | ||
30 | 3 | |||
31 | 4 | import warnings | ||
32 | 5 | warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) | ||
33 | 6 | |||
34 | 7 | """Helper functions for writing Juju charms in Python.""" | ||
35 | 8 | |||
36 | 9 | __metaclass__ = type | ||
37 | 10 | __all__ = [ | ||
38 | 11 | #'get_config', # core.hookenv.config() | ||
39 | 12 | #'log', # core.hookenv.log() | ||
40 | 13 | #'log_entry', # core.hookenv.log() | ||
41 | 14 | #'log_exit', # core.hookenv.log() | ||
42 | 15 | #'relation_get', # core.hookenv.relation_get() | ||
43 | 16 | #'relation_set', # core.hookenv.relation_set() | ||
44 | 17 | #'relation_ids', # core.hookenv.relation_ids() | ||
45 | 18 | #'relation_list', # core.hookenv.relation_units() | ||
46 | 19 | #'config_get', # core.hookenv.config() | ||
47 | 20 | #'unit_get', # core.hookenv.unit_get() | ||
48 | 21 | #'open_port', # core.hookenv.open_port() | ||
49 | 22 | #'close_port', # core.hookenv.close_port() | ||
50 | 23 | #'service_control', # core.host.service() | ||
51 | 24 | 'unit_info', # client-side, NOT IMPLEMENTED | ||
52 | 25 | 'wait_for_machine', # client-side, NOT IMPLEMENTED | ||
53 | 26 | 'wait_for_page_contents', # client-side, NOT IMPLEMENTED | ||
54 | 27 | 'wait_for_relation', # client-side, NOT IMPLEMENTED | ||
55 | 28 | 'wait_for_unit', # client-side, NOT IMPLEMENTED | ||
56 | 29 | ] | ||
57 | 30 | |||
58 | 31 | import operator | ||
59 | 32 | from shelltoolbox import ( | ||
60 | 33 | command, | ||
61 | 34 | ) | ||
62 | 35 | import tempfile | ||
63 | 36 | import time | ||
64 | 37 | import urllib2 | ||
65 | 38 | import yaml | ||
66 | 39 | |||
67 | 40 | SLEEP_AMOUNT = 0.1 | ||
68 | 41 | # We create a juju_status Command here because it makes testing much, | ||
69 | 42 | # much easier. | ||
70 | 43 | juju_status = lambda: command('juju')('status') | ||
71 | 44 | |||
72 | 45 | # re-implemented as charmhelpers.fetch.configure_sources() | ||
73 | 46 | #def configure_source(update=False): | ||
74 | 47 | # source = config_get('source') | ||
75 | 48 | # if ((source.startswith('ppa:') or | ||
76 | 49 | # source.startswith('cloud:') or | ||
77 | 50 | # source.startswith('http:'))): | ||
78 | 51 | # run('add-apt-repository', source) | ||
79 | 52 | # if source.startswith("http:"): | ||
80 | 53 | # run('apt-key', 'import', config_get('key')) | ||
81 | 54 | # if update: | ||
82 | 55 | # run('apt-get', 'update') | ||
83 | 56 | |||
84 | 57 | # DEPRECATED: client-side only | ||
85 | 58 | def make_charm_config_file(charm_config): | ||
86 | 59 | charm_config_file = tempfile.NamedTemporaryFile() | ||
87 | 60 | charm_config_file.write(yaml.dump(charm_config)) | ||
88 | 61 | charm_config_file.flush() | ||
89 | 62 | # The NamedTemporaryFile instance is returned instead of just the name | ||
90 | 63 | # because we want to take advantage of garbage collection-triggered | ||
91 | 64 | # deletion of the temp file when it goes out of scope in the caller. | ||
92 | 65 | return charm_config_file | ||
93 | 66 | |||
94 | 67 | |||
95 | 68 | # DEPRECATED: client-side only | ||
96 | 69 | def unit_info(service_name, item_name, data=None, unit=None): | ||
97 | 70 | if data is None: | ||
98 | 71 | data = yaml.safe_load(juju_status()) | ||
99 | 72 | service = data['services'].get(service_name) | ||
100 | 73 | if service is None: | ||
101 | 74 | # XXX 2012-02-08 gmb: | ||
102 | 75 | # This allows us to cope with the race condition that we | ||
103 | 76 | # have between deploying a service and having it come up in | ||
104 | 77 | # `juju status`. We could probably do with cleaning it up so | ||
105 | 78 | # that it fails a bit more noisily after a while. | ||
106 | 79 | return '' | ||
107 | 80 | units = service['units'] | ||
108 | 81 | if unit is not None: | ||
109 | 82 | item = units[unit][item_name] | ||
110 | 83 | else: | ||
111 | 84 | # It might seem odd to sort the units here, but we do it to | ||
112 | 85 | # ensure that when no unit is specified, the first unit for the | ||
113 | 86 | # service (or at least the one with the lowest number) is the | ||
114 | 87 | # one whose data gets returned. | ||
115 | 88 | sorted_unit_names = sorted(units.keys()) | ||
116 | 89 | item = units[sorted_unit_names[0]][item_name] | ||
117 | 90 | return item | ||
118 | 91 | |||
119 | 92 | |||
120 | 93 | # DEPRECATED: client-side only | ||
121 | 94 | def get_machine_data(): | ||
122 | 95 | return yaml.safe_load(juju_status())['machines'] | ||
123 | 96 | |||
124 | 97 | |||
125 | 98 | # DEPRECATED: client-side only | ||
126 | 99 | def wait_for_machine(num_machines=1, timeout=300): | ||
127 | 100 | """Wait `timeout` seconds for `num_machines` machines to come up. | ||
128 | 101 | |||
129 | 102 | This wait_for... function can be called by other wait_for functions | ||
130 | 103 | whose timeouts might be too short in situations where only a bare | ||
131 | 104 | Juju setup has been bootstrapped. | ||
132 | 105 | |||
133 | 106 | :return: A tuple of (num_machines, time_taken). This is used for | ||
134 | 107 | testing. | ||
135 | 108 | """ | ||
136 | 109 | # You may think this is a hack, and you'd be right. The easiest way | ||
137 | 110 | # to tell what environment we're working in (LXC vs EC2) is to check | ||
138 | 111 | # the dns-name of the first machine. If it's localhost we're in LXC | ||
139 | 112 | # and we can just return here. | ||
140 | 113 | if get_machine_data()[0]['dns-name'] == 'localhost': | ||
141 | 114 | return 1, 0 | ||
142 | 115 | start_time = time.time() | ||
143 | 116 | while True: | ||
144 | 117 | # Drop the first machine, since it's the Zookeeper and that's | ||
145 | 118 | # not a machine that we need to wait for. This will only work | ||
146 | 119 | # for EC2 environments, which is why we return early above if | ||
147 | 120 | # we're in LXC. | ||
148 | 121 | machine_data = get_machine_data() | ||
149 | 122 | non_zookeeper_machines = [ | ||
150 | 123 | machine_data[key] for key in machine_data.keys()[1:]] | ||
151 | 124 | if len(non_zookeeper_machines) >= num_machines: | ||
152 | 125 | all_machines_running = True | ||
153 | 126 | for machine in non_zookeeper_machines: | ||
154 | 127 | if machine.get('instance-state') != 'running': | ||
155 | 128 | all_machines_running = False | ||
156 | 129 | break | ||
157 | 130 | if all_machines_running: | ||
158 | 131 | break | ||
159 | 132 | if time.time() - start_time >= timeout: | ||
160 | 133 | raise RuntimeError('timeout waiting for service to start') | ||
161 | 134 | time.sleep(SLEEP_AMOUNT) | ||
162 | 135 | return num_machines, time.time() - start_time | ||
163 | 136 | |||
164 | 137 | |||
165 | 138 | # DEPRECATED: client-side only | ||
166 | 139 | def wait_for_unit(service_name, timeout=480): | ||
167 | 140 | """Wait `timeout` seconds for a given service name to come up.""" | ||
168 | 141 | wait_for_machine(num_machines=1) | ||
169 | 142 | start_time = time.time() | ||
170 | 143 | while True: | ||
171 | 144 | state = unit_info(service_name, 'agent-state') | ||
172 | 145 | if 'error' in state or state == 'started': | ||
173 | 146 | break | ||
174 | 147 | if time.time() - start_time >= timeout: | ||
175 | 148 | raise RuntimeError('timeout waiting for service to start') | ||
176 | 149 | time.sleep(SLEEP_AMOUNT) | ||
177 | 150 | if state != 'started': | ||
178 | 151 | raise RuntimeError('unit did not start, agent-state: ' + state) | ||
179 | 152 | |||
180 | 153 | |||
181 | 154 | # DEPRECATED: client-side only | ||
182 | 155 | def wait_for_relation(service_name, relation_name, timeout=120): | ||
183 | 156 | """Wait `timeout` seconds for a given relation to come up.""" | ||
184 | 157 | start_time = time.time() | ||
185 | 158 | while True: | ||
186 | 159 | relation = unit_info(service_name, 'relations').get(relation_name) | ||
187 | 160 | if relation is not None and relation['state'] == 'up': | ||
188 | 161 | break | ||
189 | 162 | if time.time() - start_time >= timeout: | ||
190 | 163 | raise RuntimeError('timeout waiting for relation to be up') | ||
191 | 164 | time.sleep(SLEEP_AMOUNT) | ||
192 | 165 | |||
193 | 166 | |||
194 | 167 | # DEPRECATED: client-side only | ||
195 | 168 | def wait_for_page_contents(url, contents, timeout=120, validate=None): | ||
196 | 169 | if validate is None: | ||
197 | 170 | validate = operator.contains | ||
198 | 171 | start_time = time.time() | ||
199 | 172 | while True: | ||
200 | 173 | try: | ||
201 | 174 | stream = urllib2.urlopen(url) | ||
202 | 175 | except (urllib2.HTTPError, urllib2.URLError): | ||
203 | 176 | pass | ||
204 | 177 | else: | ||
205 | 178 | page = stream.read() | ||
206 | 179 | if validate(page, contents): | ||
207 | 180 | return page | ||
208 | 181 | if time.time() - start_time >= timeout: | ||
209 | 182 | raise RuntimeError('timeout waiting for contents of ' + url) | ||
210 | 183 | time.sleep(SLEEP_AMOUNT) | ||
211 | 184 | 0 | ||
212 | === removed directory 'hooks/charmhelpers/contrib/charmsupport' | |||
213 | === removed file 'hooks/charmhelpers/contrib/charmsupport/IMPORT' | |||
214 | --- hooks/charmhelpers/contrib/charmsupport/IMPORT 2013-06-07 09:39:50 +0000 | |||
215 | +++ hooks/charmhelpers/contrib/charmsupport/IMPORT 1970-01-01 00:00:00 +0000 | |||
216 | @@ -1,14 +0,0 @@ | |||
217 | 1 | Source: lp:charmsupport/trunk | ||
218 | 2 | |||
219 | 3 | charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py | ||
220 | 4 | charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py | ||
221 | 5 | charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py | ||
222 | 6 | charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py | ||
223 | 7 | charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py | ||
224 | 8 | |||
225 | 9 | charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py | ||
226 | 10 | charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py | ||
227 | 11 | charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py | ||
228 | 12 | charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py | ||
229 | 13 | |||
230 | 14 | charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport | ||
231 | 15 | 0 | ||
232 | === removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py' | |||
233 | === removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' | |||
234 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2013-06-07 09:39:50 +0000 | |||
235 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000 | |||
236 | @@ -1,217 +0,0 @@ | |||
237 | 1 | """Compatibility with the nrpe-external-master charm""" | ||
238 | 2 | # Copyright 2012 Canonical Ltd. | ||
239 | 3 | # | ||
240 | 4 | # Authors: | ||
241 | 5 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> | ||
242 | 6 | |||
243 | 7 | import subprocess | ||
244 | 8 | import pwd | ||
245 | 9 | import grp | ||
246 | 10 | import os | ||
247 | 11 | import re | ||
248 | 12 | import shlex | ||
249 | 13 | import yaml | ||
250 | 14 | |||
251 | 15 | from charmhelpers.core.hookenv import ( | ||
252 | 16 | config, | ||
253 | 17 | local_unit, | ||
254 | 18 | log, | ||
255 | 19 | relation_ids, | ||
256 | 20 | relation_set, | ||
257 | 21 | ) | ||
258 | 22 | from charmhelpers.core.host import service | ||
259 | 23 | |||
260 | 24 | # This module adds compatibility with the nrpe-external-master and plain nrpe | ||
261 | 25 | # subordinate charms. To use it in your charm: | ||
262 | 26 | # | ||
263 | 27 | # 1. Update metadata.yaml | ||
264 | 28 | # | ||
265 | 29 | # provides: | ||
266 | 30 | # (...) | ||
267 | 31 | # nrpe-external-master: | ||
268 | 32 | # interface: nrpe-external-master | ||
269 | 33 | # scope: container | ||
270 | 34 | # | ||
271 | 35 | # and/or | ||
272 | 36 | # | ||
273 | 37 | # provides: | ||
274 | 38 | # (...) | ||
275 | 39 | # local-monitors: | ||
276 | 40 | # interface: local-monitors | ||
277 | 41 | # scope: container | ||
278 | 42 | |||
279 | 43 | # | ||
280 | 44 | # 2. Add the following to config.yaml | ||
281 | 45 | # | ||
282 | 46 | # nagios_context: | ||
283 | 47 | # default: "juju" | ||
284 | 48 | # type: string | ||
285 | 49 | # description: | | ||
286 | 50 | # Used by the nrpe subordinate charms. | ||
287 | 51 | # A string that will be prepended to instance name to set the host name | ||
288 | 52 | # in nagios. So for instance the hostname would be something like: | ||
289 | 53 | # juju-myservice-0 | ||
290 | 54 | # If you're running multiple environments with the same services in them | ||
291 | 55 | # this allows you to differentiate between them. | ||
292 | 56 | # | ||
293 | 57 | # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master | ||
294 | 58 | # | ||
295 | 59 | # 4. Update your hooks.py with something like this: | ||
296 | 60 | # | ||
297 | 61 | # from charmsupport.nrpe import NRPE | ||
298 | 62 | # (...) | ||
299 | 63 | # def update_nrpe_config(): | ||
300 | 64 | # nrpe_compat = NRPE() | ||
301 | 65 | # nrpe_compat.add_check( | ||
302 | 66 | # shortname = "myservice", | ||
303 | 67 | # description = "Check MyService", | ||
304 | 68 | # check_cmd = "check_http -w 2 -c 10 http://localhost" | ||
305 | 69 | # ) | ||
306 | 70 | # nrpe_compat.add_check( | ||
307 | 71 | # "myservice_other", | ||
308 | 72 | # "Check for widget failures", | ||
309 | 73 | # check_cmd = "/srv/myapp/scripts/widget_check" | ||
310 | 74 | # ) | ||
311 | 75 | # nrpe_compat.write() | ||
312 | 76 | # | ||
313 | 77 | # def config_changed(): | ||
314 | 78 | # (...) | ||
315 | 79 | # update_nrpe_config() | ||
316 | 80 | # | ||
317 | 81 | # def nrpe_external_master_relation_changed(): | ||
318 | 82 | # update_nrpe_config() | ||
319 | 83 | # | ||
320 | 84 | # def local_monitors_relation_changed(): | ||
321 | 85 | # update_nrpe_config() | ||
322 | 86 | # | ||
323 | 87 | # 5. ln -s hooks.py nrpe-external-master-relation-changed | ||
324 | 88 | # ln -s hooks.py local-monitors-relation-changed | ||
325 | 89 | |||
326 | 90 | |||
327 | 91 | class CheckException(Exception): | ||
328 | 92 | pass | ||
329 | 93 | |||
330 | 94 | |||
331 | 95 | class Check(object): | ||
332 | 96 | shortname_re = '[A-Za-z0-9-_]+$' | ||
333 | 97 | service_template = (""" | ||
334 | 98 | #--------------------------------------------------- | ||
335 | 99 | # This file is Juju managed | ||
336 | 100 | #--------------------------------------------------- | ||
337 | 101 | define service {{ | ||
338 | 102 | use active-service | ||
339 | 103 | host_name {nagios_hostname} | ||
340 | 104 | service_description {nagios_hostname}[{shortname}] """ | ||
341 | 105 | """{description} | ||
342 | 106 | check_command check_nrpe!{command} | ||
343 | 107 | servicegroups {nagios_servicegroup} | ||
344 | 108 | }} | ||
345 | 109 | """) | ||
346 | 110 | |||
347 | 111 | def __init__(self, shortname, description, check_cmd): | ||
348 | 112 | super(Check, self).__init__() | ||
349 | 113 | # XXX: could be better to calculate this from the service name | ||
350 | 114 | if not re.match(self.shortname_re, shortname): | ||
351 | 115 | raise CheckException("shortname must match {}".format( | ||
352 | 116 | Check.shortname_re)) | ||
353 | 117 | self.shortname = shortname | ||
354 | 118 | self.command = "check_{}".format(shortname) | ||
355 | 119 | # Note: a set of invalid characters is defined by the | ||
356 | 120 | # Nagios server config | ||
357 | 121 | # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= | ||
358 | 122 | self.description = description | ||
359 | 123 | self.check_cmd = self._locate_cmd(check_cmd) | ||
360 | 124 | |||
361 | 125 | def _locate_cmd(self, check_cmd): | ||
362 | 126 | search_path = ( | ||
363 | 127 | '/', | ||
364 | 128 | os.path.join(os.environ['CHARM_DIR'], | ||
365 | 129 | 'files/nrpe-external-master'), | ||
366 | 130 | '/usr/lib/nagios/plugins', | ||
367 | 131 | ) | ||
368 | 132 | parts = shlex.split(check_cmd) | ||
369 | 133 | for path in search_path: | ||
370 | 134 | if os.path.exists(os.path.join(path, parts[0])): | ||
371 | 135 | command = os.path.join(path, parts[0]) | ||
372 | 136 | if len(parts) > 1: | ||
373 | 137 | command += " " + " ".join(parts[1:]) | ||
374 | 138 | return command | ||
375 | 139 | log('Check command not found: {}'.format(parts[0])) | ||
376 | 140 | return '' | ||
377 | 141 | |||
378 | 142 | def write(self, nagios_context, hostname): | ||
379 | 143 | nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( | ||
380 | 144 | self.command) | ||
381 | 145 | with open(nrpe_check_file, 'w') as nrpe_check_config: | ||
382 | 146 | nrpe_check_config.write("# check {}\n".format(self.shortname)) | ||
383 | 147 | nrpe_check_config.write("command[{}]={}\n".format( | ||
384 | 148 | self.command, self.check_cmd)) | ||
385 | 149 | |||
386 | 150 | if not os.path.exists(NRPE.nagios_exportdir): | ||
387 | 151 | log('Not writing service config as {} is not accessible'.format( | ||
388 | 152 | NRPE.nagios_exportdir)) | ||
389 | 153 | else: | ||
390 | 154 | self.write_service_config(nagios_context, hostname) | ||
391 | 155 | |||
392 | 156 | def write_service_config(self, nagios_context, hostname): | ||
393 | 157 | for f in os.listdir(NRPE.nagios_exportdir): | ||
394 | 158 | if re.search('.*{}.cfg'.format(self.command), f): | ||
395 | 159 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
396 | 160 | |||
397 | 161 | templ_vars = { | ||
398 | 162 | 'nagios_hostname': hostname, | ||
399 | 163 | 'nagios_servicegroup': nagios_context, | ||
400 | 164 | 'description': self.description, | ||
401 | 165 | 'shortname': self.shortname, | ||
402 | 166 | 'command': self.command, | ||
403 | 167 | } | ||
404 | 168 | nrpe_service_text = Check.service_template.format(**templ_vars) | ||
405 | 169 | nrpe_service_file = '{}/service__{}_{}.cfg'.format( | ||
406 | 170 | NRPE.nagios_exportdir, hostname, self.command) | ||
407 | 171 | with open(nrpe_service_file, 'w') as nrpe_service_config: | ||
408 | 172 | nrpe_service_config.write(str(nrpe_service_text)) | ||
409 | 173 | |||
410 | 174 | def run(self): | ||
411 | 175 | subprocess.call(self.check_cmd) | ||
412 | 176 | |||
413 | 177 | |||
414 | 178 | class NRPE(object): | ||
415 | 179 | nagios_logdir = '/var/log/nagios' | ||
416 | 180 | nagios_exportdir = '/var/lib/nagios/export' | ||
417 | 181 | nrpe_confdir = '/etc/nagios/nrpe.d' | ||
418 | 182 | |||
419 | 183 | def __init__(self): | ||
420 | 184 | super(NRPE, self).__init__() | ||
421 | 185 | self.config = config() | ||
422 | 186 | self.nagios_context = self.config['nagios_context'] | ||
423 | 187 | self.unit_name = local_unit().replace('/', '-') | ||
424 | 188 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | ||
425 | 189 | self.checks = [] | ||
426 | 190 | |||
427 | 191 | def add_check(self, *args, **kwargs): | ||
428 | 192 | self.checks.append(Check(*args, **kwargs)) | ||
429 | 193 | |||
430 | 194 | def write(self): | ||
431 | 195 | try: | ||
432 | 196 | nagios_uid = pwd.getpwnam('nagios').pw_uid | ||
433 | 197 | nagios_gid = grp.getgrnam('nagios').gr_gid | ||
434 | 198 | except: | ||
435 | 199 | log("Nagios user not set up, nrpe checks not updated") | ||
436 | 200 | return | ||
437 | 201 | |||
438 | 202 | if not os.path.exists(NRPE.nagios_logdir): | ||
439 | 203 | os.mkdir(NRPE.nagios_logdir) | ||
440 | 204 | os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) | ||
441 | 205 | |||
442 | 206 | nrpe_monitors = {} | ||
443 | 207 | monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} | ||
444 | 208 | for nrpecheck in self.checks: | ||
445 | 209 | nrpecheck.write(self.nagios_context, self.hostname) | ||
446 | 210 | nrpe_monitors[nrpecheck.shortname] = { | ||
447 | 211 | "command": nrpecheck.command, | ||
448 | 212 | } | ||
449 | 213 | |||
450 | 214 | service('restart', 'nagios-nrpe-server') | ||
451 | 215 | |||
452 | 216 | for rid in relation_ids("local-monitors"): | ||
453 | 217 | relation_set(relation_id=rid, monitors=yaml.dump(monitors)) | ||
454 | 218 | 0 | ||
455 | === removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py' | |||
456 | --- hooks/charmhelpers/contrib/charmsupport/volumes.py 2013-06-07 09:39:50 +0000 | |||
457 | +++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000 | |||
458 | @@ -1,156 +0,0 @@ | |||
459 | 1 | ''' | ||
460 | 2 | Functions for managing volumes in juju units. One volume is supported per unit. | ||
461 | 3 | Subordinates may have their own storage, provided it is on its own partition. | ||
462 | 4 | |||
463 | 5 | Configuration stanzas: | ||
464 | 6 | volume-ephemeral: | ||
465 | 7 | type: boolean | ||
466 | 8 | default: true | ||
467 | 9 | description: > | ||
468 | 10 | If false, a volume is mounted as specified in "volume-map" | ||
469 | 11 | If true, ephemeral storage will be used, meaning that log data | ||
470 | 12 | will only exist as long as the machine. YOU HAVE BEEN WARNED. | ||
471 | 13 | volume-map: | ||
472 | 14 | type: string | ||
473 | 15 | default: {} | ||
474 | 16 | description: > | ||
475 | 17 | YAML map of units to device names, e.g: | ||
476 | 18 | "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" | ||
477 | 19 | Service units will raise a configure-error if volume-ephemeral | ||
478 | 20 | is 'true' and no volume-map value is set. Use 'juju set' to set a | ||
479 | 21 | value and 'juju resolved' to complete configuration. | ||
480 | 22 | |||
481 | 23 | Usage: | ||
482 | 24 | from charmsupport.volumes import configure_volume, VolumeConfigurationError | ||
483 | 25 | from charmsupport.hookenv import log, ERROR | ||
484 | 26 | def post_mount_hook(): | ||
485 | 27 | stop_service('myservice') | ||
486 | 28 | def post_mount_hook(): | ||
487 | 29 | start_service('myservice') | ||
488 | 30 | |||
489 | 31 | if __name__ == '__main__': | ||
490 | 32 | try: | ||
491 | 33 | configure_volume(before_change=pre_mount_hook, | ||
492 | 34 | after_change=post_mount_hook) | ||
493 | 35 | except VolumeConfigurationError: | ||
494 | 36 | log('Storage could not be configured', ERROR) | ||
495 | 37 | ''' | ||
496 | 38 | |||
497 | 39 | # XXX: Known limitations | ||
498 | 40 | # - fstab is neither consulted nor updated | ||
499 | 41 | |||
500 | 42 | import os | ||
501 | 43 | import hookenv | ||
502 | 44 | import host | ||
503 | 45 | import yaml | ||
504 | 46 | |||
505 | 47 | |||
506 | 48 | MOUNT_BASE = '/srv/juju/volumes' | ||
507 | 49 | |||
508 | 50 | |||
509 | 51 | class VolumeConfigurationError(Exception): | ||
510 | 52 | '''Volume configuration data is missing or invalid''' | ||
511 | 53 | pass | ||
512 | 54 | |||
513 | 55 | |||
514 | 56 | def get_config(): | ||
515 | 57 | '''Gather and sanity-check volume configuration data''' | ||
516 | 58 | volume_config = {} | ||
517 | 59 | config = hookenv.config() | ||
518 | 60 | |||
519 | 61 | errors = False | ||
520 | 62 | |||
521 | 63 | if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): | ||
522 | 64 | volume_config['ephemeral'] = True | ||
523 | 65 | else: | ||
524 | 66 | volume_config['ephemeral'] = False | ||
525 | 67 | |||
526 | 68 | try: | ||
527 | 69 | volume_map = yaml.safe_load(config.get('volume-map', '{}')) | ||
528 | 70 | except yaml.YAMLError as e: | ||
529 | 71 | hookenv.log("Error parsing YAML volume-map: {}".format(e), | ||
530 | 72 | hookenv.ERROR) | ||
531 | 73 | errors = True | ||
532 | 74 | if volume_map is None: | ||
533 | 75 | # probably an empty string | ||
534 | 76 | volume_map = {} | ||
535 | 77 | elif isinstance(volume_map, dict): | ||
536 | 78 | hookenv.log("Volume-map should be a dictionary, not {}".format( | ||
537 | 79 | type(volume_map))) | ||
538 | 80 | errors = True | ||
539 | 81 | |||
540 | 82 | volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) | ||
541 | 83 | if volume_config['device'] and volume_config['ephemeral']: | ||
542 | 84 | # asked for ephemeral storage but also defined a volume ID | ||
543 | 85 | hookenv.log('A volume is defined for this unit, but ephemeral ' | ||
544 | 86 | 'storage was requested', hookenv.ERROR) | ||
545 | 87 | errors = True | ||
546 | 88 | elif not volume_config['device'] and not volume_config['ephemeral']: | ||
547 | 89 | # asked for permanent storage but did not define volume ID | ||
548 | 90 | hookenv.log('Ephemeral storage was requested, but there is no volume ' | ||
549 | 91 | 'defined for this unit.', hookenv.ERROR) | ||
550 | 92 | errors = True | ||
551 | 93 | |||
552 | 94 | unit_mount_name = hookenv.local_unit().replace('/', '-') | ||
553 | 95 | volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) | ||
554 | 96 | |||
555 | 97 | if errors: | ||
556 | 98 | return None | ||
557 | 99 | return volume_config | ||
558 | 100 | |||
559 | 101 | |||
560 | 102 | def mount_volume(config): | ||
561 | 103 | if os.path.exists(config['mountpoint']): | ||
562 | 104 | if not os.path.isdir(config['mountpoint']): | ||
563 | 105 | hookenv.log('Not a directory: {}'.format(config['mountpoint'])) | ||
564 | 106 | raise VolumeConfigurationError() | ||
565 | 107 | else: | ||
566 | 108 | host.mkdir(config['mountpoint']) | ||
567 | 109 | if os.path.ismount(config['mountpoint']): | ||
568 | 110 | unmount_volume(config) | ||
569 | 111 | if not host.mount(config['device'], config['mountpoint'], persist=True): | ||
570 | 112 | raise VolumeConfigurationError() | ||
571 | 113 | |||
572 | 114 | |||
573 | 115 | def unmount_volume(config): | ||
574 | 116 | if os.path.ismount(config['mountpoint']): | ||
575 | 117 | if not host.umount(config['mountpoint'], persist=True): | ||
576 | 118 | raise VolumeConfigurationError() | ||
577 | 119 | |||
578 | 120 | |||
579 | 121 | def managed_mounts(): | ||
580 | 122 | '''List of all mounted managed volumes''' | ||
581 | 123 | return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) | ||
582 | 124 | |||
583 | 125 | |||
584 | 126 | def configure_volume(before_change=lambda: None, after_change=lambda: None): | ||
585 | 127 | '''Set up storage (or don't) according to the charm's volume configuration. | ||
586 | 128 | Returns the mount point or "ephemeral". before_change and after_change | ||
587 | 129 | are optional functions to be called if the volume configuration changes. | ||
588 | 130 | ''' | ||
589 | 131 | |||
590 | 132 | config = get_config() | ||
591 | 133 | if not config: | ||
592 | 134 | hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) | ||
593 | 135 | raise VolumeConfigurationError() | ||
594 | 136 | |||
595 | 137 | if config['ephemeral']: | ||
596 | 138 | if os.path.ismount(config['mountpoint']): | ||
597 | 139 | before_change() | ||
598 | 140 | unmount_volume(config) | ||
599 | 141 | after_change() | ||
600 | 142 | return 'ephemeral' | ||
601 | 143 | else: | ||
602 | 144 | # persistent storage | ||
603 | 145 | if os.path.ismount(config['mountpoint']): | ||
604 | 146 | mounts = dict(managed_mounts()) | ||
605 | 147 | if mounts.get(config['mountpoint']) != config['device']: | ||
606 | 148 | before_change() | ||
607 | 149 | unmount_volume(config) | ||
608 | 150 | mount_volume(config) | ||
609 | 151 | after_change() | ||
610 | 152 | else: | ||
611 | 153 | before_change() | ||
612 | 154 | mount_volume(config) | ||
613 | 155 | after_change() | ||
614 | 156 | return config['mountpoint'] | ||
615 | 157 | 0 | ||
616 | === removed directory 'hooks/charmhelpers/contrib/hahelpers' | |||
617 | === removed file 'hooks/charmhelpers/contrib/hahelpers/IMPORT' | |||
618 | --- hooks/charmhelpers/contrib/hahelpers/IMPORT 2013-06-07 09:39:50 +0000 | |||
619 | +++ hooks/charmhelpers/contrib/hahelpers/IMPORT 1970-01-01 00:00:00 +0000 | |||
620 | @@ -1,7 +0,0 @@ | |||
621 | 1 | Source: lp:~openstack-charmers/openstack-charm-helpers/ha-helpers | ||
622 | 2 | |||
623 | 3 | ha-helpers/lib/apache_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/apache_utils.py | ||
624 | 4 | ha-helpers/lib/cluster_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/cluster_utils.py | ||
625 | 5 | ha-helpers/lib/ceph_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/ceph_utils.py | ||
626 | 6 | ha-helpers/lib/haproxy_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/haproxy_utils.py | ||
627 | 7 | ha-helpers/lib/utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/utils.py | ||
628 | 8 | 0 | ||
629 | === removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py' | |||
630 | === removed file 'hooks/charmhelpers/contrib/hahelpers/apache_utils.py' | |||
631 | --- hooks/charmhelpers/contrib/hahelpers/apache_utils.py 2013-06-07 09:39:50 +0000 | |||
632 | +++ hooks/charmhelpers/contrib/hahelpers/apache_utils.py 1970-01-01 00:00:00 +0000 | |||
633 | @@ -1,196 +0,0 @@ | |||
634 | 1 | # | ||
635 | 2 | # Copyright 2012 Canonical Ltd. | ||
636 | 3 | # | ||
637 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
638 | 5 | # | ||
639 | 6 | # Authors: | ||
640 | 7 | # James Page <james.page@ubuntu.com> | ||
641 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
642 | 9 | # | ||
643 | 10 | |||
644 | 11 | from hahelpers.utils import ( | ||
645 | 12 | relation_ids, | ||
646 | 13 | relation_list, | ||
647 | 14 | relation_get, | ||
648 | 15 | render_template, | ||
649 | 16 | juju_log, | ||
650 | 17 | config_get, | ||
651 | 18 | install, | ||
652 | 19 | get_host_ip, | ||
653 | 20 | restart | ||
654 | 21 | ) | ||
655 | 22 | from hahelpers.cluster_utils import https | ||
656 | 23 | |||
657 | 24 | import os | ||
658 | 25 | import subprocess | ||
659 | 26 | from base64 import b64decode | ||
660 | 27 | |||
661 | 28 | APACHE_SITE_DIR = "/etc/apache2/sites-available" | ||
662 | 29 | SITE_TEMPLATE = "apache2_site.tmpl" | ||
663 | 30 | RELOAD_CHECK = "To activate the new configuration" | ||
664 | 31 | |||
665 | 32 | |||
666 | 33 | def get_cert(): | ||
667 | 34 | cert = config_get('ssl_cert') | ||
668 | 35 | key = config_get('ssl_key') | ||
669 | 36 | if not (cert and key): | ||
670 | 37 | juju_log('INFO', | ||
671 | 38 | "Inspecting identity-service relations for SSL certificate.") | ||
672 | 39 | cert = key = None | ||
673 | 40 | for r_id in relation_ids('identity-service'): | ||
674 | 41 | for unit in relation_list(r_id): | ||
675 | 42 | if not cert: | ||
676 | 43 | cert = relation_get('ssl_cert', | ||
677 | 44 | rid=r_id, unit=unit) | ||
678 | 45 | if not key: | ||
679 | 46 | key = relation_get('ssl_key', | ||
680 | 47 | rid=r_id, unit=unit) | ||
681 | 48 | return (cert, key) | ||
682 | 49 | |||
683 | 50 | |||
684 | 51 | def get_ca_cert(): | ||
685 | 52 | ca_cert = None | ||
686 | 53 | juju_log('INFO', | ||
687 | 54 | "Inspecting identity-service relations for CA SSL certificate.") | ||
688 | 55 | for r_id in relation_ids('identity-service'): | ||
689 | 56 | for unit in relation_list(r_id): | ||
690 | 57 | if not ca_cert: | ||
691 | 58 | ca_cert = relation_get('ca_cert', | ||
692 | 59 | rid=r_id, unit=unit) | ||
693 | 60 | return ca_cert | ||
694 | 61 | |||
695 | 62 | |||
696 | 63 | def install_ca_cert(ca_cert): | ||
697 | 64 | if ca_cert: | ||
698 | 65 | with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', | ||
699 | 66 | 'w') as crt: | ||
700 | 67 | crt.write(ca_cert) | ||
701 | 68 | subprocess.check_call(['update-ca-certificates', '--fresh']) | ||
702 | 69 | |||
703 | 70 | |||
704 | 71 | def enable_https(port_maps, namespace, cert, key, ca_cert=None): | ||
705 | 72 | ''' | ||
706 | 73 | For a given number of port mappings, configures apache2 | ||
707 | 74 | HTTPs local reverse proxying using certificates and keys provided in | ||
708 | 75 | either configuration data (preferred) or relation data. Assumes ports | ||
709 | 76 | are not in use (calling charm should ensure that). | ||
710 | 77 | |||
711 | 78 | port_maps: dict: external to internal port mappings | ||
712 | 79 | namespace: str: name of charm | ||
713 | 80 | ''' | ||
714 | 81 | def _write_if_changed(path, new_content): | ||
715 | 82 | content = None | ||
716 | 83 | if os.path.exists(path): | ||
717 | 84 | with open(path, 'r') as f: | ||
718 | 85 | content = f.read().strip() | ||
719 | 86 | if content != new_content: | ||
720 | 87 | with open(path, 'w') as f: | ||
721 | 88 | f.write(new_content) | ||
722 | 89 | return True | ||
723 | 90 | else: | ||
724 | 91 | return False | ||
725 | 92 | |||
726 | 93 | juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps)) | ||
727 | 94 | http_restart = False | ||
728 | 95 | |||
729 | 96 | if cert: | ||
730 | 97 | cert = b64decode(cert) | ||
731 | 98 | if key: | ||
732 | 99 | key = b64decode(key) | ||
733 | 100 | if ca_cert: | ||
734 | 101 | ca_cert = b64decode(ca_cert) | ||
735 | 102 | |||
736 | 103 | if not cert and not key: | ||
737 | 104 | juju_log('ERROR', | ||
738 | 105 | "Expected but could not find SSL certificate data, not " | ||
739 | 106 | "configuring HTTPS!") | ||
740 | 107 | return False | ||
741 | 108 | |||
742 | 109 | install('apache2') | ||
743 | 110 | if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl', | ||
744 | 111 | 'proxy', 'proxy_http']): | ||
745 | 112 | http_restart = True | ||
746 | 113 | |||
747 | 114 | ssl_dir = os.path.join('/etc/apache2/ssl', namespace) | ||
748 | 115 | if not os.path.exists(ssl_dir): | ||
749 | 116 | os.makedirs(ssl_dir) | ||
750 | 117 | |||
751 | 118 | if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)): | ||
752 | 119 | http_restart = True | ||
753 | 120 | if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)): | ||
754 | 121 | http_restart = True | ||
755 | 122 | os.chmod(os.path.join(ssl_dir, 'key'), 0600) | ||
756 | 123 | |||
757 | 124 | install_ca_cert(ca_cert) | ||
758 | 125 | |||
759 | 126 | sites_dir = '/etc/apache2/sites-available' | ||
760 | 127 | for ext_port, int_port in port_maps.items(): | ||
761 | 128 | juju_log('INFO', | ||
762 | 129 | 'Creating apache2 reverse proxy vhost' | ||
763 | 130 | ' for {}:{}'.format(ext_port, | ||
764 | 131 | int_port)) | ||
765 | 132 | site = "{}_{}".format(namespace, ext_port) | ||
766 | 133 | site_path = os.path.join(sites_dir, site) | ||
767 | 134 | with open(site_path, 'w') as fsite: | ||
768 | 135 | context = { | ||
769 | 136 | "ext": ext_port, | ||
770 | 137 | "int": int_port, | ||
771 | 138 | "namespace": namespace, | ||
772 | 139 | "private_address": get_host_ip() | ||
773 | 140 | } | ||
774 | 141 | fsite.write(render_template(SITE_TEMPLATE, | ||
775 | 142 | context)) | ||
776 | 143 | |||
777 | 144 | if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]): | ||
778 | 145 | http_restart = True | ||
779 | 146 | |||
780 | 147 | if http_restart: | ||
781 | 148 | restart('apache2') | ||
782 | 149 | |||
783 | 150 | return True | ||
784 | 151 | |||
785 | 152 | |||
786 | 153 | def disable_https(port_maps, namespace): | ||
787 | 154 | ''' | ||
788 | 155 | Ensure HTTPS reverse proxying is disabled for given port mappings | ||
789 | 156 | |||
790 | 157 | port_maps: dict: of ext -> int port mappings | ||
791 | 158 | namespace: str: name of charm | ||
792 | 159 | ''' | ||
793 | 160 | juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps)) | ||
794 | 161 | |||
795 | 162 | if (not os.path.exists('/etc/apache2') or | ||
796 | 163 | not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))): | ||
797 | 164 | return | ||
798 | 165 | |||
799 | 166 | http_restart = False | ||
800 | 167 | for ext_port in port_maps.keys(): | ||
801 | 168 | if os.path.exists(os.path.join(APACHE_SITE_DIR, | ||
802 | 169 | "{}_{}".format(namespace, | ||
803 | 170 | ext_port))): | ||
804 | 171 | juju_log('INFO', | ||
805 | 172 | "Disabling HTTPS reverse proxy" | ||
806 | 173 | " for {} {}.".format(namespace, | ||
807 | 174 | ext_port)) | ||
808 | 175 | if (RELOAD_CHECK in | ||
809 | 176 | subprocess.check_output(['a2dissite', | ||
810 | 177 | '{}_{}'.format(namespace, | ||
811 | 178 | ext_port)])): | ||
812 | 179 | http_restart = True | ||
813 | 180 | |||
814 | 181 | if http_restart: | ||
815 | 182 | restart(['apache2']) | ||
816 | 183 | |||
817 | 184 | |||
818 | 185 | def setup_https(port_maps, namespace, cert, key, ca_cert=None): | ||
819 | 186 | ''' | ||
820 | 187 | Ensures HTTPS is either enabled or disabled for given port | ||
821 | 188 | mapping. | ||
822 | 189 | |||
823 | 190 | port_maps: dict: of ext -> int port mappings | ||
824 | 191 | namespace: str: name of charm | ||
825 | 192 | ''' | ||
826 | 193 | if not https: | ||
827 | 194 | disable_https(port_maps, namespace) | ||
828 | 195 | else: | ||
829 | 196 | enable_https(port_maps, namespace, cert, key, ca_cert) | ||
830 | 197 | 0 | ||
831 | === removed file 'hooks/charmhelpers/contrib/hahelpers/ceph_utils.py' | |||
832 | --- hooks/charmhelpers/contrib/hahelpers/ceph_utils.py 2013-06-07 09:39:50 +0000 | |||
833 | +++ hooks/charmhelpers/contrib/hahelpers/ceph_utils.py 1970-01-01 00:00:00 +0000 | |||
834 | @@ -1,256 +0,0 @@ | |||
835 | 1 | # | ||
836 | 2 | # Copyright 2012 Canonical Ltd. | ||
837 | 3 | # | ||
838 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
839 | 5 | # | ||
840 | 6 | # Authors: | ||
841 | 7 | # James Page <james.page@ubuntu.com> | ||
842 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
843 | 9 | # | ||
844 | 10 | |||
845 | 11 | import commands | ||
846 | 12 | import subprocess | ||
847 | 13 | import os | ||
848 | 14 | import shutil | ||
849 | 15 | import hahelpers.utils as utils | ||
850 | 16 | |||
851 | 17 | KEYRING = '/etc/ceph/ceph.client.%s.keyring' | ||
852 | 18 | KEYFILE = '/etc/ceph/ceph.client.%s.key' | ||
853 | 19 | |||
854 | 20 | CEPH_CONF = """[global] | ||
855 | 21 | auth supported = %(auth)s | ||
856 | 22 | keyring = %(keyring)s | ||
857 | 23 | mon host = %(mon_hosts)s | ||
858 | 24 | """ | ||
859 | 25 | |||
860 | 26 | |||
861 | 27 | def execute(cmd): | ||
862 | 28 | subprocess.check_call(cmd) | ||
863 | 29 | |||
864 | 30 | |||
865 | 31 | def execute_shell(cmd): | ||
866 | 32 | subprocess.check_call(cmd, shell=True) | ||
867 | 33 | |||
868 | 34 | |||
869 | 35 | def install(): | ||
870 | 36 | ceph_dir = "/etc/ceph" | ||
871 | 37 | if not os.path.isdir(ceph_dir): | ||
872 | 38 | os.mkdir(ceph_dir) | ||
873 | 39 | utils.install('ceph-common') | ||
874 | 40 | |||
875 | 41 | |||
876 | 42 | def rbd_exists(service, pool, rbd_img): | ||
877 | 43 | (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\ | ||
878 | 44 | (service, pool)) | ||
879 | 45 | return rbd_img in out | ||
880 | 46 | |||
881 | 47 | |||
882 | 48 | def create_rbd_image(service, pool, image, sizemb): | ||
883 | 49 | cmd = [ | ||
884 | 50 | 'rbd', | ||
885 | 51 | 'create', | ||
886 | 52 | image, | ||
887 | 53 | '--size', | ||
888 | 54 | str(sizemb), | ||
889 | 55 | '--id', | ||
890 | 56 | service, | ||
891 | 57 | '--pool', | ||
892 | 58 | pool | ||
893 | 59 | ] | ||
894 | 60 | execute(cmd) | ||
895 | 61 | |||
896 | 62 | |||
897 | 63 | def pool_exists(service, name): | ||
898 | 64 | (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) | ||
899 | 65 | return name in out | ||
900 | 66 | |||
901 | 67 | |||
902 | 68 | def create_pool(service, name): | ||
903 | 69 | cmd = [ | ||
904 | 70 | 'rados', | ||
905 | 71 | '--id', | ||
906 | 72 | service, | ||
907 | 73 | 'mkpool', | ||
908 | 74 | name | ||
909 | 75 | ] | ||
910 | 76 | execute(cmd) | ||
911 | 77 | |||
912 | 78 | |||
913 | 79 | def keyfile_path(service): | ||
914 | 80 | return KEYFILE % service | ||
915 | 81 | |||
916 | 82 | |||
917 | 83 | def keyring_path(service): | ||
918 | 84 | return KEYRING % service | ||
919 | 85 | |||
920 | 86 | |||
921 | 87 | def create_keyring(service, key): | ||
922 | 88 | keyring = keyring_path(service) | ||
923 | 89 | if os.path.exists(keyring): | ||
924 | 90 | utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring) | ||
925 | 91 | cmd = [ | ||
926 | 92 | 'ceph-authtool', | ||
927 | 93 | keyring, | ||
928 | 94 | '--create-keyring', | ||
929 | 95 | '--name=client.%s' % service, | ||
930 | 96 | '--add-key=%s' % key | ||
931 | 97 | ] | ||
932 | 98 | execute(cmd) | ||
933 | 99 | utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring) | ||
934 | 100 | |||
935 | 101 | |||
936 | 102 | def create_key_file(service, key): | ||
937 | 103 | # create a file containing the key | ||
938 | 104 | keyfile = keyfile_path(service) | ||
939 | 105 | if os.path.exists(keyfile): | ||
940 | 106 | utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile) | ||
941 | 107 | fd = open(keyfile, 'w') | ||
942 | 108 | fd.write(key) | ||
943 | 109 | fd.close() | ||
944 | 110 | utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile) | ||
945 | 111 | |||
946 | 112 | |||
947 | 113 | def get_ceph_nodes(): | ||
948 | 114 | hosts = [] | ||
949 | 115 | for r_id in utils.relation_ids('ceph'): | ||
950 | 116 | for unit in utils.relation_list(r_id): | ||
951 | 117 | hosts.append(utils.relation_get('private-address', | ||
952 | 118 | unit=unit, rid=r_id)) | ||
953 | 119 | return hosts | ||
954 | 120 | |||
955 | 121 | |||
956 | 122 | def configure(service, key, auth): | ||
957 | 123 | create_keyring(service, key) | ||
958 | 124 | create_key_file(service, key) | ||
959 | 125 | hosts = get_ceph_nodes() | ||
960 | 126 | mon_hosts = ",".join(map(str, hosts)) | ||
961 | 127 | keyring = keyring_path(service) | ||
962 | 128 | with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: | ||
963 | 129 | ceph_conf.write(CEPH_CONF % locals()) | ||
964 | 130 | modprobe_kernel_module('rbd') | ||
965 | 131 | |||
966 | 132 | |||
967 | 133 | def image_mapped(image_name): | ||
968 | 134 | (rc, out) = commands.getstatusoutput('rbd showmapped') | ||
969 | 135 | return image_name in out | ||
970 | 136 | |||
971 | 137 | |||
972 | 138 | def map_block_storage(service, pool, image): | ||
973 | 139 | cmd = [ | ||
974 | 140 | 'rbd', | ||
975 | 141 | 'map', | ||
976 | 142 | '%s/%s' % (pool, image), | ||
977 | 143 | '--user', | ||
978 | 144 | service, | ||
979 | 145 | '--secret', | ||
980 | 146 | keyfile_path(service), | ||
981 | 147 | ] | ||
982 | 148 | execute(cmd) | ||
983 | 149 | |||
984 | 150 | |||
985 | 151 | def filesystem_mounted(fs): | ||
986 | 152 | return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 | ||
987 | 153 | |||
988 | 154 | |||
989 | 155 | def make_filesystem(blk_device, fstype='ext4'): | ||
990 | 156 | utils.juju_log('INFO', | ||
991 | 157 | 'ceph: Formatting block device %s as filesystem %s.' %\ | ||
992 | 158 | (blk_device, fstype)) | ||
993 | 159 | cmd = ['mkfs', '-t', fstype, blk_device] | ||
994 | 160 | execute(cmd) | ||
995 | 161 | |||
996 | 162 | |||
997 | 163 | def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): | ||
998 | 164 | # mount block device into /mnt | ||
999 | 165 | cmd = ['mount', '-t', fstype, blk_device, '/mnt'] | ||
1000 | 166 | execute(cmd) | ||
1001 | 167 | |||
1002 | 168 | # copy data to /mnt | ||
1003 | 169 | try: | ||
1004 | 170 | copy_files(data_src_dst, '/mnt') | ||
1005 | 171 | except: | ||
1006 | 172 | pass | ||
1007 | 173 | |||
1008 | 174 | # umount block device | ||
1009 | 175 | cmd = ['umount', '/mnt'] | ||
1010 | 176 | execute(cmd) | ||
1011 | 177 | |||
1012 | 178 | _dir = os.stat(data_src_dst) | ||
1013 | 179 | uid = _dir.st_uid | ||
1014 | 180 | gid = _dir.st_gid | ||
1015 | 181 | |||
1016 | 182 | # re-mount where the data should originally be | ||
1017 | 183 | cmd = ['mount', '-t', fstype, blk_device, data_src_dst] | ||
1018 | 184 | execute(cmd) | ||
1019 | 185 | |||
1020 | 186 | # ensure original ownership of new mount. | ||
1021 | 187 | cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] | ||
1022 | 188 | execute(cmd) | ||
1023 | 189 | |||
1024 | 190 | |||
1025 | 191 | # TODO: re-use | ||
1026 | 192 | def modprobe_kernel_module(module): | ||
1027 | 193 | utils.juju_log('INFO', 'Loading kernel module') | ||
1028 | 194 | cmd = ['modprobe', module] | ||
1029 | 195 | execute(cmd) | ||
1030 | 196 | cmd = 'echo %s >> /etc/modules' % module | ||
1031 | 197 | execute_shell(cmd) | ||
1032 | 198 | |||
1033 | 199 | |||
1034 | 200 | def copy_files(src, dst, symlinks=False, ignore=None): | ||
1035 | 201 | for item in os.listdir(src): | ||
1036 | 202 | s = os.path.join(src, item) | ||
1037 | 203 | d = os.path.join(dst, item) | ||
1038 | 204 | if os.path.isdir(s): | ||
1039 | 205 | shutil.copytree(s, d, symlinks, ignore) | ||
1040 | 206 | else: | ||
1041 | 207 | shutil.copy2(s, d) | ||
1042 | 208 | |||
1043 | 209 | |||
1044 | 210 | def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, | ||
1045 | 211 | blk_device, fstype, system_services=[]): | ||
1046 | 212 | """ | ||
1047 | 213 | To be called from the current cluster leader. | ||
1048 | 214 | Ensures given pool and RBD image exists, is mapped to a block device, | ||
1049 | 215 | and the device is formatted and mounted at the given mount_point. | ||
1050 | 216 | |||
1051 | 217 | If formatting a device for the first time, data existing at mount_point | ||
1052 | 218 | will be migrated to the RBD device before being remounted. | ||
1053 | 219 | |||
1054 | 220 | All services listed in system_services will be stopped prior to data | ||
1055 | 221 | migration and restarted when complete. | ||
1056 | 222 | """ | ||
1057 | 223 | # Ensure pool, RBD image, RBD mappings are in place. | ||
1058 | 224 | if not pool_exists(service, pool): | ||
1059 | 225 | utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool) | ||
1060 | 226 | create_pool(service, pool) | ||
1061 | 227 | |||
1062 | 228 | if not rbd_exists(service, pool, rbd_img): | ||
1063 | 229 | utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img) | ||
1064 | 230 | create_rbd_image(service, pool, rbd_img, sizemb) | ||
1065 | 231 | |||
1066 | 232 | if not image_mapped(rbd_img): | ||
1067 | 233 | utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.') | ||
1068 | 234 | map_block_storage(service, pool, rbd_img) | ||
1069 | 235 | |||
1070 | 236 | # make file system | ||
1071 | 237 | # TODO: What happens if for whatever reason this is run again and | ||
1072 | 238 | # the data is already in the rbd device and/or is mounted?? | ||
1073 | 239 | # When it is mounted already, it will fail to make the fs | ||
1074 | 240 | # XXX: This is really sketchy! Need to at least add an fstab entry | ||
1075 | 241 | # otherwise this hook will blow away existing data if its executed | ||
1076 | 242 | # after a reboot. | ||
1077 | 243 | if not filesystem_mounted(mount_point): | ||
1078 | 244 | make_filesystem(blk_device, fstype) | ||
1079 | 245 | |||
1080 | 246 | for svc in system_services: | ||
1081 | 247 | if utils.running(svc): | ||
1082 | 248 | utils.juju_log('INFO', | ||
1083 | 249 | 'Stopping services %s prior to migrating '\ | ||
1084 | 250 | 'data' % svc) | ||
1085 | 251 | utils.stop(svc) | ||
1086 | 252 | |||
1087 | 253 | place_data_on_ceph(service, blk_device, mount_point, fstype) | ||
1088 | 254 | |||
1089 | 255 | for svc in system_services: | ||
1090 | 256 | utils.start(svc) | ||
1091 | 257 | 0 | ||
1092 | === removed file 'hooks/charmhelpers/contrib/hahelpers/cluster_utils.py' | |||
1093 | --- hooks/charmhelpers/contrib/hahelpers/cluster_utils.py 2013-06-07 09:39:50 +0000 | |||
1094 | +++ hooks/charmhelpers/contrib/hahelpers/cluster_utils.py 1970-01-01 00:00:00 +0000 | |||
1095 | @@ -1,130 +0,0 @@ | |||
1096 | 1 | # | ||
1097 | 2 | # Copyright 2012 Canonical Ltd. | ||
1098 | 3 | # | ||
1099 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
1100 | 5 | # | ||
1101 | 6 | # Authors: | ||
1102 | 7 | # James Page <james.page@ubuntu.com> | ||
1103 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
1104 | 9 | # | ||
1105 | 10 | |||
1106 | 11 | from hahelpers.utils import ( | ||
1107 | 12 | juju_log, | ||
1108 | 13 | relation_ids, | ||
1109 | 14 | relation_list, | ||
1110 | 15 | relation_get, | ||
1111 | 16 | get_unit_hostname, | ||
1112 | 17 | config_get | ||
1113 | 18 | ) | ||
1114 | 19 | import subprocess | ||
1115 | 20 | import os | ||
1116 | 21 | |||
1117 | 22 | |||
1118 | 23 | def is_clustered(): | ||
1119 | 24 | for r_id in (relation_ids('ha') or []): | ||
1120 | 25 | for unit in (relation_list(r_id) or []): | ||
1121 | 26 | clustered = relation_get('clustered', | ||
1122 | 27 | rid=r_id, | ||
1123 | 28 | unit=unit) | ||
1124 | 29 | if clustered: | ||
1125 | 30 | return True | ||
1126 | 31 | return False | ||
1127 | 32 | |||
1128 | 33 | |||
1129 | 34 | def is_leader(resource): | ||
1130 | 35 | cmd = [ | ||
1131 | 36 | "crm", "resource", | ||
1132 | 37 | "show", resource | ||
1133 | 38 | ] | ||
1134 | 39 | try: | ||
1135 | 40 | status = subprocess.check_output(cmd) | ||
1136 | 41 | except subprocess.CalledProcessError: | ||
1137 | 42 | return False | ||
1138 | 43 | else: | ||
1139 | 44 | if get_unit_hostname() in status: | ||
1140 | 45 | return True | ||
1141 | 46 | else: | ||
1142 | 47 | return False | ||
1143 | 48 | |||
1144 | 49 | |||
1145 | 50 | def peer_units(): | ||
1146 | 51 | peers = [] | ||
1147 | 52 | for r_id in (relation_ids('cluster') or []): | ||
1148 | 53 | for unit in (relation_list(r_id) or []): | ||
1149 | 54 | peers.append(unit) | ||
1150 | 55 | return peers | ||
1151 | 56 | |||
1152 | 57 | |||
1153 | 58 | def oldest_peer(peers): | ||
1154 | 59 | local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) | ||
1155 | 60 | for peer in peers: | ||
1156 | 61 | remote_unit_no = int(peer.split('/')[1]) | ||
1157 | 62 | if remote_unit_no < local_unit_no: | ||
1158 | 63 | return False | ||
1159 | 64 | return True | ||
1160 | 65 | |||
1161 | 66 | |||
1162 | 67 | def eligible_leader(resource): | ||
1163 | 68 | if is_clustered(): | ||
1164 | 69 | if not is_leader(resource): | ||
1165 | 70 | juju_log('INFO', 'Deferring action to CRM leader.') | ||
1166 | 71 | return False | ||
1167 | 72 | else: | ||
1168 | 73 | peers = peer_units() | ||
1169 | 74 | if peers and not oldest_peer(peers): | ||
1170 | 75 | juju_log('INFO', 'Deferring action to oldest service unit.') | ||
1171 | 76 | return False | ||
1172 | 77 | return True | ||
1173 | 78 | |||
1174 | 79 | |||
1175 | 80 | def https(): | ||
1176 | 81 | ''' | ||
1177 | 82 | Determines whether enough data has been provided in configuration | ||
1178 | 83 | or relation data to configure HTTPS | ||
1179 | 84 | . | ||
1180 | 85 | returns: boolean | ||
1181 | 86 | ''' | ||
1182 | 87 | if config_get('use-https') == "yes": | ||
1183 | 88 | return True | ||
1184 | 89 | if config_get('ssl_cert') and config_get('ssl_key'): | ||
1185 | 90 | return True | ||
1186 | 91 | for r_id in relation_ids('identity-service'): | ||
1187 | 92 | for unit in relation_list(r_id): | ||
1188 | 93 | if (relation_get('https_keystone', rid=r_id, unit=unit) and | ||
1189 | 94 | relation_get('ssl_cert', rid=r_id, unit=unit) and | ||
1190 | 95 | relation_get('ssl_key', rid=r_id, unit=unit) and | ||
1191 | 96 | relation_get('ca_cert', rid=r_id, unit=unit)): | ||
1192 | 97 | return True | ||
1193 | 98 | return False | ||
1194 | 99 | |||
1195 | 100 | |||
1196 | 101 | def determine_api_port(public_port): | ||
1197 | 102 | ''' | ||
1198 | 103 | Determine correct API server listening port based on | ||
1199 | 104 | existence of HTTPS reverse proxy and/or haproxy. | ||
1200 | 105 | |||
1201 | 106 | public_port: int: standard public port for given service | ||
1202 | 107 | |||
1203 | 108 | returns: int: the correct listening port for the API service | ||
1204 | 109 | ''' | ||
1205 | 110 | i = 0 | ||
1206 | 111 | if len(peer_units()) > 0 or is_clustered(): | ||
1207 | 112 | i += 1 | ||
1208 | 113 | if https(): | ||
1209 | 114 | i += 1 | ||
1210 | 115 | return public_port - (i * 10) | ||
1211 | 116 | |||
1212 | 117 | |||
1213 | 118 | def determine_haproxy_port(public_port): | ||
1214 | 119 | ''' | ||
1215 | 120 | Description: Determine correct proxy listening port based on public IP + | ||
1216 | 121 | existence of HTTPS reverse proxy. | ||
1217 | 122 | |||
1218 | 123 | public_port: int: standard public port for given service | ||
1219 | 124 | |||
1220 | 125 | returns: int: the correct listening port for the HAProxy service | ||
1221 | 126 | ''' | ||
1222 | 127 | i = 0 | ||
1223 | 128 | if https(): | ||
1224 | 129 | i += 1 | ||
1225 | 130 | return public_port - (i * 10) | ||
1226 | 131 | 0 | ||
1227 | === removed file 'hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py' | |||
1228 | --- hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py 2013-06-07 09:39:50 +0000 | |||
1229 | +++ hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py 1970-01-01 00:00:00 +0000 | |||
1230 | @@ -1,55 +0,0 @@ | |||
1231 | 1 | # | ||
1232 | 2 | # Copyright 2012 Canonical Ltd. | ||
1233 | 3 | # | ||
1234 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
1235 | 5 | # | ||
1236 | 6 | # Authors: | ||
1237 | 7 | # James Page <james.page@ubuntu.com> | ||
1238 | 8 | # Adam Gandelman <adamg@ubuntu.com> | ||
1239 | 9 | # | ||
1240 | 10 | |||
1241 | 11 | from lib.utils import ( | ||
1242 | 12 | relation_ids, | ||
1243 | 13 | relation_list, | ||
1244 | 14 | relation_get, | ||
1245 | 15 | unit_get, | ||
1246 | 16 | reload, | ||
1247 | 17 | render_template | ||
1248 | 18 | ) | ||
1249 | 19 | import os | ||
1250 | 20 | |||
1251 | 21 | HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' | ||
1252 | 22 | HAPROXY_DEFAULT = '/etc/default/haproxy' | ||
1253 | 23 | |||
1254 | 24 | |||
1255 | 25 | def configure_haproxy(service_ports): | ||
1256 | 26 | ''' | ||
1257 | 27 | Configure HAProxy based on the current peers in the service | ||
1258 | 28 | cluster using the provided port map: | ||
1259 | 29 | |||
1260 | 30 | "swift": [ 8080, 8070 ] | ||
1261 | 31 | |||
1262 | 32 | HAproxy will also be reloaded/started if required | ||
1263 | 33 | |||
1264 | 34 | service_ports: dict: dict of lists of [ frontend, backend ] | ||
1265 | 35 | ''' | ||
1266 | 36 | cluster_hosts = {} | ||
1267 | 37 | cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \ | ||
1268 | 38 | unit_get('private-address') | ||
1269 | 39 | for r_id in relation_ids('cluster'): | ||
1270 | 40 | for unit in relation_list(r_id): | ||
1271 | 41 | cluster_hosts[unit.replace('/', '-')] = \ | ||
1272 | 42 | relation_get(attribute='private-address', | ||
1273 | 43 | rid=r_id, | ||
1274 | 44 | unit=unit) | ||
1275 | 45 | context = { | ||
1276 | 46 | 'units': cluster_hosts, | ||
1277 | 47 | 'service_ports': service_ports | ||
1278 | 48 | } | ||
1279 | 49 | with open(HAPROXY_CONF, 'w') as f: | ||
1280 | 50 | f.write(render_template(os.path.basename(HAPROXY_CONF), | ||
1281 | 51 | context)) | ||
1282 | 52 | with open(HAPROXY_DEFAULT, 'w') as f: | ||
1283 | 53 | f.write('ENABLED=1') | ||
1284 | 54 | |||
1285 | 55 | reload('haproxy') | ||
1286 | 56 | 0 | ||
1287 | === removed file 'hooks/charmhelpers/contrib/hahelpers/utils.py' | |||
1288 | --- hooks/charmhelpers/contrib/hahelpers/utils.py 2013-06-07 09:39:50 +0000 | |||
1289 | +++ hooks/charmhelpers/contrib/hahelpers/utils.py 1970-01-01 00:00:00 +0000 | |||
1290 | @@ -1,332 +0,0 @@ | |||
1291 | 1 | # | ||
1292 | 2 | # Copyright 2012 Canonical Ltd. | ||
1293 | 3 | # | ||
1294 | 4 | # This file is sourced from lp:openstack-charm-helpers | ||
1295 | 5 | # | ||
1296 | 6 | # Authors: | ||
1297 | 7 | # James Page <james.page@ubuntu.com> | ||
1298 | 8 | # Paul Collins <paul.collins@canonical.com> | ||
1299 | 9 | # Adam Gandelman <adamg@ubuntu.com> | ||
1300 | 10 | # | ||
1301 | 11 | |||
1302 | 12 | import json | ||
1303 | 13 | import os | ||
1304 | 14 | import subprocess | ||
1305 | 15 | import socket | ||
1306 | 16 | import sys | ||
1307 | 17 | |||
1308 | 18 | |||
1309 | 19 | def do_hooks(hooks): | ||
1310 | 20 | hook = os.path.basename(sys.argv[0]) | ||
1311 | 21 | |||
1312 | 22 | try: | ||
1313 | 23 | hook_func = hooks[hook] | ||
1314 | 24 | except KeyError: | ||
1315 | 25 | juju_log('INFO', | ||
1316 | 26 | "This charm doesn't know how to handle '{}'.".format(hook)) | ||
1317 | 27 | else: | ||
1318 | 28 | hook_func() | ||
1319 | 29 | |||
1320 | 30 | |||
1321 | 31 | def install(*pkgs): | ||
1322 | 32 | cmd = [ | ||
1323 | 33 | 'apt-get', | ||
1324 | 34 | '-y', | ||
1325 | 35 | 'install' | ||
1326 | 36 | ] | ||
1327 | 37 | for pkg in pkgs: | ||
1328 | 38 | cmd.append(pkg) | ||
1329 | 39 | subprocess.check_call(cmd) | ||
1330 | 40 | |||
1331 | 41 | TEMPLATES_DIR = 'templates' | ||
1332 | 42 | |||
1333 | 43 | try: | ||
1334 | 44 | import jinja2 | ||
1335 | 45 | except ImportError: | ||
1336 | 46 | install('python-jinja2') | ||
1337 | 47 | import jinja2 | ||
1338 | 48 | |||
1339 | 49 | try: | ||
1340 | 50 | import dns.resolver | ||
1341 | 51 | except ImportError: | ||
1342 | 52 | install('python-dnspython') | ||
1343 | 53 | import dns.resolver | ||
1344 | 54 | |||
1345 | 55 | |||
1346 | 56 | def render_template(template_name, context, template_dir=TEMPLATES_DIR): | ||
1347 | 57 | templates = jinja2.Environment( | ||
1348 | 58 | loader=jinja2.FileSystemLoader(template_dir) | ||
1349 | 59 | ) | ||
1350 | 60 | template = templates.get_template(template_name) | ||
1351 | 61 | return template.render(context) | ||
1352 | 62 | |||
1353 | 63 | CLOUD_ARCHIVE = \ | ||
1354 | 64 | """ # Ubuntu Cloud Archive | ||
1355 | 65 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main | ||
1356 | 66 | """ | ||
1357 | 67 | |||
1358 | 68 | CLOUD_ARCHIVE_POCKETS = { | ||
1359 | 69 | 'folsom': 'precise-updates/folsom', | ||
1360 | 70 | 'folsom/updates': 'precise-updates/folsom', | ||
1361 | 71 | 'folsom/proposed': 'precise-proposed/folsom', | ||
1362 | 72 | 'grizzly': 'precise-updates/grizzly', | ||
1363 | 73 | 'grizzly/updates': 'precise-updates/grizzly', | ||
1364 | 74 | 'grizzly/proposed': 'precise-proposed/grizzly' | ||
1365 | 75 | } | ||
1366 | 76 | |||
1367 | 77 | |||
1368 | 78 | def configure_source(): | ||
1369 | 79 | source = str(config_get('openstack-origin')) | ||
1370 | 80 | if not source: | ||
1371 | 81 | return | ||
1372 | 82 | if source.startswith('ppa:'): | ||
1373 | 83 | cmd = [ | ||
1374 | 84 | 'add-apt-repository', | ||
1375 | 85 | source | ||
1376 | 86 | ] | ||
1377 | 87 | subprocess.check_call(cmd) | ||
1378 | 88 | if source.startswith('cloud:'): | ||
1379 | 89 | # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg: | ||
1380 | 90 | # cloud:precise-folsom/updates or cloud:precise-folsom/proposed | ||
1381 | 91 | install('ubuntu-cloud-keyring') | ||
1382 | 92 | pocket = source.split(':')[1] | ||
1383 | 93 | pocket = pocket.split('-')[1] | ||
1384 | 94 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: | ||
1385 | 95 | apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])) | ||
1386 | 96 | if source.startswith('deb'): | ||
1387 | 97 | l = len(source.split('|')) | ||
1388 | 98 | if l == 2: | ||
1389 | 99 | (apt_line, key) = source.split('|') | ||
1390 | 100 | cmd = [ | ||
1391 | 101 | 'apt-key', | ||
1392 | 102 | 'adv', '--keyserver keyserver.ubuntu.com', | ||
1393 | 103 | '--recv-keys', key | ||
1394 | 104 | ] | ||
1395 | 105 | subprocess.check_call(cmd) | ||
1396 | 106 | elif l == 1: | ||
1397 | 107 | apt_line = source | ||
1398 | 108 | |||
1399 | 109 | with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: | ||
1400 | 110 | apt.write(apt_line + "\n") | ||
1401 | 111 | cmd = [ | ||
1402 | 112 | 'apt-get', | ||
1403 | 113 | 'update' | ||
1404 | 114 | ] | ||
1405 | 115 | subprocess.check_call(cmd) | ||
1406 | 116 | |||
1407 | 117 | # Protocols | ||
1408 | 118 | TCP = 'TCP' | ||
1409 | 119 | UDP = 'UDP' | ||
1410 | 120 | |||
1411 | 121 | |||
1412 | 122 | def expose(port, protocol='TCP'): | ||
1413 | 123 | cmd = [ | ||
1414 | 124 | 'open-port', | ||
1415 | 125 | '{}/{}'.format(port, protocol) | ||
1416 | 126 | ] | ||
1417 | 127 | subprocess.check_call(cmd) | ||
1418 | 128 | |||
1419 | 129 | |||
1420 | 130 | def juju_log(severity, message): | ||
1421 | 131 | cmd = [ | ||
1422 | 132 | 'juju-log', | ||
1423 | 133 | '--log-level', severity, | ||
1424 | 134 | message | ||
1425 | 135 | ] | ||
1426 | 136 | subprocess.check_call(cmd) | ||
1427 | 137 | |||
1428 | 138 | |||
# Module-level memoisation store shared by all @cached functions; clearing
# it invalidates every cached result.
cache = {}


def cached(func):
    """Memoise *func* in the module-level ``cache`` dict.

    The cache key is the string form of (func, args, kwargs), so results
    are shared for identical calls within this process.  Uses
    functools.wraps so the wrapped function keeps its name and docstring
    (the original wrapper clobbered them).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        key = str((func, args, kwargs))
        try:
            # EAFP: hit the cache first; compute only on a miss.
            return cache[key]
        except KeyError:
            res = func(*args, **kwargs)
            cache[key] = res
            return res
    return wrapper
1443 | 153 | |||
1444 | 154 | |||
@cached
def relation_ids(relation):
    """Return the list of relation ids for *relation*, or None if none.

    Wraps the ``relation-ids`` hook tool.
    """
    cmd = [
        'relation-ids',
        relation
    ]
    result = str(subprocess.check_output(cmd)).split()
    # BUG FIX: ``result`` is a list, so the old ``result == ""`` comparison
    # could never be true; detect empty output as an empty list instead.
    if not result:
        return None
    return result
1456 | 166 | |||
1457 | 167 | |||
@cached
def relation_list(rid):
    """Return the list of remote units on relation id *rid*, or None if none.

    Wraps the ``relation-list`` hook tool.
    """
    cmd = [
        'relation-list',
        '-r', rid,
    ]
    result = str(subprocess.check_output(cmd)).split()
    # BUG FIX: ``result`` is a list, so the old ``result == ""`` comparison
    # could never be true; detect empty output as an empty list instead.
    if not result:
        return None
    return result
1469 | 179 | |||
1470 | 180 | |||
@cached
def relation_get(attribute, unit=None, rid=None):
    """Return *attribute* from relation data, or None when unset.

    Wraps the ``relation-get`` hook tool; *rid* and *unit* scope the
    lookup to a specific relation id and remote unit.
    """
    cmd = ['relation-get']
    if rid:
        cmd.extend(['-r', rid])
    cmd.append(attribute)
    if unit:
        cmd.append(unit)
    value = subprocess.check_output(cmd).strip()  # IGNORE:E1103
    return None if value == "" else value
1487 | 197 | |||
1488 | 198 | |||
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
    """Obtain all relation data as dict by way of JSON.

    When *remote_unit* is given, the JUJU_REMOTE_UNIT environment variable
    is temporarily overridden so ``relation-get`` reads that unit's data.
    """
    cmd = [
        'relation-get', '--format=json'
    ]
    if relation_id:
        cmd.append('-r')
        cmd.append(relation_id)
    remote_unit_orig = None
    if remote_unit:
        remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
        os.environ['JUJU_REMOTE_UNIT'] = remote_unit
    try:
        j = subprocess.check_output(cmd)
    finally:
        # BUG FIX: restore the environment even if the command fails, and
        # remove the variable again when it was not set before (the old
        # code leaked the override when JUJU_REMOTE_UNIT was unset).
        if remote_unit:
            if remote_unit_orig is not None:
                os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
            else:
                del os.environ['JUJU_REMOTE_UNIT']
    d = json.loads(j)
    settings = {}
    # convert unicode to strings (Python 2 hook environment)
    for k, v in d.items():
        settings[str(k)] = str(v)
    return settings
1510 | 220 | |||
1511 | 221 | |||
def relation_set(**kwargs):
    """Set relation values via the ``relation-set`` hook tool.

    A truthy ``rid`` keyword scopes the call to a specific relation id;
    every other keyword becomes a ``key=value`` setting.
    """
    cmd = ['relation-set']
    settings = []
    for key, value in kwargs.items():
        if key == 'rid':
            if value:
                cmd.extend(['-r', value])
        else:
            settings.append('{}={}'.format(key, value))
    subprocess.check_call(cmd + settings)
1526 | 236 | |||
1527 | 237 | |||
@cached
def unit_get(attribute):
    """Return this unit's *attribute* (e.g. 'private-address') or None.

    Wraps the ``unit-get`` hook tool; empty output maps to None.
    """
    value = subprocess.check_output(
        ['unit-get', attribute]).strip()  # IGNORE:E1103
    return None if value == "" else value
1539 | 249 | |||
1540 | 250 | |||
@cached
def config_get(attribute):
    """Return the charm config value for *attribute*, or None if missing.

    Fetches the whole config as JSON via ``config-get`` and looks up the
    requested key.
    """
    out = subprocess.check_output(
        ['config-get', '--format', 'json']).strip()  # IGNORE:E1103
    cfg = json.loads(out)
    # dict.get mirrors the original try/except KeyError -> None behaviour.
    return cfg.get(attribute)
1555 | 265 | |||
1556 | 266 | |||
@cached
def get_unit_hostname():
    """Return this machine's hostname (cached for the process lifetime)."""
    return socket.gethostname()
1560 | 270 | |||
1561 | 271 | |||
@cached
def get_host_ip(hostname=None):
    """Resolve *hostname* to an IPv4 address string, or None on failure.

    Defaults to this unit's private-address.  BUG FIX: the default used to
    be ``hostname=unit_get('private-address')``, which executed a hook
    tool at import time; it is now resolved lazily on first call.
    """
    if hostname is None:
        hostname = unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        answers = dns.resolver.query(hostname, 'A')
        if answers:
            return answers[0].address
    return None
1573 | 283 | |||
1574 | 284 | |||
def _svc_control(service, action):
    """Run ``service <service> <action>`` (sysv/upstart service control)."""
    subprocess.check_call(['service', service, action])
1577 | 287 | |||
1578 | 288 | |||
def restart(*services):
    """Restart each of the named system services."""
    for name in services:
        _svc_control(name, 'restart')
1582 | 292 | |||
1583 | 293 | |||
def stop(*services):
    """Stop each of the named system services."""
    for name in services:
        _svc_control(name, 'stop')
1587 | 297 | |||
1588 | 298 | |||
def start(*services):
    """Start each of the named system services."""
    for name in services:
        _svc_control(name, 'start')
1592 | 302 | |||
1593 | 303 | |||
def reload(*services):
    """Reload each named service, falling back to a restart.

    A reload fails when the service does not support it or is not
    running; a restart fixes up most such cases.  (Note: shadows the
    Python 2 builtin ``reload`` -- kept for API compatibility.)
    """
    for name in services:
        try:
            _svc_control(name, 'reload')
        except subprocess.CalledProcessError:
            _svc_control(name, 'restart')
1602 | 312 | |||
1603 | 313 | |||
def running(service):
    """Return True if ``service <name> status`` reports the service as up."""
    try:
        output = subprocess.check_output(['service', service, 'status'])
    except subprocess.CalledProcessError:
        # A non-zero exit means unknown or stopped.
        return False
    # Upstart says "start/running"; sysv init scripts say "is running".
    return "start/running" in output or "is running" in output
1615 | 325 | |||
1616 | 326 | |||
def is_relation_made(relation, key='private-address'):
    """Return True if any remote unit on *relation* has set *key*."""
    return any(
        relation_get(key, rid=r_id, unit=unit)
        for r_id in (relation_ids(relation) or [])
        for unit in (relation_list(r_id) or [])
    )
1623 | 333 | 0 | ||
1624 | === removed directory 'hooks/charmhelpers/contrib/jujugui' | |||
1625 | === removed file 'hooks/charmhelpers/contrib/jujugui/IMPORT' | |||
1626 | --- hooks/charmhelpers/contrib/jujugui/IMPORT 2013-06-07 09:39:50 +0000 | |||
1627 | +++ hooks/charmhelpers/contrib/jujugui/IMPORT 1970-01-01 00:00:00 +0000 | |||
1628 | @@ -1,4 +0,0 @@ | |||
1629 | 1 | Source: lp:charms/juju-gui | ||
1630 | 2 | |||
1631 | 3 | juju-gui/hooks/utils.py -> charm-helpers/charmhelpers/contrib/jujugui/utils.py | ||
1632 | 4 | juju-gui/tests/test_utils.py -> charm-helpers/tests/contrib/jujugui/test_utils.py | ||
1633 | 5 | 0 | ||
1634 | === removed file 'hooks/charmhelpers/contrib/jujugui/__init__.py' | |||
1635 | === removed file 'hooks/charmhelpers/contrib/jujugui/utils.py' | |||
1636 | --- hooks/charmhelpers/contrib/jujugui/utils.py 2013-06-07 09:39:50 +0000 | |||
1637 | +++ hooks/charmhelpers/contrib/jujugui/utils.py 1970-01-01 00:00:00 +0000 | |||
1638 | @@ -1,602 +0,0 @@ | |||
1639 | 1 | """Juju GUI charm utilities.""" | ||
1640 | 2 | |||
1641 | 3 | __all__ = [ | ||
1642 | 4 | 'AGENT', | ||
1643 | 5 | 'APACHE', | ||
1644 | 6 | 'API_PORT', | ||
1645 | 7 | 'CURRENT_DIR', | ||
1646 | 8 | 'HAPROXY', | ||
1647 | 9 | 'IMPROV', | ||
1648 | 10 | 'JUJU_DIR', | ||
1649 | 11 | 'JUJU_GUI_DIR', | ||
1650 | 12 | 'JUJU_GUI_SITE', | ||
1651 | 13 | 'JUJU_PEM', | ||
1652 | 14 | 'WEB_PORT', | ||
1653 | 15 | 'bzr_checkout', | ||
1654 | 16 | 'chain', | ||
1655 | 17 | 'cmd_log', | ||
1656 | 18 | 'fetch_api', | ||
1657 | 19 | 'fetch_gui', | ||
1658 | 20 | 'find_missing_packages', | ||
1659 | 21 | 'first_path_in_dir', | ||
1660 | 22 | 'get_api_address', | ||
1661 | 23 | 'get_npm_cache_archive_url', | ||
1662 | 24 | 'get_release_file_url', | ||
1663 | 25 | 'get_staging_dependencies', | ||
1664 | 26 | 'get_zookeeper_address', | ||
1665 | 27 | 'legacy_juju', | ||
1666 | 28 | 'log_hook', | ||
1667 | 29 | 'merge', | ||
1668 | 30 | 'parse_source', | ||
1669 | 31 | 'prime_npm_cache', | ||
1670 | 32 | 'render_to_file', | ||
1671 | 33 | 'save_or_create_certificates', | ||
1672 | 34 | 'setup_apache', | ||
1673 | 35 | 'setup_gui', | ||
1674 | 36 | 'start_agent', | ||
1675 | 37 | 'start_gui', | ||
1676 | 38 | 'start_improv', | ||
1677 | 39 | 'write_apache_config', | ||
1678 | 40 | ] | ||
1679 | 41 | |||
1680 | 42 | from contextlib import contextmanager | ||
1681 | 43 | import errno | ||
1682 | 44 | import json | ||
1683 | 45 | import os | ||
1684 | 46 | import logging | ||
1685 | 47 | import shutil | ||
1686 | 48 | from subprocess import CalledProcessError | ||
1687 | 49 | import tempfile | ||
1688 | 50 | from urlparse import urlparse | ||
1689 | 51 | |||
1690 | 52 | import apt | ||
1691 | 53 | import tempita | ||
1692 | 54 | |||
1693 | 55 | from launchpadlib.launchpad import Launchpad | ||
1694 | 56 | from shelltoolbox import ( | ||
1695 | 57 | Serializer, | ||
1696 | 58 | apt_get_install, | ||
1697 | 59 | command, | ||
1698 | 60 | environ, | ||
1699 | 61 | install_extra_repositories, | ||
1700 | 62 | run, | ||
1701 | 63 | script_name, | ||
1702 | 64 | search_file, | ||
1703 | 65 | su, | ||
1704 | 66 | ) | ||
1705 | 67 | from charmhelpers.core.host import ( | ||
1706 | 68 | service_start, | ||
1707 | 69 | ) | ||
1708 | 70 | from charmhelpers.core.hookenv import ( | ||
1709 | 71 | log, | ||
1710 | 72 | config, | ||
1711 | 73 | unit_get, | ||
1712 | 74 | ) | ||
1713 | 75 | |||
1714 | 76 | |||
# Upstart job / service names managed by this charm.
AGENT = 'juju-api-agent'
APACHE = 'apache2'
IMPROV = 'juju-api-improv'
HAPROXY = 'haproxy'

# Ports: the websocket API backend and the apache-served GUI.
API_PORT = 8080
WEB_PORT = 8000

# Paths rooted at the charm's working directory and apache config files.
CURRENT_DIR = os.getcwd()
JUJU_DIR = os.path.join(CURRENT_DIR, 'juju')
JUJU_GUI_DIR = os.path.join(CURRENT_DIR, 'juju-gui')
JUJU_GUI_SITE = '/etc/apache2/sites-available/juju-gui'
JUJU_GUI_PORTS = '/etc/apache2/ports.conf'
JUJU_PEM = 'juju.includes-private-key.pem'
# Extra repository and deb packages needed to build the GUI from a branch.
BUILD_REPOSITORIES = ('ppa:chris-lea/node.js-legacy',)
DEB_BUILD_DEPENDENCIES = (
    'bzr', 'imagemagick', 'make', 'nodejs', 'npm',
)
# Deb packages needed only for the staging (improv) environment.
DEB_STAGE_DEPENDENCIES = (
    'zookeeper',
)


# Store the configuration from on invocation to the next.
config_json = Serializer('/tmp/config.json')
# Bazaar checkout command.
bzr_checkout = command('bzr', 'co', '--lightweight')
# Whether or not the charm is deployed using juju-core.
# If juju-core has been used to deploy the charm, an agent.conf file must
# be present in the charm parent directory.
legacy_juju = lambda: not os.path.exists(
    os.path.join(CURRENT_DIR, '..', 'agent.conf'))
1747 | 109 | |||
1748 | 110 | |||
def _get_build_dependencies():
    """Install deb dependencies for building."""
    log('Installing build dependencies.')
    # The PPA in BUILD_REPOSITORIES supplies node.js for the asset build.
    cmd_log(install_extra_repositories(*BUILD_REPOSITORIES))
    cmd_log(apt_get_install(*DEB_BUILD_DEPENDENCIES))
1754 | 116 | |||
1755 | 117 | |||
def get_api_address(unit_dir):
    """Return the Juju API address stored in the uniter agent.conf file.

    Raises IOError when no machine agent configuration is found.
    """
    import yaml  # python-yaml is only installed if juju-core is used.
    # XXX 2013-03-27 frankban bug=1161443:
    # currently the uniter agent.conf file does not include the API
    # address. For now retrieve it from the machine agent file.
    base_dir = os.path.abspath(os.path.join(unit_dir, '..'))
    for dirname in os.listdir(base_dir):
        if dirname.startswith('machine-'):
            agent_conf = os.path.join(base_dir, dirname, 'agent.conf')
            break
    else:
        raise IOError('Juju agent configuration file not found.')
    # NOTE(review): yaml.load uses the full (unsafe) loader; the file is
    # machine-local and juju-written, but yaml.safe_load would suffice.
    contents = yaml.load(open(agent_conf))
    return contents['apiinfo']['addrs'][0]
1771 | 133 | |||
1772 | 134 | |||
def get_staging_dependencies():
    """Install deb dependencies for the stage (improv) environment."""
    log('Installing stage dependencies.')
    # Currently only zookeeper (see DEB_STAGE_DEPENDENCIES).
    cmd_log(apt_get_install(*DEB_STAGE_DEPENDENCIES))
1777 | 139 | |||
1778 | 140 | |||
def first_path_in_dir(directory):
    """Return the full path of the first file/dir in *directory*.

    "First" is whatever os.listdir yields first; ordering is unspecified.
    """
    entries = os.listdir(directory)
    return os.path.join(directory, entries[0])
1782 | 144 | |||
1783 | 145 | |||
1784 | 146 | def _get_by_attr(collection, attr, value): | ||
1785 | 147 | """Return the first item in collection having attr == value. | ||
1786 | 148 | |||
1787 | 149 | Return None if the item is not found. | ||
1788 | 150 | """ | ||
1789 | 151 | for item in collection: | ||
1790 | 152 | if getattr(item, attr) == value: | ||
1791 | 153 | return item | ||
1792 | 154 | |||
1793 | 155 | |||
def get_release_file_url(project, series_name, release_version):
    """Return the URL of the release file hosted in Launchpad.

    The returned URL points to a release file for the given project, series
    name and release version.
    The argument *project* is a project object as returned by launchpadlib.
    The arguments *series_name* and *release_version* are strings. If
    *release_version* is None, the URL of the latest release will be returned.

    Raises ValueError when the series, release, or .tgz file is not found.
    """
    series = _get_by_attr(project.series, 'name', series_name)
    if series is None:
        raise ValueError('%r: series not found' % series_name)
    # Releases are returned by Launchpad in reverse date order.
    releases = list(series.releases)
    if not releases:
        raise ValueError('%r: series does not contain releases' % series_name)
    if release_version is not None:
        # Narrow the candidates to the single requested version.
        release = _get_by_attr(releases, 'version', release_version)
        if release is None:
            raise ValueError('%r: release not found' % release_version)
        releases = [release]
    for release in releases:
        for file_ in release.files:
            # Only tarball (.tgz) release files qualify.
            if str(file_).endswith('.tgz'):
                return file_.file_link
    raise ValueError('%r: file not found' % release_version)
1820 | 182 | |||
1821 | 183 | |||
def get_zookeeper_address(agent_file_path):
    """Retrieve the Zookeeper address contained in the given *agent_file_path*.

    The *agent_file_path* is a path to a file containing a line similar to the
    following::

        env JUJU_ZOOKEEPER="address"
    """
    key_value = search_file('JUJU_ZOOKEEPER', agent_file_path).strip()
    # Take the token after the first '=' and drop surrounding quotes.
    return key_value.split('=')[1].strip('"')
1832 | 194 | |||
1833 | 195 | |||
@contextmanager
def log_hook():
    """Log when a hook starts and stops its execution.

    Also log to stdout possible CalledProcessError exceptions raised executing
    the hook.
    """
    script = script_name()
    log(">>> Entering {}".format(script))
    try:
        yield
    except CalledProcessError as err:
        # Surface the failed command's output before re-raising so the
        # hook failure is diagnosable from the unit log.
        log('Exception caught:')
        log(err.output)
        raise
    finally:
        # Runs on both success and failure paths.
        log("<<< Exiting {}".format(script))
1851 | 213 | |||
1852 | 214 | |||
def parse_source(source):
    """Parse the ``juju-gui-source`` option.

    Return a tuple of two elements representing info on how to deploy Juju GUI.
    Examples:
      - ('stable', None): latest stable release;
      - ('stable', '0.1.0'): stable release v0.1.0;
      - ('trunk', None): latest trunk release;
      - ('trunk', '0.1.0+build.1'): trunk release v0.1.0 bzr revision 1;
      - ('branch', 'lp:juju-gui'): release is made from a branch;
      - ('url', 'http://example.com/gui'): release from a downloaded file.
    """
    if source.startswith('url:'):
        url = source[4:]
        # Support file paths, including relative paths.
        if urlparse(url).scheme == '':
            if not url.startswith('/'):
                url = os.path.join(os.path.abspath(CURRENT_DIR), url)
            url = "file://%s" % url
        return 'url', url
    if source in ('stable', 'trunk'):
        return source, None
    if source.startswith(('lp:', 'http://')):
        return 'branch', source
    # A version string: '+build.N' marks a trunk release, else stable.
    return ('trunk', source) if 'build' in source else ('stable', source)
1880 | 242 | |||
1881 | 243 | |||
def render_to_file(template_name, context, destination):
    """Render the given *template_name* into *destination* using *context*.

    The tempita template language is used to render contents
    (see http://pythonpaste.org/tempita/).
    The argument *template_name* is the name or path of the template file:
    it may be either a path relative to ``../config`` or an absolute path.
    The argument *destination* is a file path.
    The argument *context* is a dict-like object.
    """
    template = tempita.Template.from_filename(os.path.abspath(template_name))
    with open(destination, 'w') as stream:
        stream.write(template.substitute(context))
1896 | 258 | |||
1897 | 259 | |||
# Logger for command output; created lazily by _setupLogging().
results_log = None


def _setupLogging():
    """Initialise the 'juju-gui' results logger from charm config (idempotent)."""
    global results_log
    if results_log is not None:
        return
    cfg = config()
    logging.basicConfig(
        filename=cfg['command-log-file'],
        level=logging.INFO,
        format="%(asctime)s: %(name)s@%(levelname)s %(message)s")
    results_log = logging.getLogger('juju-gui')
1911 | 273 | |||
1912 | 274 | |||
def cmd_log(results):
    """Append command *results* to the results log, creating it if needed.

    Empty/None results are silently ignored.
    """
    global results_log
    if results:
        if results_log is None:
            _setupLogging()
        # Since 'results' may be multi-line output, start it on a separate
        # line from the logger timestamp, etc.
        results_log.info('\n' + results)
1922 | 284 | |||
1923 | 285 | |||
def start_improv(staging_env, ssl_cert_path,
                 config_path='/etc/init/juju-api-improv.conf'):
    """Start a simulated juju environment using ``improv.py``."""
    log('Setting up staging start up script.')
    context = {
        'juju_dir': JUJU_DIR,
        'keys': ssl_cert_path,
        'port': API_PORT,
        'staging_env': staging_env,
    }
    render_to_file('config/juju-api-improv.conf.template', context, config_path)
    log('Starting the staging backend.')
    # The upstart job must be started as root.
    with su('root'):
        service_start(IMPROV)
1938 | 300 | |||
1939 | 301 | |||
def start_agent(
        ssl_cert_path, config_path='/etc/init/juju-api-agent.conf',
        read_only=False):
    """Start the Juju agent and connect to the current environment."""
    # Retrieve the Zookeeper address from the start up script.
    unit_dir = os.path.realpath(os.path.join(CURRENT_DIR, '..'))
    agent_file = '/etc/init/juju-{0}.conf'.format(os.path.basename(unit_dir))
    zookeeper = get_zookeeper_address(agent_file)
    log('Setting up API agent start up script.')
    context = {
        'juju_dir': JUJU_DIR,
        'keys': ssl_cert_path,
        'port': API_PORT,
        'zookeeper': zookeeper,
        'read_only': read_only
    }
    render_to_file('config/juju-api-agent.conf.template', context, config_path)
    log('Starting API agent.')
    # The upstart job must be started as root.
    with su('root'):
        service_start(AGENT)
1960 | 322 | |||
1961 | 323 | |||
def start_gui(
        console_enabled, login_help, readonly, in_staging, ssl_cert_path,
        charmworld_url, serve_tests, haproxy_path='/etc/haproxy/haproxy.cfg',
        config_js_path=None, secure=True, sandbox=False):
    """Set up and start the Juju GUI server.

    Renders the GUI config.js, the apache site and the haproxy
    configuration from the charm templates, then logs the start.
    """
    with su('root'):
        run('chown', '-R', 'ubuntu:', JUJU_GUI_DIR)
    # XXX 2013-02-05 frankban bug=1116320:
    # External insecure resources are still loaded when testing in the
    # debug environment. For now, switch to the production environment if
    # the charm is configured to serve tests.
    if in_staging and not serve_tests:
        build_dirname = 'build-debug'
    else:
        build_dirname = 'build-prod'
    build_dir = os.path.join(JUJU_GUI_DIR, build_dirname)
    log('Generating the Juju GUI configuration file.')
    is_legacy_juju = legacy_juju()
    # NOTE(review): this initial assignment is redundant -- both branches
    # below assign user/password again.
    user, password = None, None
    if (is_legacy_juju and in_staging) or sandbox:
        user, password = 'admin', 'admin'
    else:
        user, password = None, None

    api_backend = 'python' if is_legacy_juju else 'go'
    if secure:
        protocol = 'wss'
    else:
        log('Running in insecure mode! Port 80 will serve unencrypted.')
        protocol = 'ws'

    # Values are JSON-encoded because they are substituted into config.js.
    context = {
        'raw_protocol': protocol,
        'address': unit_get('public-address'),
        'console_enabled': json.dumps(console_enabled),
        'login_help': json.dumps(login_help),
        'password': json.dumps(password),
        'api_backend': json.dumps(api_backend),
        'readonly': json.dumps(readonly),
        'user': json.dumps(user),
        'protocol': json.dumps(protocol),
        'sandbox': json.dumps(sandbox),
        'charmworld_url': json.dumps(charmworld_url),
    }
    if config_js_path is None:
        config_js_path = os.path.join(
            build_dir, 'juju-ui', 'assets', 'config.js')
    render_to_file('config/config.js.template', context, config_js_path)

    write_apache_config(build_dir, serve_tests)

    log('Generating haproxy configuration file.')
    if is_legacy_juju:
        # The PyJuju API agent is listening on localhost.
        api_address = '127.0.0.1:{0}'.format(API_PORT)
    else:
        # Retrieve the juju-core API server address.
        api_address = get_api_address(os.path.join(CURRENT_DIR, '..'))
    context = {
        'api_address': api_address,
        'api_pem': JUJU_PEM,
        'legacy_juju': is_legacy_juju,
        'ssl_cert_path': ssl_cert_path,
        # In PyJuju environments, use the same certificate for both HTTPS and
        # WebSocket connections. In juju-core the system already has the proper
        # certificate installed.
        'web_pem': JUJU_PEM,
        'web_port': WEB_PORT,
        'secure': secure
    }
    render_to_file('config/haproxy.cfg.template', context, haproxy_path)
    log('Starting Juju GUI.')
2034 | 396 | |||
2035 | 397 | |||
def write_apache_config(build_dir, serve_tests=False):
    """Render the apache ports and site configuration for the GUI."""
    log('Generating the apache site configuration file.')
    context = {
        'port': WEB_PORT,
        'serve_tests': serve_tests,
        'server_root': build_dir,
        'tests_root': os.path.join(JUJU_GUI_DIR, 'test', ''),
    }
    # Both files share the same template context.
    for template, destination in (
            ('config/apache-ports.template', JUJU_GUI_PORTS),
            ('config/apache-site.template', JUJU_GUI_SITE)):
        render_to_file(template, context, destination)
2046 | 408 | |||
2047 | 409 | |||
def get_npm_cache_archive_url(Launchpad=Launchpad):
    """Figure out the URL of the most recent NPM cache archive on Launchpad.

    *Launchpad* is injectable for testing.
    """
    launchpad = Launchpad.login_anonymously('Juju GUI charm', 'production')
    project = launchpad.projects['juju-gui']
    # Find the URL of the most recently created NPM cache archive.
    npm_cache_url = get_release_file_url(project, 'npm-cache', None)
    return npm_cache_url
2055 | 417 | |||
2056 | 418 | |||
def prime_npm_cache(npm_cache_url):
    """Download NPM cache archive and prime the NPM cache with it."""
    # Download the cache archive and then uncompress it into the NPM cache.
    npm_cache_archive = os.path.join(CURRENT_DIR, 'npm-cache.tgz')
    cmd_log(run('curl', '-L', '-o', npm_cache_archive, npm_cache_url))
    npm_cache_dir = os.path.expanduser('~/.npm')
    # The NPM cache directory probably does not exist, so make it if not.
    try:
        os.mkdir(npm_cache_dir)
    except OSError, e:
        # If the directory already exists then ignore the error.
        if e.errno != errno.EEXIST:  # File exists.
            raise
    uncompress = command('tar', '-x', '-z', '-C', npm_cache_dir, '-f')
    cmd_log(uncompress(npm_cache_archive))
2072 | 434 | |||
2073 | 435 | |||
def fetch_gui(juju_gui_source, logpath):
    """Retrieve the Juju GUI release/branch.

    Returns the path to the release tarball, either built from a bzr
    branch or downloaded from a URL / Launchpad release.
    """
    # Retrieve a Juju GUI release.
    origin, version_or_branch = parse_source(juju_gui_source)
    if origin == 'branch':
        # Make sure we have the dependencies necessary for us to actually make
        # a build.
        _get_build_dependencies()
        # Create a release starting from a branch.
        juju_gui_source_dir = os.path.join(CURRENT_DIR, 'juju-gui-source')
        log('Retrieving Juju GUI source checkout from %s.' % version_or_branch)
        cmd_log(run('rm', '-rf', juju_gui_source_dir))
        cmd_log(bzr_checkout(version_or_branch, juju_gui_source_dir))
        log('Preparing a Juju GUI release.')
        logdir = os.path.dirname(logpath)
        # Capture the (long) build output in a sibling temp file.
        fd, name = tempfile.mkstemp(prefix='make-distfile-', dir=logdir)
        log('Output from "make distfile" sent to %s' % name)
        with environ(NO_BZR='1'):
            run('make', '-C', juju_gui_source_dir, 'distfile',
                stdout=fd, stderr=fd)
        release_tarball = first_path_in_dir(
            os.path.join(juju_gui_source_dir, 'releases'))
    else:
        log('Retrieving Juju GUI release.')
        if origin == 'url':
            file_url = version_or_branch
        else:
            # Retrieve a release from Launchpad.
            launchpad = Launchpad.login_anonymously(
                'Juju GUI charm', 'production')
            project = launchpad.projects['juju-gui']
            file_url = get_release_file_url(project, origin, version_or_branch)
        log('Downloading release file from %s.' % file_url)
        release_tarball = os.path.join(CURRENT_DIR, 'release.tgz')
        cmd_log(run('curl', '-L', '-o', release_tarball, file_url))
    return release_tarball
2110 | 472 | |||
2111 | 473 | |||
def fetch_api(juju_api_branch):
    """Retrieve the Juju branch.

    Replaces any existing checkout in JUJU_DIR with a fresh lightweight
    bzr checkout of *juju_api_branch*.
    """
    # Retrieve Juju API source checkout.
    log('Retrieving Juju API source checkout.')
    cmd_log(run('rm', '-rf', JUJU_DIR))
    cmd_log(bzr_checkout(juju_api_branch, JUJU_DIR))
2118 | 480 | |||
2119 | 481 | |||
def setup_gui(release_tarball):
    """Set up Juju GUI.

    Uncompresses *release_tarball* into a fresh ``release`` directory and
    points JUJU_GUI_DIR at its contents via a symlink.
    """
    # Uncompress the release tarball.
    log('Installing Juju GUI.')
    release_dir = os.path.join(CURRENT_DIR, 'release')
    cmd_log(run('rm', '-rf', release_dir))
    os.mkdir(release_dir)
    uncompress = command('tar', '-x', '-z', '-C', release_dir, '-f')
    cmd_log(uncompress(release_tarball))
    # Link the Juju GUI dir to the contents of the release tarball.
    cmd_log(run('ln', '-sf', first_path_in_dir(release_dir), JUJU_GUI_DIR))
2131 | 493 | |||
2132 | 494 | |||
def setup_apache():
    """Set up apache.

    Creates the GUI site and ports config files if missing, then disables
    the default site and enables the juju-gui site.
    """
    log('Setting up apache.')
    if not os.path.exists(JUJU_GUI_SITE):
        cmd_log(run('touch', JUJU_GUI_SITE))
        cmd_log(run('chown', 'ubuntu:', JUJU_GUI_SITE))
        cmd_log(
            run('ln', '-s', JUJU_GUI_SITE,
                '/etc/apache2/sites-enabled/juju-gui'))

    if not os.path.exists(JUJU_GUI_PORTS):
        cmd_log(run('touch', JUJU_GUI_PORTS))
        cmd_log(run('chown', 'ubuntu:', JUJU_GUI_PORTS))

    # Site switching requires root.
    with su('root'):
        run('a2dissite', 'default')
        run('a2ensite', 'juju-gui')
2150 | 512 | |||
2151 | 513 | |||
2152 | 514 | def save_or_create_certificates( | ||
2153 | 515 | ssl_cert_path, ssl_cert_contents, ssl_key_contents): | ||
2154 | 516 | """Generate the SSL certificates. | ||
2155 | 517 | |||
2156 | 518 | If both *ssl_cert_contents* and *ssl_key_contents* are provided, use them | ||
2157 | 519 | as certificates; otherwise, generate them. | ||
2158 | 520 | |||
2159 | 521 | Also create a pem file, suitable for use in the haproxy configuration, | ||
2160 | 522 | concatenating the key and the certificate files. | ||
2161 | 523 | """ | ||
2162 | 524 | crt_path = os.path.join(ssl_cert_path, 'juju.crt') | ||
2163 | 525 | key_path = os.path.join(ssl_cert_path, 'juju.key') | ||
2164 | 526 | if not os.path.exists(ssl_cert_path): | ||
2165 | 527 | os.makedirs(ssl_cert_path) | ||
2166 | 528 | if ssl_cert_contents and ssl_key_contents: | ||
2167 | 529 | # Save the provided certificates. | ||
2168 | 530 | with open(crt_path, 'w') as cert_file: | ||
2169 | 531 | cert_file.write(ssl_cert_contents) | ||
2170 | 532 | with open(key_path, 'w') as key_file: | ||
2171 | 533 | key_file.write(ssl_key_contents) | ||
2172 | 534 | else: | ||
2173 | 535 | # Generate certificates. | ||
2174 | 536 | # See http://superuser.com/questions/226192/openssl-without-prompt | ||
2175 | 537 | cmd_log(run( | ||
2176 | 538 | 'openssl', 'req', '-new', '-newkey', 'rsa:4096', | ||
2177 | 539 | '-days', '365', '-nodes', '-x509', '-subj', | ||
2178 | 540 | # These are arbitrary test values for the certificate. | ||
2179 | 541 | '/C=GB/ST=Juju/L=GUI/O=Ubuntu/CN=juju.ubuntu.com', | ||
2180 | 542 | '-keyout', key_path, '-out', crt_path)) | ||
2181 | 543 | # Generate the pem file. | ||
2182 | 544 | pem_path = os.path.join(ssl_cert_path, JUJU_PEM) | ||
2183 | 545 | if os.path.exists(pem_path): | ||
2184 | 546 | os.remove(pem_path) | ||
2185 | 547 | with open(pem_path, 'w') as pem_file: | ||
2186 | 548 | shutil.copyfileobj(open(key_path), pem_file) | ||
2187 | 549 | shutil.copyfileobj(open(crt_path), pem_file) | ||
2188 | 550 | |||
2189 | 551 | |||
2190 | 552 | def find_missing_packages(*packages): | ||
2191 | 553 | """Given a list of packages, return the packages which are not installed. | ||
2192 | 554 | """ | ||
2193 | 555 | cache = apt.Cache() | ||
2194 | 556 | missing = set() | ||
2195 | 557 | for pkg_name in packages: | ||
2196 | 558 | try: | ||
2197 | 559 | pkg = cache[pkg_name] | ||
2198 | 560 | except KeyError: | ||
2199 | 561 | missing.add(pkg_name) | ||
2200 | 562 | continue | ||
2201 | 563 | if pkg.is_installed: | ||
2202 | 564 | continue | ||
2203 | 565 | missing.add(pkg_name) | ||
2204 | 566 | return missing | ||
2205 | 567 | |||
2206 | 568 | |||
2207 | 569 | ## Backend support decorators | ||
2208 | 570 | |||
2209 | 571 | def chain(name): | ||
2210 | 572 | """Helper method to compose a set of mixin objects into a callable. | ||
2211 | 573 | |||
2212 | 574 | Each method is called in the context of its mixin instance, and its | ||
2213 | 575 | argument is the Backend instance. | ||
2214 | 576 | """ | ||
2215 | 577 | # Chain method calls through all implementing mixins. | ||
2216 | 578 | def method(self): | ||
2217 | 579 | for mixin in self.mixins: | ||
2218 | 580 | a_callable = getattr(type(mixin), name, None) | ||
2219 | 581 | if a_callable: | ||
2220 | 582 | a_callable(mixin, self) | ||
2221 | 583 | |||
2222 | 584 | method.__name__ = name | ||
2223 | 585 | return method | ||
2224 | 586 | |||
2225 | 587 | |||
2226 | 588 | def merge(name): | ||
2227 | 589 | """Helper to merge a property from a set of strategy objects | ||
2228 | 590 | into a unified set. | ||
2229 | 591 | """ | ||
2230 | 592 | # Return merged property from every providing mixin as a set. | ||
2231 | 593 | @property | ||
2232 | 594 | def method(self): | ||
2233 | 595 | result = set() | ||
2234 | 596 | for mixin in self.mixins: | ||
2235 | 597 | segment = getattr(type(mixin), name, None) | ||
2236 | 598 | if segment and isinstance(segment, (list, tuple, set)): | ||
2237 | 599 | result |= set(segment) | ||
2238 | 600 | |||
2239 | 601 | return result | ||
2240 | 602 | return method | ||
2241 | 603 | 0 | ||
2242 | === removed directory 'hooks/charmhelpers/contrib/openstack' | |||
2243 | === removed file 'hooks/charmhelpers/contrib/openstack/IMPORT' | |||
2244 | --- hooks/charmhelpers/contrib/openstack/IMPORT 2013-06-07 09:39:50 +0000 | |||
2245 | +++ hooks/charmhelpers/contrib/openstack/IMPORT 1970-01-01 00:00:00 +0000 | |||
2246 | @@ -1,9 +0,0 @@ | |||
2247 | 1 | Source: lp:~openstack-charmers/openstack-charm-helpers/ha-helpers | ||
2248 | 2 | |||
2249 | 3 | ha-helpers/lib/openstack-common -> charm-helpers/charmhelpers/contrib/openstackhelpers/openstack-common | ||
2250 | 4 | ha-helpers/lib/openstack_common.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/openstack_common.py | ||
2251 | 5 | ha-helpers/lib/nova -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova | ||
2252 | 6 | ha-helpers/lib/nova/nova-common -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/nova-common | ||
2253 | 7 | ha-helpers/lib/nova/grizzly -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/grizzly | ||
2254 | 8 | ha-helpers/lib/nova/essex -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/essex | ||
2255 | 9 | ha-helpers/lib/nova/folsom -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/folsom | ||
2256 | 10 | 0 | ||
2257 | === removed file 'hooks/charmhelpers/contrib/openstack/__init__.py' | |||
2258 | === removed directory 'hooks/charmhelpers/contrib/openstack/nova' | |||
2259 | === removed file 'hooks/charmhelpers/contrib/openstack/nova/essex' | |||
2260 | --- hooks/charmhelpers/contrib/openstack/nova/essex 2013-06-07 09:39:50 +0000 | |||
2261 | +++ hooks/charmhelpers/contrib/openstack/nova/essex 1970-01-01 00:00:00 +0000 | |||
2262 | @@ -1,43 +0,0 @@ | |||
2263 | 1 | #!/bin/bash -e | ||
2264 | 2 | |||
2265 | 3 | # Essex-specific functions | ||
2266 | 4 | |||
2267 | 5 | nova_set_or_update() { | ||
2268 | 6 | # Set a config option in nova.conf or api-paste.ini, depending | ||
2269 | 7 | # Defaults to updating nova.conf | ||
2270 | 8 | local key=$1 | ||
2271 | 9 | local value=$2 | ||
2272 | 10 | local conf_file=$3 | ||
2273 | 11 | local pattern="" | ||
2274 | 12 | |||
2275 | 13 | local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} | ||
2276 | 14 | local api_conf=${API_CONF:-/etc/nova/api-paste.ini} | ||
2277 | 15 | local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} | ||
2278 | 16 | [[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1 | ||
2279 | 17 | [[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1 | ||
2280 | 18 | [[ -z "$conf_file" ]] && conf_file=$nova_conf | ||
2281 | 19 | |||
2282 | 20 | case "$conf_file" in | ||
2283 | 21 | "$nova_conf") match="\-\-$key=" | ||
2284 | 22 | pattern="--$key=" | ||
2285 | 23 | out=$pattern | ||
2286 | 24 | ;; | ||
2287 | 25 | "$api_conf"|"$libvirtd_conf") match="^$key = " | ||
2288 | 26 | pattern="$match" | ||
2289 | 27 | out="$key = " | ||
2290 | 28 | ;; | ||
2291 | 29 | *) error_out "ERROR: set_or_update: Invalid conf_file ($conf_file)" | ||
2292 | 30 | esac | ||
2293 | 31 | |||
2294 | 32 | cat $conf_file | grep "$match$value" >/dev/null && | ||
2295 | 33 | juju-log "$CHARM: $key=$value already in set in $conf_file" \ | ||
2296 | 34 | && return 0 | ||
2297 | 35 | if cat $conf_file | grep "$match" >/dev/null ; then | ||
2298 | 36 | juju-log "$CHARM: Updating $conf_file, $key=$value" | ||
2299 | 37 | sed -i "s|\($pattern\).*|\1$value|" $conf_file | ||
2300 | 38 | else | ||
2301 | 39 | juju-log "$CHARM: Setting new option $key=$value in $conf_file" | ||
2302 | 40 | echo "$out$value" >>$conf_file | ||
2303 | 41 | fi | ||
2304 | 42 | CONFIG_CHANGED=True | ||
2305 | 43 | } | ||
2306 | 44 | 0 | ||
2307 | === removed file 'hooks/charmhelpers/contrib/openstack/nova/folsom' | |||
2308 | --- hooks/charmhelpers/contrib/openstack/nova/folsom 2013-06-07 09:39:50 +0000 | |||
2309 | +++ hooks/charmhelpers/contrib/openstack/nova/folsom 1970-01-01 00:00:00 +0000 | |||
2310 | @@ -1,81 +0,0 @@ | |||
2311 | 1 | #!/bin/bash -e | ||
2312 | 2 | |||
2313 | 3 | # Folsom-specific functions | ||
2314 | 4 | |||
2315 | 5 | nova_set_or_update() { | ||
2316 | 6 | # TODO: This needs to be shared among folsom, grizzly and beyond. | ||
2317 | 7 | # Set a config option in nova.conf or api-paste.ini, depending | ||
2318 | 8 | # Defaults to updating nova.conf | ||
2319 | 9 | local key="$1" | ||
2320 | 10 | local value="$2" | ||
2321 | 11 | local conf_file="$3" | ||
2322 | 12 | local section="${4:-DEFAULT}" | ||
2323 | 13 | |||
2324 | 14 | local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} | ||
2325 | 15 | local api_conf=${API_CONF:-/etc/nova/api-paste.ini} | ||
2326 | 16 | local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf} | ||
2327 | 17 | local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini} | ||
2328 | 18 | local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini} | ||
2329 | 19 | local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} | ||
2330 | 20 | |||
2331 | 21 | [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1 | ||
2332 | 22 | [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1 | ||
2333 | 23 | |||
2334 | 24 | [[ -z "$conf_file" ]] && conf_file=$nova_conf | ||
2335 | 25 | |||
2336 | 26 | local pattern="" | ||
2337 | 27 | case "$conf_file" in | ||
2338 | 28 | "$nova_conf") match="^$key=" | ||
2339 | 29 | pattern="$key=" | ||
2340 | 30 | out=$pattern | ||
2341 | 31 | ;; | ||
2342 | 32 | "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \ | ||
2343 | 33 | "$libvirtd_conf") | ||
2344 | 34 | match="^$key = " | ||
2345 | 35 | pattern="$match" | ||
2346 | 36 | out="$key = " | ||
2347 | 37 | ;; | ||
2348 | 38 | *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)" | ||
2349 | 39 | esac | ||
2350 | 40 | |||
2351 | 41 | cat $conf_file | grep "$match$value" >/dev/null && | ||
2352 | 42 | juju-log "$CHARM: $key=$value already in set in $conf_file" \ | ||
2353 | 43 | && return 0 | ||
2354 | 44 | |||
2355 | 45 | case $conf_file in | ||
2356 | 46 | "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf") | ||
2357 | 47 | python -c " | ||
2358 | 48 | import ConfigParser | ||
2359 | 49 | config = ConfigParser.RawConfigParser() | ||
2360 | 50 | config.read('$conf_file') | ||
2361 | 51 | config.set('$section','$key','$value') | ||
2362 | 52 | with open('$conf_file', 'wb') as configfile: | ||
2363 | 53 | config.write(configfile) | ||
2364 | 54 | " | ||
2365 | 55 | ;; | ||
2366 | 56 | *) | ||
2367 | 57 | if cat $conf_file | grep "$match" >/dev/null ; then | ||
2368 | 58 | juju-log "$CHARM: Updating $conf_file, $key=$value" | ||
2369 | 59 | sed -i "s|\($pattern\).*|\1$value|" $conf_file | ||
2370 | 60 | else | ||
2371 | 61 | juju-log "$CHARM: Setting new option $key=$value in $conf_file" | ||
2372 | 62 | echo "$out$value" >>$conf_file | ||
2373 | 63 | fi | ||
2374 | 64 | ;; | ||
2375 | 65 | esac | ||
2376 | 66 | CONFIG_CHANGED="True" | ||
2377 | 67 | } | ||
2378 | 68 | |||
2379 | 69 | # Upgrade Helpers | ||
2380 | 70 | nova_pre_upgrade() { | ||
2381 | 71 | # Pre-upgrade helper. Caller should pass the version of OpenStack we are | ||
2382 | 72 | # upgrading from. | ||
2383 | 73 | return 0 # Nothing to do here, yet. | ||
2384 | 74 | } | ||
2385 | 75 | |||
2386 | 76 | nova_post_upgrade() { | ||
2387 | 77 | # Post-upgrade helper. Caller should pass the version of OpenStack we are | ||
2388 | 78 | # upgrading from. | ||
2389 | 79 | juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom." | ||
2390 | 80 | # nothing to do here yet. | ||
2391 | 81 | } | ||
2392 | 82 | 0 | ||
2393 | === removed symlink 'hooks/charmhelpers/contrib/openstack/nova/grizzly' | |||
2394 | === target was u'folsom' | |||
2395 | === removed file 'hooks/charmhelpers/contrib/openstack/nova/nova-common' | |||
2396 | --- hooks/charmhelpers/contrib/openstack/nova/nova-common 2013-06-07 09:39:50 +0000 | |||
2397 | +++ hooks/charmhelpers/contrib/openstack/nova/nova-common 1970-01-01 00:00:00 +0000 | |||
2398 | @@ -1,147 +0,0 @@ | |||
2399 | 1 | #!/bin/bash -e | ||
2400 | 2 | |||
2401 | 3 | # Common utility functions used across all nova charms. | ||
2402 | 4 | |||
2403 | 5 | CONFIG_CHANGED=False | ||
2404 | 6 | |||
2405 | 7 | # Load the common OpenStack helper library. | ||
2406 | 8 | if [[ -e $CHARM_DIR/lib/openstack-common ]] ; then | ||
2407 | 9 | . $CHARM_DIR/lib/openstack-common | ||
2408 | 10 | else | ||
2409 | 11 | juju-log "Couldn't load $CHARM_DIR/lib/opentack-common." && exit 1 | ||
2410 | 12 | fi | ||
2411 | 13 | |||
2412 | 14 | set_or_update() { | ||
2413 | 15 | # Update config flags in nova.conf or api-paste.ini. | ||
2414 | 16 | # Config layout changed in Folsom, so this is now OpenStack release specific. | ||
2415 | 17 | local rel=$(get_os_codename_package "nova-common") | ||
2416 | 18 | . $CHARM_DIR/lib/nova/$rel | ||
2417 | 19 | nova_set_or_update $@ | ||
2418 | 20 | } | ||
2419 | 21 | |||
2420 | 22 | function set_config_flags() { | ||
2421 | 23 | # Set user-defined nova.conf flags from deployment config | ||
2422 | 24 | juju-log "$CHARM: Processing config-flags." | ||
2423 | 25 | flags=$(config-get config-flags) | ||
2424 | 26 | if [[ "$flags" != "None" && -n "$flags" ]] ; then | ||
2425 | 27 | for f in $(echo $flags | sed -e 's/,/ /g') ; do | ||
2426 | 28 | k=$(echo $f | cut -d= -f1) | ||
2427 | 29 | v=$(echo $f | cut -d= -f2) | ||
2428 | 30 | set_or_update "$k" "$v" | ||
2429 | 31 | done | ||
2430 | 32 | fi | ||
2431 | 33 | } | ||
2432 | 34 | |||
2433 | 35 | configure_volume_service() { | ||
2434 | 36 | local svc="$1" | ||
2435 | 37 | local cur_vers="$(get_os_codename_package "nova-common")" | ||
2436 | 38 | case "$svc" in | ||
2437 | 39 | "cinder") | ||
2438 | 40 | set_or_update "volume_api_class" "nova.volume.cinder.API" ;; | ||
2439 | 41 | "nova-volume") | ||
2440 | 42 | # nova-volume only supported before grizzly. | ||
2441 | 43 | [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] && | ||
2442 | 44 | set_or_update "volume_api_class" "nova.volume.api.API" | ||
2443 | 45 | ;; | ||
2444 | 46 | *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc" | ||
2445 | 47 | return 1 ;; | ||
2446 | 48 | esac | ||
2447 | 49 | } | ||
2448 | 50 | |||
2449 | 51 | function configure_network_manager { | ||
2450 | 52 | local manager="$1" | ||
2451 | 53 | echo "$CHARM: configuring $manager network manager" | ||
2452 | 54 | case $1 in | ||
2453 | 55 | "FlatManager") | ||
2454 | 56 | set_or_update "network_manager" "nova.network.manager.FlatManager" | ||
2455 | 57 | ;; | ||
2456 | 58 | "FlatDHCPManager") | ||
2457 | 59 | set_or_update "network_manager" "nova.network.manager.FlatDHCPManager" | ||
2458 | 60 | |||
2459 | 61 | if [[ "$CHARM" == "nova-compute" ]] ; then | ||
2460 | 62 | local flat_interface=$(config-get flat-interface) | ||
2461 | 63 | local ec2_host=$(relation-get ec2_host) | ||
2462 | 64 | set_or_update flat_inteface "$flat_interface" | ||
2463 | 65 | set_or_update ec2_dmz_host "$ec2_host" | ||
2464 | 66 | |||
2465 | 67 | # Ensure flat_interface has link. | ||
2466 | 68 | if ip link show $flat_interface >/dev/null 2>&1 ; then | ||
2467 | 69 | ip link set $flat_interface up | ||
2468 | 70 | fi | ||
2469 | 71 | |||
2470 | 72 | # work around (LP: #1035172) | ||
2471 | 73 | if [[ -e /dev/vhost-net ]] ; then | ||
2472 | 74 | iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \ | ||
2473 | 75 | --checksum-fill | ||
2474 | 76 | fi | ||
2475 | 77 | fi | ||
2476 | 78 | |||
2477 | 79 | ;; | ||
2478 | 80 | "Quantum") | ||
2479 | 81 | local local_ip=$(get_ip `unit-get private-address`) | ||
2480 | 82 | [[ -n $local_ip ]] || { | ||
2481 | 83 | juju-log "Unable to resolve local IP address" | ||
2482 | 84 | exit 1 | ||
2483 | 85 | } | ||
2484 | 86 | set_or_update "network_api_class" "nova.network.quantumv2.api.API" | ||
2485 | 87 | set_or_update "quantum_auth_strategy" "keystone" | ||
2486 | 88 | set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF" | ||
2487 | 89 | set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF" | ||
2488 | 90 | if [ "$QUANTUM_PLUGIN" == "ovs" ]; then | ||
2489 | 91 | set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS" | ||
2490 | 92 | set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS" | ||
2491 | 93 | set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS" | ||
2492 | 94 | set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS" | ||
2493 | 95 | fi | ||
2494 | 96 | ;; | ||
2495 | 97 | *) juju-log "ERROR: Invalid network manager $1" && exit 1 ;; | ||
2496 | 98 | esac | ||
2497 | 99 | } | ||
2498 | 100 | |||
2499 | 101 | function trigger_remote_service_restarts() { | ||
2500 | 102 | # Trigger a service restart on all other nova nodes that have a relation | ||
2501 | 103 | # via the cloud-controller interface. | ||
2502 | 104 | |||
2503 | 105 | # possible relations to other nova services. | ||
2504 | 106 | local relations="cloud-compute nova-volume-service" | ||
2505 | 107 | |||
2506 | 108 | for rel in $relations; do | ||
2507 | 109 | local r_ids=$(relation-ids $rel) | ||
2508 | 110 | for r_id in $r_ids ; do | ||
2509 | 111 | juju-log "$CHARM: Triggering a service restart on relation $r_id." | ||
2510 | 112 | relation-set -r $r_id restart-trigger=$(uuid) | ||
2511 | 113 | done | ||
2512 | 114 | done | ||
2513 | 115 | } | ||
2514 | 116 | |||
2515 | 117 | do_openstack_upgrade() { | ||
2516 | 118 | # update openstack components to those provided by a new installation source | ||
2517 | 119 | # it is assumed the calling hook has confirmed that the upgrade is sane. | ||
2518 | 120 | local rel="$1" | ||
2519 | 121 | shift | ||
2520 | 122 | local packages=$@ | ||
2521 | 123 | |||
2522 | 124 | orig_os_rel=$(get_os_codename_package "nova-common") | ||
2523 | 125 | new_rel=$(get_os_codename_install_source "$rel") | ||
2524 | 126 | |||
2525 | 127 | # Backup the config directory. | ||
2526 | 128 | local stamp=$(date +"%Y%m%d%M%S") | ||
2527 | 129 | tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR | ||
2528 | 130 | |||
2529 | 131 | # load the release helper library for pre/post upgrade hooks specific to the | ||
2530 | 132 | # release we are upgrading to. | ||
2531 | 133 | . $CHARM_DIR/lib/nova/$new_rel | ||
2532 | 134 | |||
2533 | 135 | # new release specific pre-upgrade hook | ||
2534 | 136 | nova_pre_upgrade "$orig_os_rel" | ||
2535 | 137 | |||
2536 | 138 | # Setup apt repository access and kick off the actual package upgrade. | ||
2537 | 139 | configure_install_source "$rel" | ||
2538 | 140 | apt-get update | ||
2539 | 141 | DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \ | ||
2540 | 142 | install --no-install-recommends $packages | ||
2541 | 143 | |||
2542 | 144 | # new release sepcific post-upgrade hook | ||
2543 | 145 | nova_post_upgrade "$orig_os_rel" | ||
2544 | 146 | |||
2545 | 147 | } | ||
2546 | 148 | 0 | ||
2547 | === removed file 'hooks/charmhelpers/contrib/openstack/openstack-common' | |||
2548 | --- hooks/charmhelpers/contrib/openstack/openstack-common 2013-06-07 09:39:50 +0000 | |||
2549 | +++ hooks/charmhelpers/contrib/openstack/openstack-common 1970-01-01 00:00:00 +0000 | |||
2550 | @@ -1,781 +0,0 @@ | |||
2551 | 1 | #!/bin/bash -e | ||
2552 | 2 | |||
2553 | 3 | # Common utility functions used across all OpenStack charms. | ||
2554 | 4 | |||
2555 | 5 | error_out() { | ||
2556 | 6 | juju-log "$CHARM ERROR: $@" | ||
2557 | 7 | exit 1 | ||
2558 | 8 | } | ||
2559 | 9 | |||
2560 | 10 | function service_ctl_status { | ||
2561 | 11 | # Return 0 if a service is running, 1 otherwise. | ||
2562 | 12 | local svc="$1" | ||
2563 | 13 | local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }') | ||
2564 | 14 | case $status in | ||
2565 | 15 | "start") return 0 ;; | ||
2566 | 16 | "stop") return 1 ;; | ||
2567 | 17 | *) error_out "Unexpected status of service $svc: $status" ;; | ||
2568 | 18 | esac | ||
2569 | 19 | } | ||
2570 | 20 | |||
2571 | 21 | function service_ctl { | ||
2572 | 22 | # control a specific service, or all (as defined by $SERVICES) | ||
2573 | 23 | # service restarts will only occur depending on global $CONFIG_CHANGED, | ||
2574 | 24 | # which should be updated in charm's set_or_update(). | ||
2575 | 25 | local config_changed=${CONFIG_CHANGED:-True} | ||
2576 | 26 | if [[ $1 == "all" ]] ; then | ||
2577 | 27 | ctl="$SERVICES" | ||
2578 | 28 | else | ||
2579 | 29 | ctl="$1" | ||
2580 | 30 | fi | ||
2581 | 31 | action="$2" | ||
2582 | 32 | if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then | ||
2583 | 33 | error_out "ERROR service_ctl: Not enough arguments" | ||
2584 | 34 | fi | ||
2585 | 35 | |||
2586 | 36 | for i in $ctl ; do | ||
2587 | 37 | case $action in | ||
2588 | 38 | "start") | ||
2589 | 39 | service_ctl_status $i || service $i start ;; | ||
2590 | 40 | "stop") | ||
2591 | 41 | service_ctl_status $i && service $i stop || return 0 ;; | ||
2592 | 42 | "restart") | ||
2593 | 43 | if [[ "$config_changed" == "True" ]] ; then | ||
2594 | 44 | service_ctl_status $i && service $i restart || service $i start | ||
2595 | 45 | fi | ||
2596 | 46 | ;; | ||
2597 | 47 | esac | ||
2598 | 48 | if [[ $? != 0 ]] ; then | ||
2599 | 49 | juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action" | ||
2600 | 50 | fi | ||
2601 | 51 | done | ||
2602 | 52 | # all configs should have been reloaded on restart of all services, reset | ||
2603 | 53 | # flag if its being used. | ||
2604 | 54 | if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] && | ||
2605 | 55 | [[ "$ctl" == "all" ]]; then | ||
2606 | 56 | CONFIG_CHANGED="False" | ||
2607 | 57 | fi | ||
2608 | 58 | } | ||
2609 | 59 | |||
2610 | 60 | function configure_install_source { | ||
2611 | 61 | # Setup and configure installation source based on a config flag. | ||
2612 | 62 | local src="$1" | ||
2613 | 63 | |||
2614 | 64 | # Default to installing from the main Ubuntu archive. | ||
2615 | 65 | [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0 | ||
2616 | 66 | |||
2617 | 67 | . /etc/lsb-release | ||
2618 | 68 | |||
2619 | 69 | # standard 'ppa:someppa/name' format. | ||
2620 | 70 | if [[ "${src:0:4}" == "ppa:" ]] ; then | ||
2621 | 71 | juju-log "$CHARM: Configuring installation from custom src ($src)" | ||
2622 | 72 | add-apt-repository -y "$src" || error_out "Could not configure PPA access." | ||
2623 | 73 | return 0 | ||
2624 | 74 | fi | ||
2625 | 75 | |||
2626 | 76 | # standard 'deb http://url/ubuntu main' entries. gpg key ids must | ||
2627 | 77 | # be appended to the end of url after a |, ie: | ||
2628 | 78 | # 'deb http://url/ubuntu main|$GPGKEYID' | ||
2629 | 79 | if [[ "${src:0:3}" == "deb" ]] ; then | ||
2630 | 80 | juju-log "$CHARM: Configuring installation from custom src URL ($src)" | ||
2631 | 81 | if echo "$src" | grep -q "|" ; then | ||
2632 | 82 | # gpg key id tagged to end of url folloed by a | | ||
2633 | 83 | url=$(echo $src | cut -d'|' -f1) | ||
2634 | 84 | key=$(echo $src | cut -d'|' -f2) | ||
2635 | 85 | juju-log "$CHARM: Importing repository key: $key" | ||
2636 | 86 | apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \ | ||
2637 | 87 | juju-log "$CHARM WARN: Could not import key from keyserver: $key" | ||
2638 | 88 | else | ||
2639 | 89 | juju-log "$CHARM No repository key specified." | ||
2640 | 90 | url="$src" | ||
2641 | 91 | fi | ||
2642 | 92 | echo "$url" > /etc/apt/sources.list.d/juju_deb.list | ||
2643 | 93 | return 0 | ||
2644 | 94 | fi | ||
2645 | 95 | |||
2646 | 96 | # Cloud Archive | ||
2647 | 97 | if [[ "${src:0:6}" == "cloud:" ]] ; then | ||
2648 | 98 | |||
2649 | 99 | # current os releases supported by the UCA. | ||
2650 | 100 | local cloud_archive_versions="folsom grizzly" | ||
2651 | 101 | |||
2652 | 102 | local ca_rel=$(echo $src | cut -d: -f2) | ||
2653 | 103 | local u_rel=$(echo $ca_rel | cut -d- -f1) | ||
2654 | 104 | local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1) | ||
2655 | 105 | |||
2656 | 106 | [[ "$u_rel" != "$DISTRIB_CODENAME" ]] && | ||
2657 | 107 | error_out "Cannot install from Cloud Archive pocket $src " \ | ||
2658 | 108 | "on this Ubuntu version ($DISTRIB_CODENAME)!" | ||
2659 | 109 | |||
2660 | 110 | valid_release="" | ||
2661 | 111 | for rel in $cloud_archive_versions ; do | ||
2662 | 112 | if [[ "$os_rel" == "$rel" ]] ; then | ||
2663 | 113 | valid_release=1 | ||
2664 | 114 | juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive." | ||
2665 | 115 | fi | ||
2666 | 116 | done | ||
2667 | 117 | if [[ -z "$valid_release" ]] ; then | ||
2668 | 118 | error_out "OpenStack release ($os_rel) not supported by "\ | ||
2669 | 119 | "the Ubuntu Cloud Archive." | ||
2670 | 120 | fi | ||
2671 | 121 | |||
2672 | 122 | # CA staging repos are standard PPAs. | ||
2673 | 123 | if echo $ca_rel | grep -q "staging" ; then | ||
2674 | 124 | add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging | ||
2675 | 125 | return 0 | ||
2676 | 126 | fi | ||
2677 | 127 | |||
2678 | 128 | # the others are LP-external deb repos. | ||
2679 | 129 | case "$ca_rel" in | ||
2680 | 130 | "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; | ||
2681 | 131 | "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; | ||
2682 | 132 | "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; | ||
2683 | 133 | "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; | ||
2684 | 134 | *) error_out "Invalid Cloud Archive repo specified: $src" | ||
2685 | 135 | esac | ||
2686 | 136 | |||
2687 | 137 | apt-get -y install ubuntu-cloud-keyring | ||
2688 | 138 | entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main" | ||
2689 | 139 | echo "$entry" \ | ||
2690 | 140 | >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list | ||
2691 | 141 | return 0 | ||
2692 | 142 | fi | ||
2693 | 143 | |||
2694 | 144 | error_out "Invalid installation source specified in config: $src" | ||
2695 | 145 | |||
2696 | 146 | } | ||
2697 | 147 | |||
2698 | 148 | get_os_codename_install_source() { | ||
2699 | 149 | # derive the openstack release provided by a supported installation source. | ||
2700 | 150 | local rel="$1" | ||
2701 | 151 | local codename="unknown" | ||
2702 | 152 | . /etc/lsb-release | ||
2703 | 153 | |||
2704 | 154 | # map ubuntu releases to the openstack version shipped with it. | ||
2705 | 155 | if [[ "$rel" == "distro" ]] ; then | ||
2706 | 156 | case "$DISTRIB_CODENAME" in | ||
2707 | 157 | "oneiric") codename="diablo" ;; | ||
2708 | 158 | "precise") codename="essex" ;; | ||
2709 | 159 | "quantal") codename="folsom" ;; | ||
2710 | 160 | "raring") codename="grizzly" ;; | ||
2711 | 161 | esac | ||
2712 | 162 | fi | ||
2713 | 163 | |||
2714 | 164 | # derive version from cloud archive strings. | ||
2715 | 165 | if [[ "${rel:0:6}" == "cloud:" ]] ; then | ||
2716 | 166 | rel=$(echo $rel | cut -d: -f2) | ||
2717 | 167 | local u_rel=$(echo $rel | cut -d- -f1) | ||
2718 | 168 | local ca_rel=$(echo $rel | cut -d- -f2) | ||
2719 | 169 | if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then | ||
2720 | 170 | case "$ca_rel" in | ||
2721 | 171 | "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging") | ||
2722 | 172 | codename="folsom" ;; | ||
2723 | 173 | "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging") | ||
2724 | 174 | codename="grizzly" ;; | ||
2725 | 175 | esac | ||
2726 | 176 | fi | ||
2727 | 177 | fi | ||
2728 | 178 | |||
2729 | 179 | # have a guess based on the deb string provided | ||
2730 | 180 | if [[ "${rel:0:3}" == "deb" ]] || \ | ||
2731 | 181 | [[ "${rel:0:3}" == "ppa" ]] ; then | ||
2732 | 182 | CODENAMES="diablo essex folsom grizzly havana" | ||
2733 | 183 | for cname in $CODENAMES; do | ||
2734 | 184 | if echo $rel | grep -q $cname; then | ||
2735 | 185 | codename=$cname | ||
2736 | 186 | fi | ||
2737 | 187 | done | ||
2738 | 188 | fi | ||
2739 | 189 | echo $codename | ||
2740 | 190 | } | ||
2741 | 191 | |||
2742 | 192 | get_os_codename_package() { | ||
2743 | 193 | local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none" | ||
2744 | 194 | pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs | ||
2745 | 195 | case "${pkg_vers:0:6}" in | ||
2746 | 196 | "2011.2") echo "diablo" ;; | ||
2747 | 197 | "2012.1") echo "essex" ;; | ||
2748 | 198 | "2012.2") echo "folsom" ;; | ||
2749 | 199 | "2013.1") echo "grizzly" ;; | ||
2750 | 200 | "2013.2") echo "havana" ;; | ||
2751 | 201 | esac | ||
2752 | 202 | } | ||
2753 | 203 | |||
2754 | 204 | get_os_version_codename() { | ||
2755 | 205 | case "$1" in | ||
2756 | 206 | "diablo") echo "2011.2" ;; | ||
2757 | 207 | "essex") echo "2012.1" ;; | ||
2758 | 208 | "folsom") echo "2012.2" ;; | ||
2759 | 209 | "grizzly") echo "2013.1" ;; | ||
2760 | 210 | "havana") echo "2013.2" ;; | ||
2761 | 211 | esac | ||
2762 | 212 | } | ||
2763 | 213 | |||
2764 | 214 | get_ip() { | ||
2765 | 215 | dpkg -l | grep -q python-dnspython || { | ||
2766 | 216 | apt-get -y install python-dnspython 2>&1 > /dev/null | ||
2767 | 217 | } | ||
2768 | 218 | hostname=$1 | ||
2769 | 219 | python -c " | ||
2770 | 220 | import dns.resolver | ||
2771 | 221 | import socket | ||
2772 | 222 | try: | ||
2773 | 223 | # Test to see if already an IPv4 address | ||
2774 | 224 | socket.inet_aton('$hostname') | ||
2775 | 225 | print '$hostname' | ||
2776 | 226 | except socket.error: | ||
2777 | 227 | try: | ||
2778 | 228 | answers = dns.resolver.query('$hostname', 'A') | ||
2779 | 229 | if answers: | ||
2780 | 230 | print answers[0].address | ||
2781 | 231 | except dns.resolver.NXDOMAIN: | ||
2782 | 232 | pass | ||
2783 | 233 | " | ||
2784 | 234 | } | ||
2785 | 235 | |||
2786 | 236 | # Common storage routines used by cinder, nova-volume and swift-storage. | ||
2787 | 237 | clean_storage() { | ||
2788 | 238 | # if configured to overwrite existing storage, we unmount the block-dev | ||
2789 | 239 | # if mounted and clear any previous pv signatures | ||
2790 | 240 | local block_dev="$1" | ||
2791 | 241 | juju-log "Cleaining storage '$block_dev'" | ||
2792 | 242 | if grep -q "^$block_dev" /proc/mounts ; then | ||
2793 | 243 | mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }') | ||
2794 | 244 | juju-log "Unmounting $block_dev from $mp" | ||
2795 | 245 | umount "$mp" || error_out "ERROR: Could not unmount storage from $mp" | ||
2796 | 246 | fi | ||
2797 | 247 | if pvdisplay "$block_dev" >/dev/null 2>&1 ; then | ||
2798 | 248 | juju-log "Removing existing LVM PV signatures from $block_dev" | ||
2799 | 249 | |||
2800 | 250 | # deactivate any volgroups that may be built on this dev | ||
2801 | 251 | vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }') | ||
2802 | 252 | if [[ -n "$vg" ]] ; then | ||
2803 | 253 | juju-log "Deactivating existing volume group: $vg" | ||
2804 | 254 | vgchange -an "$vg" || | ||
2805 | 255 | error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?" | ||
2806 | 256 | fi | ||
2807 | 257 | echo "yes" | pvremove -ff "$block_dev" || | ||
2808 | 258 | error_out "Could not pvremove $block_dev" | ||
2809 | 259 | else | ||
2810 | 260 | juju-log "Zapping disk of all GPT and MBR structures" | ||
2811 | 261 | sgdisk --zap-all $block_dev || | ||
2812 | 262 | error_out "Unable to zap $block_dev" | ||
2813 | 263 | fi | ||
2814 | 264 | } | ||
2815 | 265 | |||
##########################################################################
# Description: Resolve a storage spec to a block device path, creating a
# loopback-backed device when the spec names a plain file.
# Parameters: $1 - "/dev/xxx", a bare device name, or "/path/to/file[|size]"
#             (size defaults to 5G for file-backed devices)
# Returns: prints the device path; exit 0 on success, non-zero on error.
##########################################################################
function get_block_device() {
  # given a string, return full path to the block device for that
  # if input is not a block device, find a loopback device
  local input="$1"

  case "$input" in
    /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist."
            echo "$input"; return 0;;
    /*) :;;  # absolute path outside /dev: fall through, treat as a file
    *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist."
       echo "/dev/$input"; return 0;;
  esac

  # this represents a file
  # support "/path/to/file|5G"
  local fpath size oifs="$IFS"
  if [ "${input#*|}" != "${input}" ]; then
    size=${input##*|}
    fpath=${input%|*}
  else
    fpath=${input}
    size=5G
  fi

  ## loop devices are not namespaced. This is bad for containers.
  ## it means that the output of 'losetup' may have the given $fpath
  ## in it, but that may not represent this container's $fpath, but
  ## another container's. To address that, we really need to
  ## allow some unique container-id to be expanded within path.
  ## TODO: find a unique container-id that will be consistent for
  ##       this container throughout its lifetime and expand it
  ##       in the fpath.
  # fpath=${fpath//%{id}/$THAT_ID}

  local found=""
  # parse through 'losetup -a' output, looking for this file
  # output is expected to look like:
  # /dev/loop0: [0807]:961814 (/tmp/my.img)
  found=$(losetup -a |
    awk 'BEGIN { found=0; }
         $3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
         END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
    f="($fpath)")

  if [ $? -ne 0 ]; then
    echo "multiple devices found for $fpath: $found" 1>&2
    return 1;
  fi

  # BUGFIX: finding an existing loop device for $fpath is the success
  # path; the original returned 1 here, so callers using
  # "get_block_device ... || error" failed on a perfectly good device.
  [ -n "$found" -a -b "$found" ] && { echo "$found"; return 0; }

  if [ -n "$found" ]; then
    echo "confused, $found is not a block device for $fpath";
    return 1;
  fi

  # no existing device was found, create one
  mkdir -p "${fpath%/*}"
  truncate --size "$size" "$fpath" ||
    { echo "failed to create $fpath of size $size"; return 1; }

  found=$(losetup --find --show "$fpath") ||
    { echo "failed to setup loop device for $fpath" 1>&2; return 1; }

  echo "$found"
  return 0
}
2883 | 333 | |||
# Paths written by configure_haproxy below.
HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_DEFAULT=/etc/default/haproxy
##########################################################################
# Description: Configures HAProxy services for Openstack API's
# Parameters:
#   Space delimited list of service:haproxy_port:api_port[:mode]
#   combinations for which haproxy service configuration should be
#   generated.  The function assumes the name of the peer relation is
#   'cluster' and that every service unit in the peer relation is
#   running the same services.
#
#   Services that do not specify :mode in parameter will default to http.
#
# Example
#   configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
##########################################################################
configure_haproxy() {
  local address=`unit-get private-address`
  # JUJU_UNIT_NAME is "service/N"; '/' is not valid in a haproxy server
  # name, so replace it with '-'.
  local name=${JUJU_UNIT_NAME////-}
  # rewrite the whole config from scratch on every call
  cat > $HAPROXY_CFG << EOF
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 20000
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
    timeout client 30000
    timeout server 30000

listen stats :8888
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /
    stats auth admin:password

EOF
  for service in $@; do
    # split "service:haproxy_port:api_port[:mode]"
    local service_name=$(echo $service | cut -d : -f 1)
    local haproxy_listen_port=$(echo $service | cut -d : -f 2)
    local api_listen_port=$(echo $service | cut -d : -f 3)
    local mode=$(echo $service | cut -d : -f 4)
    [[ -z "$mode" ]] && mode="http"
    juju-log "Adding haproxy configuration entry for $service "\
             "($haproxy_listen_port -> $api_listen_port)"
    # this unit is always a backend for its own listener
    cat >> $HAPROXY_CFG << EOF
listen $service_name 0.0.0.0:$haproxy_listen_port
    balance roundrobin
    mode $mode
    option ${mode}log
    server $name $address:$api_listen_port check
EOF
    local r_id=""
    local unit=""
    # add every peer on the 'cluster' relation as an additional backend
    for r_id in `relation-ids cluster`; do
      for unit in `relation-list -r $r_id`; do
        local unit_name=${unit////-}
        local unit_address=`relation-get -r $r_id private-address $unit`
        if [ -n "$unit_address" ]; then
          echo "    server $unit_name $unit_address:$api_listen_port check" \
            >> $HAPROXY_CFG
        fi
      done
    done
  done
  # enable the sysv init wrapper and pick up the new config
  echo "ENABLED=1" > $HAPROXY_DEFAULT
  service haproxy restart
}
2962 | 412 | |||
##########################################################################
# Description: Query HA interface to determine if cluster is configured
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
  local rid=""
  local member=""
  # Any unit on any 'ha' relation publishing a non-empty 'clustered'
  # value means this service is managed by the cluster.
  for rid in $(relation-ids ha); do
    [ -z "$rid" ] && continue
    for member in $(relation-list -r $rid); do
      clustered=$(relation-get -r $rid clustered $member)
      if [ -n "$clustered" ]; then
        juju-log "Unit is haclustered"
        return 0
      fi
    done
  done
  juju-log "Unit is not haclustered"
  return 1
}
2984 | 434 | |||
##########################################################################
# Description: Return a list of all peers in cluster relations
##########################################################################
peer_units() {
  local rid=""
  local result=""
  # collect unit names from every 'cluster' relation id
  for rid in $(relation-ids cluster); do
    result="$result $(relation-list -r $rid)"
  done
  # unquoted echo collapses the extra whitespace
  echo $result
}
2996 | 446 | |||
##########################################################################
# Description: Determines whether the current unit is the oldest of all
#              its peers - supports partial leader election
# Parameters: $1 - space separated list of peer unit names
# Returns: 0 if oldest, 1 if not
##########################################################################
oldest_peer() {
  peers=$1
  # unit number of this unit ("service/N" -> N)
  local my_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
  for peer in $peers; do
    echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
    local peer_no=$(echo $peer | cut -d / -f 2)
    # any peer with a lower unit number is older than us
    if ((peer_no < my_no)); then
      juju-log "Not oldest peer; deferring"
      return 1
    fi
  done
  juju-log "Oldest peer; might take charge?"
  return 0
}
3016 | 466 | |||
##########################################################################
# Description: Determines whether the current service unit is the
#              leader within a) a cluster of its peers or b) across a
#              set of unclustered peers.
# Parameters: CRM resource to check ownership of if clustered
# Returns: 0 if leader, 1 if not
##########################################################################
eligible_leader() {
  if is_clustered; then
    # clustered: defer to the CRM resource owner
    is_leader $1 && return 0
    juju-log 'Deferring action to CRM leader'
    return 1
  fi
  # unclustered: the oldest peer (lowest unit number) takes charge
  peers=$(peer_units)
  if [ -n "$peers" ] && ! oldest_peer "$peers"; then
    juju-log 'Deferring action to oldest service unit.'
    return 1
  fi
  return 0
}
3039 | 489 | |||
##########################################################################
# Description: Query cluster peer interface to see if peered
# Returns: 0 if peered, 1 if not peered
##########################################################################
is_peered() {
  # Iterate every 'cluster' relation id instead of capturing the whole
  # of `relation-ids cluster` into a single variable: the old code broke
  # when more than one relation id was returned (multi-word expansion
  # passed to relation-list -r).  Mirrors the loop style of is_clustered
  # and peer_units.
  local r_id=""
  for r_id in $(relation-ids cluster); do
    if [ -n "$(relation-list -r $r_id)" ]; then
      juju-log "Unit peered"
      return 0
    fi
  done
  juju-log "Unit not peered"
  return 1
}
3055 | 505 | |||
##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters: Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
  hostname=`hostname`
  # only meaningful when the pacemaker CLI is installed
  if [ -x /usr/sbin/crm ]; then
    crm resource show $1 | grep -q $hostname && {
      juju-log "$hostname is cluster leader."
      return 0
    }
  fi
  juju-log "$hostname is not cluster leader."
  return 1
}
3072 | 522 | |||
##########################################################################
# Description: Determines whether enough data has been provided in
# configuration or relation data to configure HTTPS.
# Parameters: None
# Returns: 0 if HTTPS can be configured, 1 if not.
##########################################################################
https() {
  local r_id=""
  # manually configured cert + key in charm config wins outright
  if [[ -n "$(config-get ssl_cert)" && -n "$(config-get ssl_key)" ]] ; then
    return 0
  fi
  # otherwise look for a keystone unit publishing full SSL data
  for r_id in $(relation-ids identity-service) ; do
    for unit in $(relation-list -r $r_id) ; do
      if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" &&
            -n "$(relation-get -r $r_id ssl_cert $unit)" &&
            -n "$(relation-get -r $r_id ssl_key $unit)" &&
            -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then
        return 0
      fi
    done
  done
  return 1
}
3097 | 547 | |||
##########################################################################
# Description: For a given number of port mappings, configures apache2
# HTTPS local reverse proxying using certificates and keys provided in
# either configuration data (preferred) or relation data. Assumes ports
# are not in use (calling charm should ensure that).
# Parameters: Variable number of proxy port mappings as
# $external:$internal (field 1 is the apache listen port, field 2 the
# local backend port).
# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
##########################################################################
enable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Enabling HTTPS for port mappings: $port_maps."

  # allow overriding of keystone provided certs with those set manually
  # in config.
  local cert=$(config-get ssl_cert)
  local key=$(config-get ssl_key)
  local ca_cert=""
  if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
    # fall back to the first cert/key/ca_cert published on any
    # identity-service relation; relation values are base64 encoded.
    juju-log "Inspecting identity-service relations for SSL certificate."
    local r_id=""
    cert=""
    key=""
    ca_cert=""
    for r_id in $(relation-ids identity-service) ; do
      for unit in $(relation-list -r $r_id) ; do
        [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
        [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
        [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
      done
    done
    [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
    [[ -n "$key" ]] && key=$(echo $key | base64 -di)
    [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
  else
    juju-log "Using SSL certificate provided in service config."
  fi

  # NOTE: || and && have equal precedence and associate left, so this
  # logs and returns 1 whenever either cert or key is still empty.
  [[ -z "$cert" ]] || [[ -z "$key" ]] &&
    juju-log "Expected but could not find SSL certificate data, not "\
             "configuring HTTPS!" && return 1

  apt-get -y install apache2
  # filter a2enmod's hint line; any remaining output sets the restart flag
  a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
    http_restart=1

  mkdir -p /etc/apache2/ssl/$CHARM
  echo "$cert" >/etc/apache2/ssl/$CHARM/cert
  echo "$key" >/etc/apache2/ssl/$CHARM/key
  if [[ -n "$ca_cert" ]] ; then
    # trust the keystone-supplied CA system-wide
    juju-log "Installing Keystone supplied CA cert."
    echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
    update-ca-certificates --fresh

    # XXX TODO: Find a better way of exporting this?
    if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
      [[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
        rm -rf /var/www/keystone_juju_ca_cert.crt
      ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
        /var/www/keystone_juju_ca_cert.crt
    fi

  fi
  # write one SSL vhost per mapping, proxying $ext_port -> localhost:$int_port
  for port_map in $port_maps ; do
    local ext_port=$(echo $port_map | cut -d: -f1)
    local int_port=$(echo $port_map | cut -d: -f2)
    juju-log "Creating apache2 reverse proxy vhost for $port_map."
    cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
Listen $ext_port
NameVirtualHost *:$ext_port
<VirtualHost *:$ext_port>
    ServerName $(unit-get private-address)
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
    SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
    ProxyPass / http://localhost:$int_port/
    ProxyPassReverse / http://localhost:$int_port/
    ProxyPreserveHost on
</VirtualHost>
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
END
    # filter a2ensite's hint line; any remaining output sets the restart flag
    a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
      http_restart=1
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
3194 | 644 | |||
##########################################################################
# Description: Ensure HTTPS reverse proxying is disabled for given port
# mappings.
# Parameters: Variable number of proxy port mappings as
# $external:$internal (field 1 is the apache listen port).
# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
##########################################################################
disable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Ensuring HTTPS disabled for $port_maps."
  # nothing to do if apache was never configured for this charm
  if [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ; then
    return 0
  fi
  for port_map in $port_maps ; do
    local ext_port=$(echo $port_map | cut -d: -f1)
    local int_port=$(echo $port_map | cut -d: -f2)
    if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then
      juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
      # filter a2dissite's hint line; remaining output sets the restart flag
      a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
        http_restart=1
    fi
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
3220 | 670 | |||
3221 | 671 | |||
##########################################################################
# Description: Ensures HTTPS is either enabled or disabled for given port
# mapping.
# Parameters: Variable number of proxy port mappings as
# $external:$internal.
# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
##########################################################################
setup_https() {
  # Configure https via apache reverse proxying, using certs provided
  # either by charm config or by keystone.  $CHARM must be set so the
  # apache site files can be namespaced per charm.
  if [[ -z "$CHARM" ]] ; then
    error_out "setup_https(): CHARM not set."
  fi
  if https ; then
    enable_https $@
  else
    disable_https $@
  fi
}
3240 | 690 | |||
##########################################################################
# Description: Determine correct API server listening port based on
#              existence of HTTPS reverse proxy and/or haproxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for the API service.
##########################################################################
determine_api_port() {
  local public_port="$1"
  local offset=0
  # haproxy fronts the API when peered or clustered: step back 10
  ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && offset=$((offset + 1))
  # apache SSL termination in front: step back another 10
  https >/dev/null 2>&1 && offset=$((offset + 1))
  # modernized: POSIX $(( )) arithmetic replaces deprecated $[ ] syntax
  echo $((public_port - offset * 10))
}
3254 | 704 | |||
##########################################################################
# Description: Determine correct proxy listening port based on public IP +
#              existence of HTTPS reverse proxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for haproxy service public address.
##########################################################################
determine_haproxy_port() {
  local public_port="$1"
  local offset=0
  # step back 10 when apache terminates SSL in front of haproxy
  https >/dev/null 2>&1 && offset=$((offset + 1))
  # modernized: POSIX $(( )) arithmetic replaces deprecated $[ ] syntax
  echo $((public_port - offset * 10))
}
3267 | 717 | |||
##########################################################################
# Description: Print the value for a given config option in an OpenStack
# .ini style configuration file.
# Parameters: File path, option to retrieve, optional
# section name (default=DEFAULT)
# Returns: Prints value if set, prints nothing otherwise.
##########################################################################
local_config_get() {
  # return config values set in openstack .ini config files.
  # default placeholders starting (eg, %AUTH_HOST%) treated as
  # unset values.
  local file="$1"
  local option="$2"
  local section="$3"
  [[ -z "$section" ]] && section="DEFAULT"
  # NOTE: embedded interpreter is python2 (ConfigParser module, print
  # statement).  Any parse/lookup failure prints an empty line and exits
  # 0 so callers can treat missing options as empty strings.
  python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$file')
try:
  value = config.get('$section', '$option')
except:
  print ''
  exit(0)
if value.startswith('%'): exit(0)
print value
"
}
3296 | 746 | |||
##########################################################################
# Description: Creates an rc file exporting environment variables to a
# script_path local to the charm's installed directory. Any charm scripts
# run outside the juju hook environment can source this scriptrc to obtain
# updated config information necessary to perform health checks or
# service changes.
#
# Parameters:
#   An array of '=' delimited ENV_VAR=value combinations to export.
#   If optional script_path key is not provided in the array, script_path
#   defaults to scripts/scriptrc
##########################################################################
function save_script_rc {
  # cleaner test than the original "[ ! -n ... ]"
  if [ -z "$JUJU_UNIT_NAME" ]; then
    echo "Error: Missing JUJU_UNIT_NAME environment variable"
    exit 1
  fi
  # our default unit_path
  unit_path="/var/lib/juju/units/${JUJU_UNIT_NAME/\//-}/charm/scripts/scriptrc"
  echo $unit_path
  # build the file in /tmp, then mv atomically into place below
  tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"

  echo "#!/bin/bash" > $tmp_rc
  for env_var in "${@}"
  do
    # Run the pipeline directly; the original wrapped it in backticks,
    # relying on bash executing an empty expansion (it worked, but only
    # by accident of command-substitution exit-status propagation).
    if echo $env_var | grep -q script_path; then
      # well then we need to reset the new unit-local script path
      unit_path="/var/lib/juju/units/${JUJU_UNIT_NAME/\//-}/charm/${env_var/script_path=/}"
    else
      echo "export $env_var" >> $tmp_rc
    fi
  done
  chmod 755 $tmp_rc
  mv $tmp_rc $unit_path
}
3332 | 782 | 0 | ||
3333 | === removed file 'hooks/charmhelpers/contrib/openstack/openstack_utils.py' | |||
3334 | --- hooks/charmhelpers/contrib/openstack/openstack_utils.py 2013-06-07 09:39:50 +0000 | |||
3335 | +++ hooks/charmhelpers/contrib/openstack/openstack_utils.py 1970-01-01 00:00:00 +0000 | |||
3336 | @@ -1,228 +0,0 @@ | |||
3337 | 1 | #!/usr/bin/python | ||
3338 | 2 | |||
3339 | 3 | # Common python helper functions used for OpenStack charms. | ||
3340 | 4 | |||
3341 | 5 | import apt_pkg as apt | ||
3342 | 6 | import subprocess | ||
3343 | 7 | import os | ||
3344 | 8 | import sys | ||
3345 | 9 | |||
# Base URL and signing-key id for the Ubuntu Cloud Archive repository.
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

# Ubuntu series codename -> OpenStack release shipped in its main archive.
ubuntu_openstack_release = {
    'oneiric': 'diablo',
    'precise': 'essex',
    'quantal': 'folsom',
    'raring': 'grizzly',
}


# Coordinated OpenStack version string -> release codename.
openstack_codenames = {
    '2011.2': 'diablo',
    '2012.1': 'essex',
    '2012.2': 'folsom',
    '2013.1': 'grizzly',
    '2013.2': 'havana',
}

# The ugly duckling: swift versions its releases independently of the
# coordinated OpenStack version numbers above, so it needs its own map.
swift_codenames = {
    '1.4.3': 'diablo',
    '1.4.8': 'essex',
    '1.7.4': 'folsom',
    '1.7.6': 'grizzly',
    '1.7.7': 'grizzly',
    '1.8.0': 'grizzly',
}
3374 | 38 | |||
3375 | 39 | |||
def juju_log(msg):
    '''Write msg to the unit's juju log via the juju-log hook tool.'''
    cmd = ['juju-log', msg]
    subprocess.check_call(cmd)
3378 | 42 | |||
3379 | 43 | |||
def error_out(msg):
    '''Log msg as a fatal error and terminate the hook with status 1.'''
    text = "FATAL ERROR: %s" % msg
    juju_log(text)
    sys.exit(1)
3383 | 47 | |||
3384 | 48 | |||
def lsb_release():
    '''Return /etc/lsb-release in a dict'''
    d = {}
    # 'with' ensures the file handle is closed (the old code leaked it);
    # split('=', 1) keeps values that themselves contain '=' intact.
    with open('/etc/lsb-release', 'r') as lsb:
        for l in lsb:
            k, v = l.split('=', 1)
            d[k.strip()] = v.strip()
    return d
3393 | 57 | |||
3394 | 58 | |||
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.

    src is one of:
      - 'distro': use the release shipped with this Ubuntu series
      - 'cloud:<series>-<release>...': a Cloud Archive pocket
      - a 'deb ...' line or 'ppa:...' source
    NOTE: implicitly returns None when a deb/ppa string matches no known
    codename - callers must handle that.
    '''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']

    rel = ''
    if src == 'distro':
        try:
            rel = ubuntu_openstack_release[ubuntu_rel]
        except KeyError:
            e = 'Could not derive openstack release for '\
                'this Ubuntu release: %s' % ubuntu_rel
            error_out(e)
        return rel

    if src.startswith('cloud:'):
        # e.g. 'cloud:precise-folsom/updates' -> 'folsom'
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        for k, v in openstack_codenames.iteritems():
            if v in src:
                return v
3419 | 83 | |||
3420 | 84 | |||
def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    codename = openstack_codenames.get(vers)
    if codename is None:
        error_out('Could not determine OpenStack codename for version %s'
                  % vers)
    return codename
3428 | 92 | |||
3429 | 93 | |||
def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    # reverse lookup over the version -> codename map
    for version, name in openstack_codenames.iteritems():
        if name == codename:
            return version
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
3438 | 102 | |||
3439 | 103 | |||
def get_os_codename_package(pkg):
    '''Derive OpenStack release codename from an installed package.'''
    apt.init()
    cache = apt.Cache()

    try:
        pkg = cache[pkg]
    except KeyError:
        # Narrowed from a bare 'except:' - the cache lookup raises
        # KeyError for unknown packages; anything else should propagate
        # rather than be masked as "could not determine version".
        e = 'Could not determine version of installed package: %s' % pkg
        error_out(e)

    vers = apt.UpstreamVersion(pkg.current_ver.ver_str)

    try:
        if 'swift' in pkg.name:
            # swift versions independently of the coordinated releases
            vers = vers[:5]
            return swift_codenames[vers]
        else:
            vers = vers[:6]
            return openstack_codenames[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
3463 | 127 | |||
3464 | 128 | |||
def get_os_version_package(pkg):
    '''Derive OpenStack version number from an installed package.

    Implicitly returns None when the codename maps to no known version.
    '''
    codename = get_os_codename_package(pkg)

    # swift keeps its own version map (see swift_codenames above)
    vers_map = swift_codenames if 'swift' in pkg else openstack_codenames

    for version, cname in vers_map.iteritems():
        if cname == codename:
            return version
3479 | 143 | |||
def import_key(keyid):
    '''Fetch repo signing key keyid from the Ubuntu keyserver via apt-key.'''
    # same argv the original produced with cmd.split(' ')
    cmd = ['apt-key', 'adv', '--keyserver', 'keyserver.ubuntu.com',
           '--recv-keys', keyid]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        error_out("Error importing repo key %s" % keyid)
3487 | 151 | |||
3488 | 152 | def configure_installation_source(rel): | ||
3489 | 153 | '''Configure apt installation source.''' | ||
3490 | 154 | if rel == 'distro': | ||
3491 | 155 | return | ||
3492 | 156 | elif rel[:4] == "ppa:": | ||
3493 | 157 | src = rel | ||
3494 | 158 | subprocess.check_call(["add-apt-repository", "-y", src]) | ||
3495 | 159 | elif rel[:3] == "deb": | ||
3496 | 160 | l = len(rel.split('|')) | ||
3497 | 161 | if l == 2: | ||
3498 | 162 | src, key = rel.split('|') | ||
3499 | 163 | juju_log("Importing PPA key from keyserver for %s" % src) | ||
3500 | 164 | import_key(key) | ||
3501 | 165 | elif l == 1: | ||
3502 | 166 | src = rel | ||
3503 | 167 | with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: | ||
3504 | 168 | f.write(src) | ||
3505 | 169 | elif rel[:6] == 'cloud:': | ||
3506 | 170 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | ||
3507 | 171 | rel = rel.split(':')[1] | ||
3508 | 172 | u_rel = rel.split('-')[0] | ||
3509 | 173 | ca_rel = rel.split('-')[1] | ||
3510 | 174 | |||
3511 | 175 | if u_rel != ubuntu_rel: | ||
3512 | 176 | e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ | ||
3513 | 177 | 'version (%s)' % (ca_rel, ubuntu_rel) | ||
3514 | 178 | error_out(e) | ||
3515 | 179 | |||
3516 | 180 | if 'staging' in ca_rel: | ||
3517 | 181 | # staging is just a regular PPA. | ||
3518 | 182 | os_rel = ca_rel.split('/')[0] | ||
3519 | 183 | ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel | ||
3520 | 184 | cmd = 'add-apt-repository -y %s' % ppa | ||
3521 | 185 | subprocess.check_call(cmd.split(' ')) | ||
3522 | 186 | return | ||
3523 | 187 | |||
3524 | 188 | # map charm config options to actual archive pockets. | ||
3525 | 189 | pockets = { | ||
3526 | 190 | 'folsom': 'precise-updates/folsom', | ||
3527 | 191 | 'folsom/updates': 'precise-updates/folsom', | ||
3528 | 192 | 'folsom/proposed': 'precise-proposed/folsom', | ||
3529 | 193 | 'grizzly': 'precise-updates/grizzly', | ||
3530 | 194 | 'grizzly/updates': 'precise-updates/grizzly', | ||
3531 | 195 | 'grizzly/proposed': 'precise-proposed/grizzly' | ||
3532 | 196 | } | ||
3533 | 197 | |||
3534 | 198 | try: | ||
3535 | 199 | pocket = pockets[ca_rel] | ||
3536 | 200 | except KeyError: | ||
3537 | 201 | e = 'Invalid Cloud Archive release specified: %s' % rel | ||
3538 | 202 | error_out(e) | ||
3539 | 203 | |||
3540 | 204 | src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) | ||
3541 | 205 | # TODO: Replace key import with cloud archive keyring pkg. | ||
3542 | 206 | import_key(CLOUD_ARCHIVE_KEY_ID) | ||
3543 | 207 | |||
3544 | 208 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: | ||
3545 | 209 | f.write(src) | ||
3546 | 210 | else: | ||
3547 | 211 | error_out("Invalid openstack-release specified: %s" % rel) | ||
3548 | 212 | |||
3549 | 213 | |||
3550 | 214 | def save_script_rc(script_path="scripts/scriptrc", **env_vars): | ||
3551 | 215 | """ | ||
3552 | 216 | Write an rc file in the charm-delivered directory containing | ||
3553 | 217 | exported environment variables provided by env_vars. Any charm scripts run | ||
3554 | 218 | outside the juju hook environment can source this scriptrc to obtain | ||
3555 | 219 | updated config information necessary to perform health checks or | ||
3556 | 220 | service changes. | ||
3557 | 221 | """ | ||
3558 | 222 | unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-') | ||
3559 | 223 | juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path) | ||
3560 | 224 | with open(juju_rc_path, 'wb') as rc_script: | ||
3561 | 225 | rc_script.write( | ||
3562 | 226 | "#!/bin/bash\n") | ||
3563 | 227 | [rc_script.write('export %s=%s\n' % (u, p)) | ||
3564 | 228 | for u, p in env_vars.iteritems() if u != "script_path"] | ||
3565 | 229 | 0 | ||
3566 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
3567 | --- hooks/charmhelpers/core/hookenv.py 2013-06-07 09:39:50 +0000 | |||
3568 | +++ hooks/charmhelpers/core/hookenv.py 2013-11-05 18:43:49 +0000 | |||
3569 | @@ -9,6 +9,7 @@ | |||
3570 | 9 | import yaml | 9 | import yaml |
3571 | 10 | import subprocess | 10 | import subprocess |
3572 | 11 | import UserDict | 11 | import UserDict |
3573 | 12 | from subprocess import CalledProcessError | ||
3574 | 12 | 13 | ||
3575 | 13 | CRITICAL = "CRITICAL" | 14 | CRITICAL = "CRITICAL" |
3576 | 14 | ERROR = "ERROR" | 15 | ERROR = "ERROR" |
3577 | @@ -17,9 +18,47 @@ | |||
3578 | 17 | DEBUG = "DEBUG" | 18 | DEBUG = "DEBUG" |
3579 | 18 | MARKER = object() | 19 | MARKER = object() |
3580 | 19 | 20 | ||
3581 | 21 | cache = {} | ||
3582 | 22 | |||
3583 | 23 | |||
3584 | 24 | def cached(func): | ||
3585 | 25 | """Cache return values for multiple executions of func + args | ||
3586 | 26 | |||
3587 | 27 | For example: | ||
3588 | 28 | |||
3589 | 29 | @cached | ||
3590 | 30 | def unit_get(attribute): | ||
3591 | 31 | pass | ||
3592 | 32 | |||
3593 | 33 | unit_get('test') | ||
3594 | 34 | |||
3595 | 35 | will cache the result of unit_get + 'test' for future calls. | ||
3596 | 36 | """ | ||
3597 | 37 | def wrapper(*args, **kwargs): | ||
3598 | 38 | global cache | ||
3599 | 39 | key = str((func, args, kwargs)) | ||
3600 | 40 | try: | ||
3601 | 41 | return cache[key] | ||
3602 | 42 | except KeyError: | ||
3603 | 43 | res = func(*args, **kwargs) | ||
3604 | 44 | cache[key] = res | ||
3605 | 45 | return res | ||
3606 | 46 | return wrapper | ||
3607 | 47 | |||
3608 | 48 | |||
3609 | 49 | def flush(key): | ||
3610 | 50 | """Flushes any entries from function cache where the | ||
3611 | 51 | key is found in the function+args """ | ||
3612 | 52 | flush_list = [] | ||
3613 | 53 | for item in cache: | ||
3614 | 54 | if key in item: | ||
3615 | 55 | flush_list.append(item) | ||
3616 | 56 | for item in flush_list: | ||
3617 | 57 | del cache[item] | ||
3618 | 58 | |||
3619 | 20 | 59 | ||
3620 | 21 | def log(message, level=None): | 60 | def log(message, level=None): |
3622 | 22 | "Write a message to the juju log" | 61 | """Write a message to the juju log""" |
3623 | 23 | command = ['juju-log'] | 62 | command = ['juju-log'] |
3624 | 24 | if level: | 63 | if level: |
3625 | 25 | command += ['-l', level] | 64 | command += ['-l', level] |
3626 | @@ -28,7 +67,7 @@ | |||
3627 | 28 | 67 | ||
3628 | 29 | 68 | ||
3629 | 30 | class Serializable(UserDict.IterableUserDict): | 69 | class Serializable(UserDict.IterableUserDict): |
3631 | 31 | "Wrapper, an object that can be serialized to yaml or json" | 70 | """Wrapper, an object that can be serialized to yaml or json""" |
3632 | 32 | 71 | ||
3633 | 33 | def __init__(self, obj): | 72 | def __init__(self, obj): |
3634 | 34 | # wrap the object | 73 | # wrap the object |
3635 | @@ -49,12 +88,20 @@ | |||
3636 | 49 | except KeyError: | 88 | except KeyError: |
3637 | 50 | raise AttributeError(attr) | 89 | raise AttributeError(attr) |
3638 | 51 | 90 | ||
3639 | 91 | def __getstate__(self): | ||
3640 | 92 | # Pickle as a standard dictionary. | ||
3641 | 93 | return self.data | ||
3642 | 94 | |||
3643 | 95 | def __setstate__(self, state): | ||
3644 | 96 | # Unpickle into our wrapper. | ||
3645 | 97 | self.data = state | ||
3646 | 98 | |||
3647 | 52 | def json(self): | 99 | def json(self): |
3649 | 53 | "Serialize the object to json" | 100 | """Serialize the object to json""" |
3650 | 54 | return json.dumps(self.data) | 101 | return json.dumps(self.data) |
3651 | 55 | 102 | ||
3652 | 56 | def yaml(self): | 103 | def yaml(self): |
3654 | 57 | "Serialize the object to yaml" | 104 | """Serialize the object to yaml""" |
3655 | 58 | return yaml.dump(self.data) | 105 | return yaml.dump(self.data) |
3656 | 59 | 106 | ||
3657 | 60 | 107 | ||
3658 | @@ -62,55 +109,62 @@ | |||
3659 | 62 | """A convenient bundling of the current execution context""" | 109 | """A convenient bundling of the current execution context""" |
3660 | 63 | context = {} | 110 | context = {} |
3661 | 64 | context['conf'] = config() | 111 | context['conf'] = config() |
3664 | 65 | context['reltype'] = relation_type() | 112 | if relation_id(): |
3665 | 66 | context['relid'] = relation_id() | 113 | context['reltype'] = relation_type() |
3666 | 114 | context['relid'] = relation_id() | ||
3667 | 115 | context['rel'] = relation_get() | ||
3668 | 67 | context['unit'] = local_unit() | 116 | context['unit'] = local_unit() |
3669 | 68 | context['rels'] = relations() | 117 | context['rels'] = relations() |
3670 | 69 | context['rel'] = relation_get() | ||
3671 | 70 | context['env'] = os.environ | 118 | context['env'] = os.environ |
3672 | 71 | return context | 119 | return context |
3673 | 72 | 120 | ||
3674 | 73 | 121 | ||
3675 | 74 | def in_relation_hook(): | 122 | def in_relation_hook(): |
3677 | 75 | "Determine whether we're running in a relation hook" | 123 | """Determine whether we're running in a relation hook""" |
3678 | 76 | return 'JUJU_RELATION' in os.environ | 124 | return 'JUJU_RELATION' in os.environ |
3679 | 77 | 125 | ||
3680 | 78 | 126 | ||
3681 | 79 | def relation_type(): | 127 | def relation_type(): |
3683 | 80 | "The scope for the current relation hook" | 128 | """The scope for the current relation hook""" |
3684 | 81 | return os.environ.get('JUJU_RELATION', None) | 129 | return os.environ.get('JUJU_RELATION', None) |
3685 | 82 | 130 | ||
3686 | 83 | 131 | ||
3687 | 84 | def relation_id(): | 132 | def relation_id(): |
3689 | 85 | "The relation ID for the current relation hook" | 133 | """The relation ID for the current relation hook""" |
3690 | 86 | return os.environ.get('JUJU_RELATION_ID', None) | 134 | return os.environ.get('JUJU_RELATION_ID', None) |
3691 | 87 | 135 | ||
3692 | 88 | 136 | ||
3693 | 89 | def local_unit(): | 137 | def local_unit(): |
3695 | 90 | "Local unit ID" | 138 | """Local unit ID""" |
3696 | 91 | return os.environ['JUJU_UNIT_NAME'] | 139 | return os.environ['JUJU_UNIT_NAME'] |
3697 | 92 | 140 | ||
3698 | 93 | 141 | ||
3699 | 94 | def remote_unit(): | 142 | def remote_unit(): |
3701 | 95 | "The remote unit for the current relation hook" | 143 | """The remote unit for the current relation hook""" |
3702 | 96 | return os.environ['JUJU_REMOTE_UNIT'] | 144 | return os.environ['JUJU_REMOTE_UNIT'] |
3703 | 97 | 145 | ||
3704 | 98 | 146 | ||
3705 | 147 | def service_name(): | ||
3706 | 148 | """The name service group this unit belongs to""" | ||
3707 | 149 | return local_unit().split('/')[0] | ||
3708 | 150 | |||
3709 | 151 | |||
3710 | 152 | @cached | ||
3711 | 99 | def config(scope=None): | 153 | def config(scope=None): |
3713 | 100 | "Juju charm configuration" | 154 | """Juju charm configuration""" |
3714 | 101 | config_cmd_line = ['config-get'] | 155 | config_cmd_line = ['config-get'] |
3715 | 102 | if scope is not None: | 156 | if scope is not None: |
3716 | 103 | config_cmd_line.append(scope) | 157 | config_cmd_line.append(scope) |
3717 | 104 | config_cmd_line.append('--format=json') | 158 | config_cmd_line.append('--format=json') |
3718 | 105 | try: | 159 | try: |
3726 | 106 | config_data = json.loads(subprocess.check_output(config_cmd_line)) | 160 | return json.loads(subprocess.check_output(config_cmd_line)) |
3727 | 107 | except (ValueError, OSError, subprocess.CalledProcessError) as err: | 161 | except ValueError: |
3728 | 108 | log(str(err), level=ERROR) | 162 | return None |
3729 | 109 | raise | 163 | |
3730 | 110 | return Serializable(config_data) | 164 | |
3731 | 111 | 165 | @cached | |
3725 | 112 | |||
3732 | 113 | def relation_get(attribute=None, unit=None, rid=None): | 166 | def relation_get(attribute=None, unit=None, rid=None): |
3733 | 167 | """Get relation information""" | ||
3734 | 114 | _args = ['relation-get', '--format=json'] | 168 | _args = ['relation-get', '--format=json'] |
3735 | 115 | if rid: | 169 | if rid: |
3736 | 116 | _args.append('-r') | 170 | _args.append('-r') |
3737 | @@ -122,51 +176,63 @@ | |||
3738 | 122 | return json.loads(subprocess.check_output(_args)) | 176 | return json.loads(subprocess.check_output(_args)) |
3739 | 123 | except ValueError: | 177 | except ValueError: |
3740 | 124 | return None | 178 | return None |
3741 | 179 | except CalledProcessError, e: | ||
3742 | 180 | if e.returncode == 2: | ||
3743 | 181 | return None | ||
3744 | 182 | raise | ||
3745 | 125 | 183 | ||
3746 | 126 | 184 | ||
3747 | 127 | def relation_set(relation_id=None, relation_settings={}, **kwargs): | 185 | def relation_set(relation_id=None, relation_settings={}, **kwargs): |
3748 | 186 | """Set relation information for the current unit""" | ||
3749 | 128 | relation_cmd_line = ['relation-set'] | 187 | relation_cmd_line = ['relation-set'] |
3750 | 129 | if relation_id is not None: | 188 | if relation_id is not None: |
3751 | 130 | relation_cmd_line.extend(('-r', relation_id)) | 189 | relation_cmd_line.extend(('-r', relation_id)) |
3756 | 131 | for k, v in relation_settings.items(): | 190 | for k, v in (relation_settings.items() + kwargs.items()): |
3757 | 132 | relation_cmd_line.append('{}={}'.format(k, v)) | 191 | if v is None: |
3758 | 133 | for k, v in kwargs.items(): | 192 | relation_cmd_line.append('{}='.format(k)) |
3759 | 134 | relation_cmd_line.append('{}={}'.format(k, v)) | 193 | else: |
3760 | 194 | relation_cmd_line.append('{}={}'.format(k, v)) | ||
3761 | 135 | subprocess.check_call(relation_cmd_line) | 195 | subprocess.check_call(relation_cmd_line) |
3764 | 136 | 196 | # Flush cache of any relation-gets for local unit | |
3765 | 137 | 197 | flush(local_unit()) | |
3766 | 198 | |||
3767 | 199 | |||
3768 | 200 | @cached | ||
3769 | 138 | def relation_ids(reltype=None): | 201 | def relation_ids(reltype=None): |
3771 | 139 | "A list of relation_ids" | 202 | """A list of relation_ids""" |
3772 | 140 | reltype = reltype or relation_type() | 203 | reltype = reltype or relation_type() |
3773 | 141 | relid_cmd_line = ['relation-ids', '--format=json'] | 204 | relid_cmd_line = ['relation-ids', '--format=json'] |
3774 | 142 | if reltype is not None: | 205 | if reltype is not None: |
3775 | 143 | relid_cmd_line.append(reltype) | 206 | relid_cmd_line.append(reltype) |
3777 | 144 | return json.loads(subprocess.check_output(relid_cmd_line)) | 207 | return json.loads(subprocess.check_output(relid_cmd_line)) or [] |
3778 | 145 | return [] | 208 | return [] |
3779 | 146 | 209 | ||
3780 | 147 | 210 | ||
3781 | 211 | @cached | ||
3782 | 148 | def related_units(relid=None): | 212 | def related_units(relid=None): |
3784 | 149 | "A list of related units" | 213 | """A list of related units""" |
3785 | 150 | relid = relid or relation_id() | 214 | relid = relid or relation_id() |
3786 | 151 | units_cmd_line = ['relation-list', '--format=json'] | 215 | units_cmd_line = ['relation-list', '--format=json'] |
3787 | 152 | if relid is not None: | 216 | if relid is not None: |
3788 | 153 | units_cmd_line.extend(('-r', relid)) | 217 | units_cmd_line.extend(('-r', relid)) |
3792 | 154 | return json.loads(subprocess.check_output(units_cmd_line)) | 218 | return json.loads(subprocess.check_output(units_cmd_line)) or [] |
3793 | 155 | 219 | ||
3794 | 156 | 220 | ||
3795 | 221 | @cached | ||
3796 | 157 | def relation_for_unit(unit=None, rid=None): | 222 | def relation_for_unit(unit=None, rid=None): |
3798 | 158 | "Get the json represenation of a unit's relation" | 223 | """Get the json represenation of a unit's relation""" |
3799 | 159 | unit = unit or remote_unit() | 224 | unit = unit or remote_unit() |
3800 | 160 | relation = relation_get(unit=unit, rid=rid) | 225 | relation = relation_get(unit=unit, rid=rid) |
3801 | 161 | for key in relation: | 226 | for key in relation: |
3802 | 162 | if key.endswith('-list'): | 227 | if key.endswith('-list'): |
3803 | 163 | relation[key] = relation[key].split() | 228 | relation[key] = relation[key].split() |
3804 | 164 | relation['__unit__'] = unit | 229 | relation['__unit__'] = unit |
3808 | 165 | return Serializable(relation) | 230 | return relation |
3809 | 166 | 231 | ||
3810 | 167 | 232 | ||
3811 | 233 | @cached | ||
3812 | 168 | def relations_for_id(relid=None): | 234 | def relations_for_id(relid=None): |
3814 | 169 | "Get relations of a specific relation ID" | 235 | """Get relations of a specific relation ID""" |
3815 | 170 | relation_data = [] | 236 | relation_data = [] |
3816 | 171 | relid = relid or relation_ids() | 237 | relid = relid or relation_ids() |
3817 | 172 | for unit in related_units(relid): | 238 | for unit in related_units(relid): |
3818 | @@ -176,8 +242,9 @@ | |||
3819 | 176 | return relation_data | 242 | return relation_data |
3820 | 177 | 243 | ||
3821 | 178 | 244 | ||
3822 | 245 | @cached | ||
3823 | 179 | def relations_of_type(reltype=None): | 246 | def relations_of_type(reltype=None): |
3825 | 180 | "Get relations of a specific type" | 247 | """Get relations of a specific type""" |
3826 | 181 | relation_data = [] | 248 | relation_data = [] |
3827 | 182 | reltype = reltype or relation_type() | 249 | reltype = reltype or relation_type() |
3828 | 183 | for relid in relation_ids(reltype): | 250 | for relid in relation_ids(reltype): |
3829 | @@ -187,13 +254,14 @@ | |||
3830 | 187 | return relation_data | 254 | return relation_data |
3831 | 188 | 255 | ||
3832 | 189 | 256 | ||
3833 | 257 | @cached | ||
3834 | 190 | def relation_types(): | 258 | def relation_types(): |
3836 | 191 | "Get a list of relation types supported by this charm" | 259 | """Get a list of relation types supported by this charm""" |
3837 | 192 | charmdir = os.environ.get('CHARM_DIR', '') | 260 | charmdir = os.environ.get('CHARM_DIR', '') |
3838 | 193 | mdf = open(os.path.join(charmdir, 'metadata.yaml')) | 261 | mdf = open(os.path.join(charmdir, 'metadata.yaml')) |
3839 | 194 | md = yaml.safe_load(mdf) | 262 | md = yaml.safe_load(mdf) |
3840 | 195 | rel_types = [] | 263 | rel_types = [] |
3842 | 196 | for key in ('provides','requires','peers'): | 264 | for key in ('provides', 'requires', 'peers'): |
3843 | 197 | section = md.get(key) | 265 | section = md.get(key) |
3844 | 198 | if section: | 266 | if section: |
3845 | 199 | rel_types.extend(section.keys()) | 267 | rel_types.extend(section.keys()) |
3846 | @@ -201,12 +269,14 @@ | |||
3847 | 201 | return rel_types | 269 | return rel_types |
3848 | 202 | 270 | ||
3849 | 203 | 271 | ||
3850 | 272 | @cached | ||
3851 | 204 | def relations(): | 273 | def relations(): |
3852 | 274 | """Get a nested dictionary of relation data for all related units""" | ||
3853 | 205 | rels = {} | 275 | rels = {} |
3854 | 206 | for reltype in relation_types(): | 276 | for reltype in relation_types(): |
3855 | 207 | relids = {} | 277 | relids = {} |
3856 | 208 | for relid in relation_ids(reltype): | 278 | for relid in relation_ids(reltype): |
3858 | 209 | units = {} | 279 | units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} |
3859 | 210 | for unit in related_units(relid): | 280 | for unit in related_units(relid): |
3860 | 211 | reldata = relation_get(unit=unit, rid=relid) | 281 | reldata = relation_get(unit=unit, rid=relid) |
3861 | 212 | units[unit] = reldata | 282 | units[unit] = reldata |
3862 | @@ -216,41 +286,70 @@ | |||
3863 | 216 | 286 | ||
3864 | 217 | 287 | ||
3865 | 218 | def open_port(port, protocol="TCP"): | 288 | def open_port(port, protocol="TCP"): |
3867 | 219 | "Open a service network port" | 289 | """Open a service network port""" |
3868 | 220 | _args = ['open-port'] | 290 | _args = ['open-port'] |
3869 | 221 | _args.append('{}/{}'.format(port, protocol)) | 291 | _args.append('{}/{}'.format(port, protocol)) |
3870 | 222 | subprocess.check_call(_args) | 292 | subprocess.check_call(_args) |
3871 | 223 | 293 | ||
3872 | 224 | 294 | ||
3873 | 225 | def close_port(port, protocol="TCP"): | 295 | def close_port(port, protocol="TCP"): |
3875 | 226 | "Close a service network port" | 296 | """Close a service network port""" |
3876 | 227 | _args = ['close-port'] | 297 | _args = ['close-port'] |
3877 | 228 | _args.append('{}/{}'.format(port, protocol)) | 298 | _args.append('{}/{}'.format(port, protocol)) |
3878 | 229 | subprocess.check_call(_args) | 299 | subprocess.check_call(_args) |
3879 | 230 | 300 | ||
3880 | 231 | 301 | ||
3881 | 302 | @cached | ||
3882 | 232 | def unit_get(attribute): | 303 | def unit_get(attribute): |
3885 | 233 | _args = ['unit-get', attribute] | 304 | """Get the unit ID for the remote unit""" |
3886 | 234 | return subprocess.check_output(_args).strip() | 305 | _args = ['unit-get', '--format=json', attribute] |
3887 | 306 | try: | ||
3888 | 307 | return json.loads(subprocess.check_output(_args)) | ||
3889 | 308 | except ValueError: | ||
3890 | 309 | return None | ||
3891 | 235 | 310 | ||
3892 | 236 | 311 | ||
3893 | 237 | def unit_private_ip(): | 312 | def unit_private_ip(): |
3894 | 313 | """Get this unit's private IP address""" | ||
3895 | 238 | return unit_get('private-address') | 314 | return unit_get('private-address') |
3896 | 239 | 315 | ||
3897 | 240 | 316 | ||
3898 | 241 | class UnregisteredHookError(Exception): | 317 | class UnregisteredHookError(Exception): |
3899 | 318 | """Raised when an undefined hook is called""" | ||
3900 | 242 | pass | 319 | pass |
3901 | 243 | 320 | ||
3902 | 244 | 321 | ||
3903 | 245 | class Hooks(object): | 322 | class Hooks(object): |
3904 | 323 | """A convenient handler for hook functions. | ||
3905 | 324 | |||
3906 | 325 | Example: | ||
3907 | 326 | hooks = Hooks() | ||
3908 | 327 | |||
3909 | 328 | # register a hook, taking its name from the function name | ||
3910 | 329 | @hooks.hook() | ||
3911 | 330 | def install(): | ||
3912 | 331 | ... | ||
3913 | 332 | |||
3914 | 333 | # register a hook, providing a custom hook name | ||
3915 | 334 | @hooks.hook("config-changed") | ||
3916 | 335 | def config_changed(): | ||
3917 | 336 | ... | ||
3918 | 337 | |||
3919 | 338 | if __name__ == "__main__": | ||
3920 | 339 | # execute a hook based on the name the program is called by | ||
3921 | 340 | hooks.execute(sys.argv) | ||
3922 | 341 | """ | ||
3923 | 342 | |||
3924 | 246 | def __init__(self): | 343 | def __init__(self): |
3925 | 247 | super(Hooks, self).__init__() | 344 | super(Hooks, self).__init__() |
3926 | 248 | self._hooks = {} | 345 | self._hooks = {} |
3927 | 249 | 346 | ||
3928 | 250 | def register(self, name, function): | 347 | def register(self, name, function): |
3929 | 348 | """Register a hook""" | ||
3930 | 251 | self._hooks[name] = function | 349 | self._hooks[name] = function |
3931 | 252 | 350 | ||
3932 | 253 | def execute(self, args): | 351 | def execute(self, args): |
3933 | 352 | """Execute a registered hook based on args[0]""" | ||
3934 | 254 | hook_name = os.path.basename(args[0]) | 353 | hook_name = os.path.basename(args[0]) |
3935 | 255 | if hook_name in self._hooks: | 354 | if hook_name in self._hooks: |
3936 | 256 | self._hooks[hook_name]() | 355 | self._hooks[hook_name]() |
3937 | @@ -258,10 +357,19 @@ | |||
3938 | 258 | raise UnregisteredHookError(hook_name) | 357 | raise UnregisteredHookError(hook_name) |
3939 | 259 | 358 | ||
3940 | 260 | def hook(self, *hook_names): | 359 | def hook(self, *hook_names): |
3941 | 360 | """Decorator, registering them as hooks""" | ||
3942 | 261 | def wrapper(decorated): | 361 | def wrapper(decorated): |
3943 | 262 | for hook_name in hook_names: | 362 | for hook_name in hook_names: |
3944 | 263 | self.register(hook_name, decorated) | 363 | self.register(hook_name, decorated) |
3945 | 264 | else: | 364 | else: |
3946 | 265 | self.register(decorated.__name__, decorated) | 365 | self.register(decorated.__name__, decorated) |
3947 | 366 | if '_' in decorated.__name__: | ||
3948 | 367 | self.register( | ||
3949 | 368 | decorated.__name__.replace('_', '-'), decorated) | ||
3950 | 266 | return decorated | 369 | return decorated |
3951 | 267 | return wrapper | 370 | return wrapper |
3952 | 371 | |||
3953 | 372 | |||
3954 | 373 | def charm_dir(): | ||
3955 | 374 | """Return the root directory of the current charm""" | ||
3956 | 375 | return os.environ.get('CHARM_DIR') | ||
3957 | 268 | 376 | ||
3958 | === modified file 'hooks/charmhelpers/core/host.py' | |||
3959 | --- hooks/charmhelpers/core/host.py 2013-06-07 09:39:50 +0000 | |||
3960 | +++ hooks/charmhelpers/core/host.py 2013-11-05 18:43:49 +0000 | |||
3961 | @@ -8,46 +8,75 @@ | |||
3962 | 8 | import os | 8 | import os |
3963 | 9 | import pwd | 9 | import pwd |
3964 | 10 | import grp | 10 | import grp |
3965 | 11 | import random | ||
3966 | 12 | import string | ||
3967 | 11 | import subprocess | 13 | import subprocess |
3970 | 12 | 14 | import hashlib | |
3971 | 13 | from hookenv import log, execution_environment | 15 | |
3972 | 16 | from collections import OrderedDict | ||
3973 | 17 | |||
3974 | 18 | from hookenv import log | ||
3975 | 14 | 19 | ||
3976 | 15 | 20 | ||
3977 | 16 | def service_start(service_name): | 21 | def service_start(service_name): |
3979 | 17 | service('start', service_name) | 22 | """Start a system service""" |
3980 | 23 | return service('start', service_name) | ||
3981 | 18 | 24 | ||
3982 | 19 | 25 | ||
3983 | 20 | def service_stop(service_name): | 26 | def service_stop(service_name): |
3985 | 21 | service('stop', service_name) | 27 | """Stop a system service""" |
3986 | 28 | return service('stop', service_name) | ||
3987 | 29 | |||
3988 | 30 | |||
3989 | 31 | def service_restart(service_name): | ||
3990 | 32 | """Restart a system service""" | ||
3991 | 33 | return service('restart', service_name) | ||
3992 | 34 | |||
3993 | 35 | |||
3994 | 36 | def service_reload(service_name, restart_on_failure=False): | ||
3995 | 37 | """Reload a system service, optionally falling back to restart if reload fails""" | ||
3996 | 38 | service_result = service('reload', service_name) | ||
3997 | 39 | if not service_result and restart_on_failure: | ||
3998 | 40 | service_result = service('restart', service_name) | ||
3999 | 41 | return service_result | ||
4000 | 22 | 42 | ||
4001 | 23 | 43 | ||
4002 | 24 | def service(action, service_name): | 44 | def service(action, service_name): |
4017 | 25 | cmd = None | 45 | """Control a system service""" |
4018 | 26 | if os.path.exists(os.path.join('/etc/init', '%s.conf' % service_name)): | 46 | cmd = ['service', service_name, action] |
4019 | 27 | cmd = ['initctl', action, service_name] | 47 | return subprocess.call(cmd) == 0 |
4020 | 28 | elif os.path.exists(os.path.join('/etc/init.d', service_name)): | 48 | |
4021 | 29 | cmd = [os.path.join('/etc/init.d', service_name), action] | 49 | |
4022 | 30 | if cmd: | 50 | def service_running(service): |
4023 | 31 | return_value = subprocess.call(cmd) | 51 | """Determine whether a system service is running""" |
4024 | 32 | return return_value == 0 | 52 | try: |
4025 | 33 | return False | 53 | output = subprocess.check_output(['service', service, 'status']) |
4026 | 34 | 54 | except subprocess.CalledProcessError: | |
4027 | 35 | 55 | return False | |
4028 | 36 | def adduser(username, password, shell='/bin/bash'): | 56 | else: |
4029 | 37 | """Add a user""" | 57 | if ("start/running" in output or "is running" in output): |
4030 | 38 | # TODO: generate a password if none is given | 58 | return True |
4031 | 59 | else: | ||
4032 | 60 | return False | ||
4033 | 61 | |||
4034 | 62 | |||
4035 | 63 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | ||
4036 | 64 | """Add a user to the system""" | ||
4037 | 39 | try: | 65 | try: |
4038 | 40 | user_info = pwd.getpwnam(username) | 66 | user_info = pwd.getpwnam(username) |
4039 | 41 | log('user {0} already exists!'.format(username)) | 67 | log('user {0} already exists!'.format(username)) |
4040 | 42 | except KeyError: | 68 | except KeyError: |
4041 | 43 | log('creating user {0}'.format(username)) | 69 | log('creating user {0}'.format(username)) |
4049 | 44 | cmd = [ | 70 | cmd = ['useradd'] |
4050 | 45 | 'useradd', | 71 | if system_user or password is None: |
4051 | 46 | '--create-home', | 72 | cmd.append('--system') |
4052 | 47 | '--shell', shell, | 73 | else: |
4053 | 48 | '--password', password, | 74 | cmd.extend([ |
4054 | 49 | username | 75 | '--create-home', |
4055 | 50 | ] | 76 | '--shell', shell, |
4056 | 77 | '--password', password, | ||
4057 | 78 | ]) | ||
4058 | 79 | cmd.append(username) | ||
4059 | 51 | subprocess.check_call(cmd) | 80 | subprocess.check_call(cmd) |
4060 | 52 | user_info = pwd.getpwnam(username) | 81 | user_info = pwd.getpwnam(username) |
4061 | 53 | return user_info | 82 | return user_info |
4062 | @@ -66,36 +95,33 @@ | |||
4063 | 66 | 95 | ||
4064 | 67 | def rsync(from_path, to_path, flags='-r', options=None): | 96 | def rsync(from_path, to_path, flags='-r', options=None): |
4065 | 68 | """Replicate the contents of a path""" | 97 | """Replicate the contents of a path""" |
4066 | 69 | context = execution_environment() | ||
4067 | 70 | options = options or ['--delete', '--executability'] | 98 | options = options or ['--delete', '--executability'] |
4068 | 71 | cmd = ['/usr/bin/rsync', flags] | 99 | cmd = ['/usr/bin/rsync', flags] |
4069 | 72 | cmd.extend(options) | 100 | cmd.extend(options) |
4072 | 73 | cmd.append(from_path.format(**context)) | 101 | cmd.append(from_path) |
4073 | 74 | cmd.append(to_path.format(**context)) | 102 | cmd.append(to_path) |
4074 | 75 | log(" ".join(cmd)) | 103 | log(" ".join(cmd)) |
4075 | 76 | return subprocess.check_output(cmd).strip() | 104 | return subprocess.check_output(cmd).strip() |
4076 | 77 | 105 | ||
4077 | 78 | 106 | ||
4078 | 79 | def symlink(source, destination): | 107 | def symlink(source, destination): |
4079 | 80 | """Create a symbolic link""" | 108 | """Create a symbolic link""" |
4080 | 81 | context = execution_environment() | ||
4081 | 82 | log("Symlinking {} as {}".format(source, destination)) | 109 | log("Symlinking {} as {}".format(source, destination)) |
4082 | 83 | cmd = [ | 110 | cmd = [ |
4083 | 84 | 'ln', | 111 | 'ln', |
4084 | 85 | '-sf', | 112 | '-sf', |
4087 | 86 | source.format(**context), | 113 | source, |
4088 | 87 | destination.format(**context) | 114 | destination, |
4089 | 88 | ] | 115 | ] |
4090 | 89 | subprocess.check_call(cmd) | 116 | subprocess.check_call(cmd) |
4091 | 90 | 117 | ||
4092 | 91 | 118 | ||
4093 | 92 | def mkdir(path, owner='root', group='root', perms=0555, force=False): | 119 | def mkdir(path, owner='root', group='root', perms=0555, force=False): |
4094 | 93 | """Create a directory""" | 120 | """Create a directory""" |
4095 | 94 | context = execution_environment() | ||
4096 | 95 | log("Making dir {} {}:{} {:o}".format(path, owner, group, | 121 | log("Making dir {} {}:{} {:o}".format(path, owner, group, |
4097 | 96 | perms)) | 122 | perms)) |
4100 | 97 | uid = pwd.getpwnam(owner.format(**context)).pw_uid | 123 | uid = pwd.getpwnam(owner).pw_uid |
4101 | 98 | gid = grp.getgrnam(group.format(**context)).gr_gid | 124 | gid = grp.getgrnam(group).gr_gid |
4102 | 99 | realpath = os.path.abspath(path) | 125 | realpath = os.path.abspath(path) |
4103 | 100 | if os.path.exists(realpath): | 126 | if os.path.exists(realpath): |
4104 | 101 | if force and not os.path.isdir(realpath): | 127 | if force and not os.path.isdir(realpath): |
4105 | @@ -106,50 +132,19 @@ | |||
4106 | 106 | os.chown(realpath, uid, gid) | 132 | os.chown(realpath, uid, gid) |
4107 | 107 | 133 | ||
4108 | 108 | 134 | ||
4110 | 109 | def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): | 135 | def write_file(path, content, owner='root', group='root', perms=0444): |
4111 | 110 | """Create or overwrite a file with the contents of a string""" | 136 | """Create or overwrite a file with the contents of a string""" |
4119 | 111 | context = execution_environment() | 137 | log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) |
4120 | 112 | context.update(kwargs) | 138 | uid = pwd.getpwnam(owner).pw_uid |
4121 | 113 | log("Writing file {} {}:{} {:o}".format(path, owner, group, | 139 | gid = grp.getgrnam(group).gr_gid |
4122 | 114 | perms)) | 140 | with open(path, 'w') as target: |
4116 | 115 | uid = pwd.getpwnam(owner.format(**context)).pw_uid | ||
4117 | 116 | gid = grp.getgrnam(group.format(**context)).gr_gid | ||
4118 | 117 | with open(path.format(**context), 'w') as target: | ||
4123 | 118 | os.fchown(target.fileno(), uid, gid) | 141 | os.fchown(target.fileno(), uid, gid) |
4124 | 119 | os.fchmod(target.fileno(), perms) | 142 | os.fchmod(target.fileno(), perms) |
4154 | 120 | target.write(fmtstr.format(**context)) | 143 | target.write(content) |
4126 | 121 | |||
4127 | 122 | |||
4128 | 123 | def render_template_file(source, destination, **kwargs): | ||
4129 | 124 | """Create or overwrite a file using a template""" | ||
4130 | 125 | log("Rendering template {} for {}".format(source, | ||
4131 | 126 | destination)) | ||
4132 | 127 | context = execution_environment() | ||
4133 | 128 | with open(source.format(**context), 'r') as template: | ||
4134 | 129 | write_file(destination.format(**context), template.read(), | ||
4135 | 130 | **kwargs) | ||
4136 | 131 | |||
4137 | 132 | |||
4138 | 133 | def apt_install(packages, options=None, fatal=False): | ||
4139 | 134 | """Install one or more packages""" | ||
4140 | 135 | options = options or [] | ||
4141 | 136 | cmd = ['apt-get', '-y'] | ||
4142 | 137 | cmd.extend(options) | ||
4143 | 138 | cmd.append('install') | ||
4144 | 139 | if isinstance(packages, basestring): | ||
4145 | 140 | cmd.append(packages) | ||
4146 | 141 | else: | ||
4147 | 142 | cmd.extend(packages) | ||
4148 | 143 | log("Installing {} with options: {}".format(packages, | ||
4149 | 144 | options)) | ||
4150 | 145 | if fatal: | ||
4151 | 146 | subprocess.check_call(cmd) | ||
4152 | 147 | else: | ||
4153 | 148 | subprocess.call(cmd) | ||
4155 | 149 | 144 | ||
4156 | 150 | 145 | ||
4157 | 151 | def mount(device, mountpoint, options=None, persist=False): | 146 | def mount(device, mountpoint, options=None, persist=False): |
4159 | 152 | '''Mount a filesystem''' | 147 | """Mount a filesystem at a particular mountpoint""" |
4160 | 153 | cmd_args = ['mount'] | 148 | cmd_args = ['mount'] |
4161 | 154 | if options is not None: | 149 | if options is not None: |
4162 | 155 | cmd_args.extend(['-o', options]) | 150 | cmd_args.extend(['-o', options]) |
4163 | @@ -166,7 +161,7 @@ | |||
4164 | 166 | 161 | ||
4165 | 167 | 162 | ||
4166 | 168 | def umount(mountpoint, persist=False): | 163 | def umount(mountpoint, persist=False): |
4168 | 169 | '''Unmount a filesystem''' | 164 | """Unmount a filesystem""" |
4169 | 170 | cmd_args = ['umount', mountpoint] | 165 | cmd_args = ['umount', mountpoint] |
4170 | 171 | try: | 166 | try: |
4171 | 172 | subprocess.check_output(cmd_args) | 167 | subprocess.check_output(cmd_args) |
4172 | @@ -180,9 +175,73 @@ | |||
4173 | 180 | 175 | ||
4174 | 181 | 176 | ||
4175 | 182 | def mounts(): | 177 | def mounts(): |
4177 | 183 | '''List of all mounted volumes as [[mountpoint,device],[...]]''' | 178 | """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" |
4178 | 184 | with open('/proc/mounts') as f: | 179 | with open('/proc/mounts') as f: |
4179 | 185 | # [['/mount/point','/dev/path'],[...]] | 180 | # [['/mount/point','/dev/path'],[...]] |
4180 | 186 | system_mounts = [m[1::-1] for m in [l.strip().split() | 181 | system_mounts = [m[1::-1] for m in [l.strip().split() |
4181 | 187 | for l in f.readlines()]] | 182 | for l in f.readlines()]] |
4182 | 188 | return system_mounts | 183 | return system_mounts |
4183 | 184 | |||
4184 | 185 | |||
4185 | 186 | def file_hash(path): | ||
4186 | 187 | """Generate an md5 hash of the contents of 'path' or None if not found """ | ||
4187 | 188 | if os.path.exists(path): | ||
4188 | 189 | h = hashlib.md5() | ||
4189 | 190 | with open(path, 'r') as source: | ||
4190 | 191 | h.update(source.read()) # IGNORE:E1101 - it does have update | ||
4191 | 192 | return h.hexdigest() | ||
4192 | 193 | else: | ||
4193 | 194 | return None | ||
4194 | 195 | |||
4195 | 196 | |||
4196 | 197 | def restart_on_change(restart_map): | ||
4197 | 198 | """Restart services based on configuration files changing | ||
4198 | 199 | |||
4199 | 200 | This function is used as a decorator, for example | ||
4200 | 201 | |||
4201 | 202 | @restart_on_change({ | ||
4202 | 203 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | ||
4203 | 204 | }) | ||
4204 | 205 | def ceph_client_changed(): | ||
4205 | 206 | ... | ||
4206 | 207 | |||
4207 | 208 | In this example, the cinder-api and cinder-volume services | ||
4208 | 209 | would be restarted if /etc/ceph/ceph.conf is changed by the | ||
4209 | 210 | ceph_client_changed function. | ||
4210 | 211 | """ | ||
4211 | 212 | def wrap(f): | ||
4212 | 213 | def wrapped_f(*args): | ||
4213 | 214 | checksums = {} | ||
4214 | 215 | for path in restart_map: | ||
4215 | 216 | checksums[path] = file_hash(path) | ||
4216 | 217 | f(*args) | ||
4217 | 218 | restarts = [] | ||
4218 | 219 | for path in restart_map: | ||
4219 | 220 | if checksums[path] != file_hash(path): | ||
4220 | 221 | restarts += restart_map[path] | ||
4221 | 222 | for service_name in list(OrderedDict.fromkeys(restarts)): | ||
4222 | 223 | service('restart', service_name) | ||
4223 | 224 | return wrapped_f | ||
4224 | 225 | return wrap | ||
4225 | 226 | |||
4226 | 227 | |||
4227 | 228 | def lsb_release(): | ||
4228 | 229 | """Return /etc/lsb-release in a dict""" | ||
4229 | 230 | d = {} | ||
4230 | 231 | with open('/etc/lsb-release', 'r') as lsb: | ||
4231 | 232 | for l in lsb: | ||
4232 | 233 | k, v = l.split('=') | ||
4233 | 234 | d[k.strip()] = v.strip() | ||
4234 | 235 | return d | ||
4235 | 236 | |||
4236 | 237 | |||
4237 | 238 | def pwgen(length=None): | ||
4238 | 239 | """Generate a random password.""" | ||
4239 | 240 | if length is None: | ||
4240 | 241 | length = random.choice(range(35, 45)) | ||
4241 | 242 | alphanumeric_chars = [ | ||
4242 | 243 | l for l in (string.letters + string.digits) | ||
4243 | 244 | if l not in 'l0QD1vAEIOUaeiou'] | ||
4244 | 245 | random_chars = [ | ||
4245 | 246 | random.choice(alphanumeric_chars) for _ in range(length)] | ||
4246 | 247 | return(''.join(random_chars)) | ||
4247 | 189 | 248 | ||
4248 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
4249 | --- hooks/charmhelpers/fetch/__init__.py 2013-06-07 09:39:50 +0000 | |||
4250 | +++ hooks/charmhelpers/fetch/__init__.py 2013-11-05 18:43:49 +0000 | |||
4251 | @@ -1,15 +1,116 @@ | |||
4252 | 1 | import importlib | ||
4253 | 1 | from yaml import safe_load | 2 | from yaml import safe_load |
4256 | 2 | from core.hookenv import config_get | 3 | from charmhelpers.core.host import ( |
4257 | 3 | from subprocess import check_call | 4 | lsb_release |
4258 | 5 | ) | ||
4259 | 6 | from urlparse import ( | ||
4260 | 7 | urlparse, | ||
4261 | 8 | urlunparse, | ||
4262 | 9 | ) | ||
4263 | 10 | import subprocess | ||
4264 | 11 | from charmhelpers.core.hookenv import ( | ||
4265 | 12 | config, | ||
4266 | 13 | log, | ||
4267 | 14 | ) | ||
4268 | 15 | import apt_pkg | ||
4269 | 16 | |||
4270 | 17 | CLOUD_ARCHIVE = """# Ubuntu Cloud Archive | ||
4271 | 18 | deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main | ||
4272 | 19 | """ | ||
4273 | 20 | PROPOSED_POCKET = """# Proposed | ||
4274 | 21 | deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted | ||
4275 | 22 | """ | ||
4276 | 23 | |||
4277 | 24 | |||
4278 | 25 | def filter_installed_packages(packages): | ||
4279 | 26 | """Returns a list of packages that require installation""" | ||
4280 | 27 | apt_pkg.init() | ||
4281 | 28 | cache = apt_pkg.Cache() | ||
4282 | 29 | _pkgs = [] | ||
4283 | 30 | for package in packages: | ||
4284 | 31 | try: | ||
4285 | 32 | p = cache[package] | ||
4286 | 33 | p.current_ver or _pkgs.append(package) | ||
4287 | 34 | except KeyError: | ||
4288 | 35 | log('Package {} has no installation candidate.'.format(package), | ||
4289 | 36 | level='WARNING') | ||
4290 | 37 | _pkgs.append(package) | ||
4291 | 38 | return _pkgs | ||
4292 | 39 | |||
4293 | 40 | |||
4294 | 41 | def apt_install(packages, options=None, fatal=False): | ||
4295 | 42 | """Install one or more packages""" | ||
4296 | 43 | options = options or [] | ||
4297 | 44 | cmd = ['apt-get', '-y'] | ||
4298 | 45 | cmd.extend(options) | ||
4299 | 46 | cmd.append('install') | ||
4300 | 47 | if isinstance(packages, basestring): | ||
4301 | 48 | cmd.append(packages) | ||
4302 | 49 | else: | ||
4303 | 50 | cmd.extend(packages) | ||
4304 | 51 | log("Installing {} with options: {}".format(packages, | ||
4305 | 52 | options)) | ||
4306 | 53 | if fatal: | ||
4307 | 54 | subprocess.check_call(cmd) | ||
4308 | 55 | else: | ||
4309 | 56 | subprocess.call(cmd) | ||
4310 | 57 | |||
4311 | 58 | |||
4312 | 59 | def apt_update(fatal=False): | ||
4313 | 60 | """Update local apt cache""" | ||
4314 | 61 | cmd = ['apt-get', 'update'] | ||
4315 | 62 | if fatal: | ||
4316 | 63 | subprocess.check_call(cmd) | ||
4317 | 64 | else: | ||
4318 | 65 | subprocess.call(cmd) | ||
4319 | 66 | |||
4320 | 67 | |||
4321 | 68 | def apt_purge(packages, fatal=False): | ||
4322 | 69 | """Purge one or more packages""" | ||
4323 | 70 | cmd = ['apt-get', '-y', 'purge'] | ||
4324 | 71 | if isinstance(packages, basestring): | ||
4325 | 72 | cmd.append(packages) | ||
4326 | 73 | else: | ||
4327 | 74 | cmd.extend(packages) | ||
4328 | 75 | log("Purging {}".format(packages)) | ||
4329 | 76 | if fatal: | ||
4330 | 77 | subprocess.check_call(cmd) | ||
4331 | 78 | else: | ||
4332 | 79 | subprocess.call(cmd) | ||
4333 | 80 | |||
4334 | 81 | |||
4335 | 82 | def apt_hold(packages, fatal=False): | ||
4336 | 83 | """Hold one or more packages""" | ||
4337 | 84 | cmd = ['apt-mark', 'hold'] | ||
4338 | 85 | if isinstance(packages, basestring): | ||
4339 | 86 | cmd.append(packages) | ||
4340 | 87 | else: | ||
4341 | 88 | cmd.extend(packages) | ||
4342 | 89 | log("Holding {}".format(packages)) | ||
4343 | 90 | if fatal: | ||
4344 | 91 | subprocess.check_call(cmd) | ||
4345 | 92 | else: | ||
4346 | 93 | subprocess.call(cmd) | ||
4347 | 4 | 94 | ||
4348 | 5 | 95 | ||
4349 | 6 | def add_source(source, key=None): | 96 | def add_source(source, key=None): |
4354 | 7 | if ((source.startswith('ppa:') or | 97 | if (source.startswith('ppa:') or |
4355 | 8 | source.startswith('cloud:') or | 98 | source.startswith('http:') or |
4356 | 9 | source.startswith('http:'))): | 99 | source.startswith('deb ') or |
4357 | 10 | check_call('add-apt-repository', source) | 100 | source.startswith('cloud-archive:')): |
4358 | 101 | subprocess.check_call(['add-apt-repository', '--yes', source]) | ||
4359 | 102 | elif source.startswith('cloud:'): | ||
4360 | 103 | apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), | ||
4361 | 104 | fatal=True) | ||
4362 | 105 | pocket = source.split(':')[-1] | ||
4363 | 106 | with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: | ||
4364 | 107 | apt.write(CLOUD_ARCHIVE.format(pocket)) | ||
4365 | 108 | elif source == 'proposed': | ||
4366 | 109 | release = lsb_release()['DISTRIB_CODENAME'] | ||
4367 | 110 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | ||
4368 | 111 | apt.write(PROPOSED_POCKET.format(release)) | ||
4369 | 11 | if key: | 112 | if key: |
4371 | 12 | check_call('apt-key', 'import', key) | 113 | subprocess.check_call(['apt-key', 'import', key]) |
4372 | 13 | 114 | ||
4373 | 14 | 115 | ||
4374 | 15 | class SourceConfigError(Exception): | 116 | class SourceConfigError(Exception): |
4375 | @@ -32,15 +133,96 @@ | |||
4376 | 32 | 133 | ||
4377 | 33 | Note that 'null' (a.k.a. None) should not be quoted. | 134 | Note that 'null' (a.k.a. None) should not be quoted. |
4378 | 34 | """ | 135 | """ |
4382 | 35 | sources = safe_load(config_get(sources_var)) | 136 | sources = safe_load(config(sources_var)) |
4383 | 36 | keys = safe_load(config_get(keys_var)) | 137 | keys = config(keys_var) |
4384 | 37 | if isinstance(sources, basestring) and isinstance(keys, basestring): | 138 | if keys is not None: |
4385 | 139 | keys = safe_load(keys) | ||
4386 | 140 | if isinstance(sources, basestring) and ( | ||
4387 | 141 | keys is None or isinstance(keys, basestring)): | ||
4388 | 38 | add_source(sources, keys) | 142 | add_source(sources, keys) |
4389 | 39 | else: | 143 | else: |
4390 | 40 | if not len(sources) == len(keys): | 144 | if not len(sources) == len(keys): |
4391 | 41 | msg = 'Install sources and keys lists are different lengths' | 145 | msg = 'Install sources and keys lists are different lengths' |
4392 | 42 | raise SourceConfigError(msg) | 146 | raise SourceConfigError(msg) |
4393 | 43 | for src_num in range(len(sources)): | 147 | for src_num in range(len(sources)): |
4395 | 44 | add_source(sources[src_num], sources[src_num]) | 148 | add_source(sources[src_num], keys[src_num]) |
4396 | 45 | if update: | 149 | if update: |
4398 | 46 | check_call(('apt-get', 'update')) | 150 | apt_update(fatal=True) |
4399 | 151 | |||
4400 | 152 | # The order of this list is very important. Handlers should be listed in from | ||
4401 | 153 | # least- to most-specific URL matching. | ||
4402 | 154 | FETCH_HANDLERS = ( | ||
4403 | 155 | 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', | ||
4404 | 156 | 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', | ||
4405 | 157 | ) | ||
4406 | 158 | |||
4407 | 159 | |||
4408 | 160 | class UnhandledSource(Exception): | ||
4409 | 161 | pass | ||
4410 | 162 | |||
4411 | 163 | |||
4412 | 164 | def install_remote(source): | ||
4413 | 165 | """ | ||
4414 | 166 | Install a file tree from a remote source | ||
4415 | 167 | |||
4416 | 168 | The specified source should be a url of the form: | ||
4417 | 169 | scheme://[host]/path[#[option=value][&...]] | ||
4418 | 170 | |||
4419 | 171 | Schemes supported are based on this modules submodules | ||
4420 | 172 | Options supported are submodule-specific""" | ||
4421 | 173 | # We ONLY check for True here because can_handle may return a string | ||
4422 | 174 | # explaining why it can't handle a given source. | ||
4423 | 175 | handlers = [h for h in plugins() if h.can_handle(source) is True] | ||
4424 | 176 | installed_to = None | ||
4425 | 177 | for handler in handlers: | ||
4426 | 178 | try: | ||
4427 | 179 | installed_to = handler.install(source) | ||
4428 | 180 | except UnhandledSource: | ||
4429 | 181 | pass | ||
4430 | 182 | if not installed_to: | ||
4431 | 183 | raise UnhandledSource("No handler found for source {}".format(source)) | ||
4432 | 184 | return installed_to | ||
4433 | 185 | |||
4434 | 186 | |||
4435 | 187 | def install_from_config(config_var_name): | ||
4436 | 188 | charm_config = config() | ||
4437 | 189 | source = charm_config[config_var_name] | ||
4438 | 190 | return install_remote(source) | ||
4439 | 191 | |||
4440 | 192 | |||
4441 | 193 | class BaseFetchHandler(object): | ||
4442 | 194 | """Base class for FetchHandler implementations in fetch plugins""" | ||
4443 | 195 | def can_handle(self, source): | ||
4444 | 196 | """Returns True if the source can be handled. Otherwise returns | ||
4445 | 197 | a string explaining why it cannot""" | ||
4446 | 198 | return "Wrong source type" | ||
4447 | 199 | |||
4448 | 200 | def install(self, source): | ||
4449 | 201 | """Try to download and unpack the source. Return the path to the | ||
4450 | 202 | unpacked files or raise UnhandledSource.""" | ||
4451 | 203 | raise UnhandledSource("Wrong source type {}".format(source)) | ||
4452 | 204 | |||
4453 | 205 | def parse_url(self, url): | ||
4454 | 206 | return urlparse(url) | ||
4455 | 207 | |||
4456 | 208 | def base_url(self, url): | ||
4457 | 209 | """Return url without querystring or fragment""" | ||
4458 | 210 | parts = list(self.parse_url(url)) | ||
4459 | 211 | parts[4:] = ['' for i in parts[4:]] | ||
4460 | 212 | return urlunparse(parts) | ||
4461 | 213 | |||
4462 | 214 | |||
4463 | 215 | def plugins(fetch_handlers=None): | ||
4464 | 216 | if not fetch_handlers: | ||
4465 | 217 | fetch_handlers = FETCH_HANDLERS | ||
4466 | 218 | plugin_list = [] | ||
4467 | 219 | for handler_name in fetch_handlers: | ||
4468 | 220 | package, classname = handler_name.rsplit('.', 1) | ||
4469 | 221 | try: | ||
4470 | 222 | handler_class = getattr(importlib.import_module(package), classname) | ||
4471 | 223 | plugin_list.append(handler_class()) | ||
4472 | 224 | except (ImportError, AttributeError): | ||
4473 | 225 | # Skip missing plugins so that they can be omitted from | ||
4474 | 226 | # installation if desired | ||
4475 | 227 | log("FetchHandler {} not found, skipping plugin".format(handler_name)) | ||
4476 | 228 | return plugin_list | ||
4477 | 47 | 229 | ||
4478 | === added file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
4479 | --- hooks/charmhelpers/fetch/archiveurl.py 1970-01-01 00:00:00 +0000 | |||
4480 | +++ hooks/charmhelpers/fetch/archiveurl.py 2013-11-05 18:43:49 +0000 | |||
4481 | @@ -0,0 +1,48 @@ | |||
4482 | 1 | import os | ||
4483 | 2 | import urllib2 | ||
4484 | 3 | from charmhelpers.fetch import ( | ||
4485 | 4 | BaseFetchHandler, | ||
4486 | 5 | UnhandledSource | ||
4487 | 6 | ) | ||
4488 | 7 | from charmhelpers.payload.archive import ( | ||
4489 | 8 | get_archive_handler, | ||
4490 | 9 | extract, | ||
4491 | 10 | ) | ||
4492 | 11 | from charmhelpers.core.host import mkdir | ||
4493 | 12 | |||
4494 | 13 | |||
4495 | 14 | class ArchiveUrlFetchHandler(BaseFetchHandler): | ||
4496 | 15 | """Handler for archives via generic URLs""" | ||
4497 | 16 | def can_handle(self, source): | ||
4498 | 17 | url_parts = self.parse_url(source) | ||
4499 | 18 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): | ||
4500 | 19 | return "Wrong source type" | ||
4501 | 20 | if get_archive_handler(self.base_url(source)): | ||
4502 | 21 | return True | ||
4503 | 22 | return False | ||
4504 | 23 | |||
4505 | 24 | def download(self, source, dest): | ||
4506 | 25 | # propagate all exceptions | ||
4507 | 26 | # URLError, OSError, etc | ||
4508 | 27 | response = urllib2.urlopen(source) | ||
4509 | 28 | try: | ||
4510 | 29 | with open(dest, 'w') as dest_file: | ||
4511 | 30 | dest_file.write(response.read()) | ||
4512 | 31 | except Exception as e: | ||
4513 | 32 | if os.path.isfile(dest): | ||
4514 | 33 | os.unlink(dest) | ||
4515 | 34 | raise e | ||
4516 | 35 | |||
4517 | 36 | def install(self, source): | ||
4518 | 37 | url_parts = self.parse_url(source) | ||
4519 | 38 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') | ||
4520 | 39 | if not os.path.exists(dest_dir): | ||
4521 | 40 | mkdir(dest_dir, perms=0755) | ||
4522 | 41 | dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) | ||
4523 | 42 | try: | ||
4524 | 43 | self.download(source, dld_file) | ||
4525 | 44 | except urllib2.URLError as e: | ||
4526 | 45 | raise UnhandledSource(e.reason) | ||
4527 | 46 | except OSError as e: | ||
4528 | 47 | raise UnhandledSource(e.strerror) | ||
4529 | 48 | return extract(dld_file) | ||
4530 | 0 | 49 | ||
4531 | === added file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
4532 | --- hooks/charmhelpers/fetch/bzrurl.py 1970-01-01 00:00:00 +0000 | |||
4533 | +++ hooks/charmhelpers/fetch/bzrurl.py 2013-11-05 18:43:49 +0000 | |||
4534 | @@ -0,0 +1,49 @@ | |||
4535 | 1 | import os | ||
4536 | 2 | from charmhelpers.fetch import ( | ||
4537 | 3 | BaseFetchHandler, | ||
4538 | 4 | UnhandledSource | ||
4539 | 5 | ) | ||
4540 | 6 | from charmhelpers.core.host import mkdir | ||
4541 | 7 | |||
4542 | 8 | try: | ||
4543 | 9 | from bzrlib.branch import Branch | ||
4544 | 10 | except ImportError: | ||
4545 | 11 | from charmhelpers.fetch import apt_install | ||
4546 | 12 | apt_install("python-bzrlib") | ||
4547 | 13 | from bzrlib.branch import Branch | ||
4548 | 14 | |||
4549 | 15 | class BzrUrlFetchHandler(BaseFetchHandler): | ||
4550 | 16 | """Handler for bazaar branches via generic and lp URLs""" | ||
4551 | 17 | def can_handle(self, source): | ||
4552 | 18 | url_parts = self.parse_url(source) | ||
4553 | 19 | if url_parts.scheme not in ('bzr+ssh', 'lp'): | ||
4554 | 20 | return False | ||
4555 | 21 | else: | ||
4556 | 22 | return True | ||
4557 | 23 | |||
4558 | 24 | def branch(self, source, dest): | ||
4559 | 25 | url_parts = self.parse_url(source) | ||
4560 | 26 | # If we use lp:branchname scheme we need to load plugins | ||
4561 | 27 | if not self.can_handle(source): | ||
4562 | 28 | raise UnhandledSource("Cannot handle {}".format(source)) | ||
4563 | 29 | if url_parts.scheme == "lp": | ||
4564 | 30 | from bzrlib.plugin import load_plugins | ||
4565 | 31 | load_plugins() | ||
4566 | 32 | try: | ||
4567 | 33 | remote_branch = Branch.open(source) | ||
4568 | 34 | remote_branch.bzrdir.sprout(dest).open_branch() | ||
4569 | 35 | except Exception as e: | ||
4570 | 36 | raise e | ||
4571 | 37 | |||
4572 | 38 | def install(self, source): | ||
4573 | 39 | url_parts = self.parse_url(source) | ||
4574 | 40 | branch_name = url_parts.path.strip("/").split("/")[-1] | ||
4575 | 41 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) | ||
4576 | 42 | if not os.path.exists(dest_dir): | ||
4577 | 43 | mkdir(dest_dir, perms=0755) | ||
4578 | 44 | try: | ||
4579 | 45 | self.branch(source, dest_dir) | ||
4580 | 46 | except OSError as e: | ||
4581 | 47 | raise UnhandledSource(e.strerror) | ||
4582 | 48 | return dest_dir | ||
4583 | 49 | |||
4584 | 0 | 50 | ||
4585 | === removed directory 'hooks/charmhelpers/payload' | |||
4586 | === removed file 'hooks/charmhelpers/payload/__init__.py' | |||
4587 | --- hooks/charmhelpers/payload/__init__.py 2013-06-07 09:39:50 +0000 | |||
4588 | +++ hooks/charmhelpers/payload/__init__.py 1970-01-01 00:00:00 +0000 | |||
4589 | @@ -1,1 +0,0 @@ | |||
4590 | 1 | "Tools for working with files injected into a charm just before deployment." | ||
4591 | 2 | 0 | ||
4592 | === removed file 'hooks/charmhelpers/payload/execd.py' | |||
4593 | --- hooks/charmhelpers/payload/execd.py 2013-06-07 09:39:50 +0000 | |||
4594 | +++ hooks/charmhelpers/payload/execd.py 1970-01-01 00:00:00 +0000 | |||
4595 | @@ -1,40 +0,0 @@ | |||
4596 | 1 | #!/usr/bin/env python | ||
4597 | 2 | |||
4598 | 3 | import os | ||
4599 | 4 | import sys | ||
4600 | 5 | import subprocess | ||
4601 | 6 | from charmhelpers.core import hookenv | ||
4602 | 7 | |||
4603 | 8 | |||
4604 | 9 | def default_execd_dir(): | ||
4605 | 10 | return os.path.join(os.environ['CHARM_DIR'],'exec.d') | ||
4606 | 11 | |||
4607 | 12 | |||
4608 | 13 | def execd_module_paths(execd_dir=None): | ||
4609 | 14 | if not execd_dir: | ||
4610 | 15 | execd_dir = default_execd_dir() | ||
4611 | 16 | for subpath in os.listdir(execd_dir): | ||
4612 | 17 | module = os.path.join(execd_dir, subpath) | ||
4613 | 18 | if os.path.isdir(module): | ||
4614 | 19 | yield module | ||
4615 | 20 | |||
4616 | 21 | |||
4617 | 22 | def execd_submodule_paths(submodule, execd_dir=None): | ||
4618 | 23 | for module_path in execd_module_paths(execd_dir): | ||
4619 | 24 | path = os.path.join(module_path, submodule) | ||
4620 | 25 | if os.access(path, os.X_OK) and os.path.isfile(path): | ||
4621 | 26 | yield path | ||
4622 | 27 | |||
4623 | 28 | |||
4624 | 29 | def execd_run(submodule, execd_dir=None, die_on_error=False): | ||
4625 | 30 | for submodule_path in execd_submodule_paths(submodule, execd_dir): | ||
4626 | 31 | try: | ||
4627 | 32 | subprocess.check_call(submodule_path, shell=True) | ||
4628 | 33 | except subprocess.CalledProcessError as e: | ||
4629 | 34 | hookenv.log(e.output) | ||
4630 | 35 | if die_on_error: | ||
4631 | 36 | sys.exit(e.returncode) | ||
4632 | 37 | |||
4633 | 38 | |||
4634 | 39 | def execd_preinstall(execd_dir=None): | ||
4635 | 40 | execd_run(execd_dir, 'charm-pre-install') | ||
4636 | 41 | 0 | ||
4637 | === modified file 'hooks/hooks.py' | |||
4638 | --- hooks/hooks.py 2013-07-03 05:54:19 +0000 | |||
4639 | +++ hooks/hooks.py 2013-11-05 18:43:49 +0000 | |||
4640 | @@ -10,12 +10,15 @@ | |||
4641 | 10 | service_start, | 10 | service_start, |
4642 | 11 | service_stop, | 11 | service_stop, |
4643 | 12 | adduser, | 12 | adduser, |
4644 | 13 | apt_install, | ||
4645 | 14 | log, | 13 | log, |
4646 | 15 | mkdir, | 14 | mkdir, |
4647 | 16 | symlink, | 15 | symlink, |
4648 | 17 | ) | 16 | ) |
4649 | 18 | 17 | ||
4650 | 18 | from charmhelpers.fetch import ( | ||
4651 | 19 | apt_install, | ||
4652 | 20 | ) | ||
4653 | 21 | |||
4654 | 19 | from charmhelpers.core.hookenv import ( | 22 | from charmhelpers.core.hookenv import ( |
4655 | 20 | Hooks, | 23 | Hooks, |
4656 | 21 | relation_get, | 24 | relation_get, |
4657 | @@ -69,7 +72,7 @@ | |||
4658 | 69 | 72 | ||
4659 | 70 | def add_extra_repos(): | 73 | def add_extra_repos(): |
4660 | 71 | extra_repos = config('extra_archives') | 74 | extra_repos = config('extra_archives') |
4662 | 72 | if extra_repos.data: #serialize cannot be cast as boolean | 75 | if extra_repos != None: |
4663 | 73 | repos_added = False | 76 | repos_added = False |
4664 | 74 | extra_repos_added = set() | 77 | extra_repos_added = set() |
4665 | 75 | for repo in extra_repos.split(): | 78 | for repo in extra_repos.split(): |
Thanks for this submission! LGTM!