Merge lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2 into lp:charms/etherpad-lite
- Precise Pangolin (12.04)
- lp1247636-fix-try2
- Merge into trunk
Proposed by
Erik B. Andersen
Status: | Merged |
---|---|
Merged at revision: | 14 |
Proposed branch: | lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2 |
Merge into: | lp:charms/etherpad-lite |
Diff against target: |
4665 lines (+588/-3619) 28 files modified
charm-helpers.yaml (+6/-0) hooks/charmhelpers/contrib/charmhelpers/IMPORT (+0/-4) hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-183) hooks/charmhelpers/contrib/charmsupport/IMPORT (+0/-14) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-217) hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-156) hooks/charmhelpers/contrib/hahelpers/IMPORT (+0/-7) hooks/charmhelpers/contrib/hahelpers/apache_utils.py (+0/-196) hooks/charmhelpers/contrib/hahelpers/ceph_utils.py (+0/-256) hooks/charmhelpers/contrib/hahelpers/cluster_utils.py (+0/-130) hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py (+0/-55) hooks/charmhelpers/contrib/hahelpers/utils.py (+0/-332) hooks/charmhelpers/contrib/jujugui/IMPORT (+0/-4) hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602) hooks/charmhelpers/contrib/openstack/IMPORT (+0/-9) hooks/charmhelpers/contrib/openstack/nova/essex (+0/-43) hooks/charmhelpers/contrib/openstack/nova/folsom (+0/-81) hooks/charmhelpers/contrib/openstack/nova/nova-common (+0/-147) hooks/charmhelpers/contrib/openstack/openstack-common (+0/-781) hooks/charmhelpers/contrib/openstack/openstack_utils.py (+0/-228) hooks/charmhelpers/core/hookenv.py (+153/-45) hooks/charmhelpers/core/host.py (+133/-74) hooks/charmhelpers/fetch/__init__.py (+194/-12) hooks/charmhelpers/fetch/archiveurl.py (+48/-0) hooks/charmhelpers/fetch/bzrurl.py (+49/-0) hooks/charmhelpers/payload/__init__.py (+0/-1) hooks/charmhelpers/payload/execd.py (+0/-40) hooks/hooks.py (+5/-2) |
To merge this branch: | bzr merge lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2 |
Related bugs: |
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Marco Ceppi (community) | Approve | | |
Review via email: mp+193986@code.launchpad.net |
Commit message
Description of the change
Update of the hooks.py and addition of charm-helpers.yaml to be able to use the charm helpers sync tool.
Then a charm-helpers sync was run, which pulls in a version that should fix LP:1247636.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file 'charm-helpers.yaml' |
2 | --- charm-helpers.yaml 1970-01-01 00:00:00 +0000 |
3 | +++ charm-helpers.yaml 2013-11-05 18:43:49 +0000 |
4 | @@ -0,0 +1,6 @@ |
5 | +destination: "hooks/charmhelpers" |
6 | +branch: "lp:charm-helpers" |
7 | +include: |
8 | + - core |
9 | + - fetch |
10 | + |
11 | |
12 | === removed directory 'hooks/charmhelpers/contrib' |
13 | === removed file 'hooks/charmhelpers/contrib/__init__.py' |
14 | === removed directory 'hooks/charmhelpers/contrib/charmhelpers' |
15 | === removed file 'hooks/charmhelpers/contrib/charmhelpers/IMPORT' |
16 | --- hooks/charmhelpers/contrib/charmhelpers/IMPORT 2013-06-07 09:39:50 +0000 |
17 | +++ hooks/charmhelpers/contrib/charmhelpers/IMPORT 1970-01-01 00:00:00 +0000 |
18 | @@ -1,4 +0,0 @@ |
19 | -Source lp:charm-tools/trunk |
20 | - |
21 | -charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py |
22 | -charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py |
23 | |
24 | === removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py' |
25 | --- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2013-06-07 09:39:50 +0000 |
26 | +++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000 |
27 | @@ -1,183 +0,0 @@ |
28 | -# Copyright 2012 Canonical Ltd. This software is licensed under the |
29 | -# GNU Affero General Public License version 3 (see the file LICENSE). |
30 | - |
31 | -import warnings |
32 | -warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) |
33 | - |
34 | -"""Helper functions for writing Juju charms in Python.""" |
35 | - |
36 | -__metaclass__ = type |
37 | -__all__ = [ |
38 | - #'get_config', # core.hookenv.config() |
39 | - #'log', # core.hookenv.log() |
40 | - #'log_entry', # core.hookenv.log() |
41 | - #'log_exit', # core.hookenv.log() |
42 | - #'relation_get', # core.hookenv.relation_get() |
43 | - #'relation_set', # core.hookenv.relation_set() |
44 | - #'relation_ids', # core.hookenv.relation_ids() |
45 | - #'relation_list', # core.hookenv.relation_units() |
46 | - #'config_get', # core.hookenv.config() |
47 | - #'unit_get', # core.hookenv.unit_get() |
48 | - #'open_port', # core.hookenv.open_port() |
49 | - #'close_port', # core.hookenv.close_port() |
50 | - #'service_control', # core.host.service() |
51 | - 'unit_info', # client-side, NOT IMPLEMENTED |
52 | - 'wait_for_machine', # client-side, NOT IMPLEMENTED |
53 | - 'wait_for_page_contents', # client-side, NOT IMPLEMENTED |
54 | - 'wait_for_relation', # client-side, NOT IMPLEMENTED |
55 | - 'wait_for_unit', # client-side, NOT IMPLEMENTED |
56 | - ] |
57 | - |
58 | -import operator |
59 | -from shelltoolbox import ( |
60 | - command, |
61 | -) |
62 | -import tempfile |
63 | -import time |
64 | -import urllib2 |
65 | -import yaml |
66 | - |
67 | -SLEEP_AMOUNT = 0.1 |
68 | -# We create a juju_status Command here because it makes testing much, |
69 | -# much easier. |
70 | -juju_status = lambda: command('juju')('status') |
71 | - |
72 | -# re-implemented as charmhelpers.fetch.configure_sources() |
73 | -#def configure_source(update=False): |
74 | -# source = config_get('source') |
75 | -# if ((source.startswith('ppa:') or |
76 | -# source.startswith('cloud:') or |
77 | -# source.startswith('http:'))): |
78 | -# run('add-apt-repository', source) |
79 | -# if source.startswith("http:"): |
80 | -# run('apt-key', 'import', config_get('key')) |
81 | -# if update: |
82 | -# run('apt-get', 'update') |
83 | - |
84 | -# DEPRECATED: client-side only |
85 | -def make_charm_config_file(charm_config): |
86 | - charm_config_file = tempfile.NamedTemporaryFile() |
87 | - charm_config_file.write(yaml.dump(charm_config)) |
88 | - charm_config_file.flush() |
89 | - # The NamedTemporaryFile instance is returned instead of just the name |
90 | - # because we want to take advantage of garbage collection-triggered |
91 | - # deletion of the temp file when it goes out of scope in the caller. |
92 | - return charm_config_file |
93 | - |
94 | - |
95 | -# DEPRECATED: client-side only |
96 | -def unit_info(service_name, item_name, data=None, unit=None): |
97 | - if data is None: |
98 | - data = yaml.safe_load(juju_status()) |
99 | - service = data['services'].get(service_name) |
100 | - if service is None: |
101 | - # XXX 2012-02-08 gmb: |
102 | - # This allows us to cope with the race condition that we |
103 | - # have between deploying a service and having it come up in |
104 | - # `juju status`. We could probably do with cleaning it up so |
105 | - # that it fails a bit more noisily after a while. |
106 | - return '' |
107 | - units = service['units'] |
108 | - if unit is not None: |
109 | - item = units[unit][item_name] |
110 | - else: |
111 | - # It might seem odd to sort the units here, but we do it to |
112 | - # ensure that when no unit is specified, the first unit for the |
113 | - # service (or at least the one with the lowest number) is the |
114 | - # one whose data gets returned. |
115 | - sorted_unit_names = sorted(units.keys()) |
116 | - item = units[sorted_unit_names[0]][item_name] |
117 | - return item |
118 | - |
119 | - |
120 | -# DEPRECATED: client-side only |
121 | -def get_machine_data(): |
122 | - return yaml.safe_load(juju_status())['machines'] |
123 | - |
124 | - |
125 | -# DEPRECATED: client-side only |
126 | -def wait_for_machine(num_machines=1, timeout=300): |
127 | - """Wait `timeout` seconds for `num_machines` machines to come up. |
128 | - |
129 | - This wait_for... function can be called by other wait_for functions |
130 | - whose timeouts might be too short in situations where only a bare |
131 | - Juju setup has been bootstrapped. |
132 | - |
133 | - :return: A tuple of (num_machines, time_taken). This is used for |
134 | - testing. |
135 | - """ |
136 | - # You may think this is a hack, and you'd be right. The easiest way |
137 | - # to tell what environment we're working in (LXC vs EC2) is to check |
138 | - # the dns-name of the first machine. If it's localhost we're in LXC |
139 | - # and we can just return here. |
140 | - if get_machine_data()[0]['dns-name'] == 'localhost': |
141 | - return 1, 0 |
142 | - start_time = time.time() |
143 | - while True: |
144 | - # Drop the first machine, since it's the Zookeeper and that's |
145 | - # not a machine that we need to wait for. This will only work |
146 | - # for EC2 environments, which is why we return early above if |
147 | - # we're in LXC. |
148 | - machine_data = get_machine_data() |
149 | - non_zookeeper_machines = [ |
150 | - machine_data[key] for key in machine_data.keys()[1:]] |
151 | - if len(non_zookeeper_machines) >= num_machines: |
152 | - all_machines_running = True |
153 | - for machine in non_zookeeper_machines: |
154 | - if machine.get('instance-state') != 'running': |
155 | - all_machines_running = False |
156 | - break |
157 | - if all_machines_running: |
158 | - break |
159 | - if time.time() - start_time >= timeout: |
160 | - raise RuntimeError('timeout waiting for service to start') |
161 | - time.sleep(SLEEP_AMOUNT) |
162 | - return num_machines, time.time() - start_time |
163 | - |
164 | - |
165 | -# DEPRECATED: client-side only |
166 | -def wait_for_unit(service_name, timeout=480): |
167 | - """Wait `timeout` seconds for a given service name to come up.""" |
168 | - wait_for_machine(num_machines=1) |
169 | - start_time = time.time() |
170 | - while True: |
171 | - state = unit_info(service_name, 'agent-state') |
172 | - if 'error' in state or state == 'started': |
173 | - break |
174 | - if time.time() - start_time >= timeout: |
175 | - raise RuntimeError('timeout waiting for service to start') |
176 | - time.sleep(SLEEP_AMOUNT) |
177 | - if state != 'started': |
178 | - raise RuntimeError('unit did not start, agent-state: ' + state) |
179 | - |
180 | - |
181 | -# DEPRECATED: client-side only |
182 | -def wait_for_relation(service_name, relation_name, timeout=120): |
183 | - """Wait `timeout` seconds for a given relation to come up.""" |
184 | - start_time = time.time() |
185 | - while True: |
186 | - relation = unit_info(service_name, 'relations').get(relation_name) |
187 | - if relation is not None and relation['state'] == 'up': |
188 | - break |
189 | - if time.time() - start_time >= timeout: |
190 | - raise RuntimeError('timeout waiting for relation to be up') |
191 | - time.sleep(SLEEP_AMOUNT) |
192 | - |
193 | - |
194 | -# DEPRECATED: client-side only |
195 | -def wait_for_page_contents(url, contents, timeout=120, validate=None): |
196 | - if validate is None: |
197 | - validate = operator.contains |
198 | - start_time = time.time() |
199 | - while True: |
200 | - try: |
201 | - stream = urllib2.urlopen(url) |
202 | - except (urllib2.HTTPError, urllib2.URLError): |
203 | - pass |
204 | - else: |
205 | - page = stream.read() |
206 | - if validate(page, contents): |
207 | - return page |
208 | - if time.time() - start_time >= timeout: |
209 | - raise RuntimeError('timeout waiting for contents of ' + url) |
210 | - time.sleep(SLEEP_AMOUNT) |
211 | |
212 | === removed directory 'hooks/charmhelpers/contrib/charmsupport' |
213 | === removed file 'hooks/charmhelpers/contrib/charmsupport/IMPORT' |
214 | --- hooks/charmhelpers/contrib/charmsupport/IMPORT 2013-06-07 09:39:50 +0000 |
215 | +++ hooks/charmhelpers/contrib/charmsupport/IMPORT 1970-01-01 00:00:00 +0000 |
216 | @@ -1,14 +0,0 @@ |
217 | -Source: lp:charmsupport/trunk |
218 | - |
219 | -charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py |
220 | -charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py |
221 | -charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py |
222 | -charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py |
223 | -charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py |
224 | - |
225 | -charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py |
226 | -charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py |
227 | -charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py |
228 | -charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py |
229 | - |
230 | -charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport |
231 | |
232 | === removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py' |
233 | === removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' |
234 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2013-06-07 09:39:50 +0000 |
235 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000 |
236 | @@ -1,217 +0,0 @@ |
237 | -"""Compatibility with the nrpe-external-master charm""" |
238 | -# Copyright 2012 Canonical Ltd. |
239 | -# |
240 | -# Authors: |
241 | -# Matthew Wedgwood <matthew.wedgwood@canonical.com> |
242 | - |
243 | -import subprocess |
244 | -import pwd |
245 | -import grp |
246 | -import os |
247 | -import re |
248 | -import shlex |
249 | -import yaml |
250 | - |
251 | -from charmhelpers.core.hookenv import ( |
252 | - config, |
253 | - local_unit, |
254 | - log, |
255 | - relation_ids, |
256 | - relation_set, |
257 | - ) |
258 | -from charmhelpers.core.host import service |
259 | - |
260 | -# This module adds compatibility with the nrpe-external-master and plain nrpe |
261 | -# subordinate charms. To use it in your charm: |
262 | -# |
263 | -# 1. Update metadata.yaml |
264 | -# |
265 | -# provides: |
266 | -# (...) |
267 | -# nrpe-external-master: |
268 | -# interface: nrpe-external-master |
269 | -# scope: container |
270 | -# |
271 | -# and/or |
272 | -# |
273 | -# provides: |
274 | -# (...) |
275 | -# local-monitors: |
276 | -# interface: local-monitors |
277 | -# scope: container |
278 | - |
279 | -# |
280 | -# 2. Add the following to config.yaml |
281 | -# |
282 | -# nagios_context: |
283 | -# default: "juju" |
284 | -# type: string |
285 | -# description: | |
286 | -# Used by the nrpe subordinate charms. |
287 | -# A string that will be prepended to instance name to set the host name |
288 | -# in nagios. So for instance the hostname would be something like: |
289 | -# juju-myservice-0 |
290 | -# If you're running multiple environments with the same services in them |
291 | -# this allows you to differentiate between them. |
292 | -# |
293 | -# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master |
294 | -# |
295 | -# 4. Update your hooks.py with something like this: |
296 | -# |
297 | -# from charmsupport.nrpe import NRPE |
298 | -# (...) |
299 | -# def update_nrpe_config(): |
300 | -# nrpe_compat = NRPE() |
301 | -# nrpe_compat.add_check( |
302 | -# shortname = "myservice", |
303 | -# description = "Check MyService", |
304 | -# check_cmd = "check_http -w 2 -c 10 http://localhost" |
305 | -# ) |
306 | -# nrpe_compat.add_check( |
307 | -# "myservice_other", |
308 | -# "Check for widget failures", |
309 | -# check_cmd = "/srv/myapp/scripts/widget_check" |
310 | -# ) |
311 | -# nrpe_compat.write() |
312 | -# |
313 | -# def config_changed(): |
314 | -# (...) |
315 | -# update_nrpe_config() |
316 | -# |
317 | -# def nrpe_external_master_relation_changed(): |
318 | -# update_nrpe_config() |
319 | -# |
320 | -# def local_monitors_relation_changed(): |
321 | -# update_nrpe_config() |
322 | -# |
323 | -# 5. ln -s hooks.py nrpe-external-master-relation-changed |
324 | -# ln -s hooks.py local-monitors-relation-changed |
325 | - |
326 | - |
327 | -class CheckException(Exception): |
328 | - pass |
329 | - |
330 | - |
331 | -class Check(object): |
332 | - shortname_re = '[A-Za-z0-9-_]+$' |
333 | - service_template = (""" |
334 | -#--------------------------------------------------- |
335 | -# This file is Juju managed |
336 | -#--------------------------------------------------- |
337 | -define service {{ |
338 | - use active-service |
339 | - host_name {nagios_hostname} |
340 | - service_description {nagios_hostname}[{shortname}] """ |
341 | - """{description} |
342 | - check_command check_nrpe!{command} |
343 | - servicegroups {nagios_servicegroup} |
344 | -}} |
345 | -""") |
346 | - |
347 | - def __init__(self, shortname, description, check_cmd): |
348 | - super(Check, self).__init__() |
349 | - # XXX: could be better to calculate this from the service name |
350 | - if not re.match(self.shortname_re, shortname): |
351 | - raise CheckException("shortname must match {}".format( |
352 | - Check.shortname_re)) |
353 | - self.shortname = shortname |
354 | - self.command = "check_{}".format(shortname) |
355 | - # Note: a set of invalid characters is defined by the |
356 | - # Nagios server config |
357 | - # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= |
358 | - self.description = description |
359 | - self.check_cmd = self._locate_cmd(check_cmd) |
360 | - |
361 | - def _locate_cmd(self, check_cmd): |
362 | - search_path = ( |
363 | - '/', |
364 | - os.path.join(os.environ['CHARM_DIR'], |
365 | - 'files/nrpe-external-master'), |
366 | - '/usr/lib/nagios/plugins', |
367 | - ) |
368 | - parts = shlex.split(check_cmd) |
369 | - for path in search_path: |
370 | - if os.path.exists(os.path.join(path, parts[0])): |
371 | - command = os.path.join(path, parts[0]) |
372 | - if len(parts) > 1: |
373 | - command += " " + " ".join(parts[1:]) |
374 | - return command |
375 | - log('Check command not found: {}'.format(parts[0])) |
376 | - return '' |
377 | - |
378 | - def write(self, nagios_context, hostname): |
379 | - nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( |
380 | - self.command) |
381 | - with open(nrpe_check_file, 'w') as nrpe_check_config: |
382 | - nrpe_check_config.write("# check {}\n".format(self.shortname)) |
383 | - nrpe_check_config.write("command[{}]={}\n".format( |
384 | - self.command, self.check_cmd)) |
385 | - |
386 | - if not os.path.exists(NRPE.nagios_exportdir): |
387 | - log('Not writing service config as {} is not accessible'.format( |
388 | - NRPE.nagios_exportdir)) |
389 | - else: |
390 | - self.write_service_config(nagios_context, hostname) |
391 | - |
392 | - def write_service_config(self, nagios_context, hostname): |
393 | - for f in os.listdir(NRPE.nagios_exportdir): |
394 | - if re.search('.*{}.cfg'.format(self.command), f): |
395 | - os.remove(os.path.join(NRPE.nagios_exportdir, f)) |
396 | - |
397 | - templ_vars = { |
398 | - 'nagios_hostname': hostname, |
399 | - 'nagios_servicegroup': nagios_context, |
400 | - 'description': self.description, |
401 | - 'shortname': self.shortname, |
402 | - 'command': self.command, |
403 | - } |
404 | - nrpe_service_text = Check.service_template.format(**templ_vars) |
405 | - nrpe_service_file = '{}/service__{}_{}.cfg'.format( |
406 | - NRPE.nagios_exportdir, hostname, self.command) |
407 | - with open(nrpe_service_file, 'w') as nrpe_service_config: |
408 | - nrpe_service_config.write(str(nrpe_service_text)) |
409 | - |
410 | - def run(self): |
411 | - subprocess.call(self.check_cmd) |
412 | - |
413 | - |
414 | -class NRPE(object): |
415 | - nagios_logdir = '/var/log/nagios' |
416 | - nagios_exportdir = '/var/lib/nagios/export' |
417 | - nrpe_confdir = '/etc/nagios/nrpe.d' |
418 | - |
419 | - def __init__(self): |
420 | - super(NRPE, self).__init__() |
421 | - self.config = config() |
422 | - self.nagios_context = self.config['nagios_context'] |
423 | - self.unit_name = local_unit().replace('/', '-') |
424 | - self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) |
425 | - self.checks = [] |
426 | - |
427 | - def add_check(self, *args, **kwargs): |
428 | - self.checks.append(Check(*args, **kwargs)) |
429 | - |
430 | - def write(self): |
431 | - try: |
432 | - nagios_uid = pwd.getpwnam('nagios').pw_uid |
433 | - nagios_gid = grp.getgrnam('nagios').gr_gid |
434 | - except: |
435 | - log("Nagios user not set up, nrpe checks not updated") |
436 | - return |
437 | - |
438 | - if not os.path.exists(NRPE.nagios_logdir): |
439 | - os.mkdir(NRPE.nagios_logdir) |
440 | - os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) |
441 | - |
442 | - nrpe_monitors = {} |
443 | - monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} |
444 | - for nrpecheck in self.checks: |
445 | - nrpecheck.write(self.nagios_context, self.hostname) |
446 | - nrpe_monitors[nrpecheck.shortname] = { |
447 | - "command": nrpecheck.command, |
448 | - } |
449 | - |
450 | - service('restart', 'nagios-nrpe-server') |
451 | - |
452 | - for rid in relation_ids("local-monitors"): |
453 | - relation_set(relation_id=rid, monitors=yaml.dump(monitors)) |
454 | |
455 | === removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py' |
456 | --- hooks/charmhelpers/contrib/charmsupport/volumes.py 2013-06-07 09:39:50 +0000 |
457 | +++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000 |
458 | @@ -1,156 +0,0 @@ |
459 | -''' |
460 | -Functions for managing volumes in juju units. One volume is supported per unit. |
461 | -Subordinates may have their own storage, provided it is on its own partition. |
462 | - |
463 | -Configuration stanzas: |
464 | - volume-ephemeral: |
465 | - type: boolean |
466 | - default: true |
467 | - description: > |
468 | - If false, a volume is mounted as sepecified in "volume-map" |
469 | - If true, ephemeral storage will be used, meaning that log data |
470 | - will only exist as long as the machine. YOU HAVE BEEN WARNED. |
471 | - volume-map: |
472 | - type: string |
473 | - default: {} |
474 | - description: > |
475 | - YAML map of units to device names, e.g: |
476 | - "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" |
477 | - Service units will raise a configure-error if volume-ephemeral |
478 | - is 'true' and no volume-map value is set. Use 'juju set' to set a |
479 | - value and 'juju resolved' to complete configuration. |
480 | - |
481 | -Usage: |
482 | - from charmsupport.volumes import configure_volume, VolumeConfigurationError |
483 | - from charmsupport.hookenv import log, ERROR |
484 | - def post_mount_hook(): |
485 | - stop_service('myservice') |
486 | - def post_mount_hook(): |
487 | - start_service('myservice') |
488 | - |
489 | - if __name__ == '__main__': |
490 | - try: |
491 | - configure_volume(before_change=pre_mount_hook, |
492 | - after_change=post_mount_hook) |
493 | - except VolumeConfigurationError: |
494 | - log('Storage could not be configured', ERROR) |
495 | -''' |
496 | - |
497 | -# XXX: Known limitations |
498 | -# - fstab is neither consulted nor updated |
499 | - |
500 | -import os |
501 | -import hookenv |
502 | -import host |
503 | -import yaml |
504 | - |
505 | - |
506 | -MOUNT_BASE = '/srv/juju/volumes' |
507 | - |
508 | - |
509 | -class VolumeConfigurationError(Exception): |
510 | - '''Volume configuration data is missing or invalid''' |
511 | - pass |
512 | - |
513 | - |
514 | -def get_config(): |
515 | - '''Gather and sanity-check volume configuration data''' |
516 | - volume_config = {} |
517 | - config = hookenv.config() |
518 | - |
519 | - errors = False |
520 | - |
521 | - if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): |
522 | - volume_config['ephemeral'] = True |
523 | - else: |
524 | - volume_config['ephemeral'] = False |
525 | - |
526 | - try: |
527 | - volume_map = yaml.safe_load(config.get('volume-map', '{}')) |
528 | - except yaml.YAMLError as e: |
529 | - hookenv.log("Error parsing YAML volume-map: {}".format(e), |
530 | - hookenv.ERROR) |
531 | - errors = True |
532 | - if volume_map is None: |
533 | - # probably an empty string |
534 | - volume_map = {} |
535 | - elif isinstance(volume_map, dict): |
536 | - hookenv.log("Volume-map should be a dictionary, not {}".format( |
537 | - type(volume_map))) |
538 | - errors = True |
539 | - |
540 | - volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) |
541 | - if volume_config['device'] and volume_config['ephemeral']: |
542 | - # asked for ephemeral storage but also defined a volume ID |
543 | - hookenv.log('A volume is defined for this unit, but ephemeral ' |
544 | - 'storage was requested', hookenv.ERROR) |
545 | - errors = True |
546 | - elif not volume_config['device'] and not volume_config['ephemeral']: |
547 | - # asked for permanent storage but did not define volume ID |
548 | - hookenv.log('Ephemeral storage was requested, but there is no volume ' |
549 | - 'defined for this unit.', hookenv.ERROR) |
550 | - errors = True |
551 | - |
552 | - unit_mount_name = hookenv.local_unit().replace('/', '-') |
553 | - volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) |
554 | - |
555 | - if errors: |
556 | - return None |
557 | - return volume_config |
558 | - |
559 | - |
560 | -def mount_volume(config): |
561 | - if os.path.exists(config['mountpoint']): |
562 | - if not os.path.isdir(config['mountpoint']): |
563 | - hookenv.log('Not a directory: {}'.format(config['mountpoint'])) |
564 | - raise VolumeConfigurationError() |
565 | - else: |
566 | - host.mkdir(config['mountpoint']) |
567 | - if os.path.ismount(config['mountpoint']): |
568 | - unmount_volume(config) |
569 | - if not host.mount(config['device'], config['mountpoint'], persist=True): |
570 | - raise VolumeConfigurationError() |
571 | - |
572 | - |
573 | -def unmount_volume(config): |
574 | - if os.path.ismount(config['mountpoint']): |
575 | - if not host.umount(config['mountpoint'], persist=True): |
576 | - raise VolumeConfigurationError() |
577 | - |
578 | - |
579 | -def managed_mounts(): |
580 | - '''List of all mounted managed volumes''' |
581 | - return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) |
582 | - |
583 | - |
584 | -def configure_volume(before_change=lambda: None, after_change=lambda: None): |
585 | - '''Set up storage (or don't) according to the charm's volume configuration. |
586 | - Returns the mount point or "ephemeral". before_change and after_change |
587 | - are optional functions to be called if the volume configuration changes. |
588 | - ''' |
589 | - |
590 | - config = get_config() |
591 | - if not config: |
592 | - hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) |
593 | - raise VolumeConfigurationError() |
594 | - |
595 | - if config['ephemeral']: |
596 | - if os.path.ismount(config['mountpoint']): |
597 | - before_change() |
598 | - unmount_volume(config) |
599 | - after_change() |
600 | - return 'ephemeral' |
601 | - else: |
602 | - # persistent storage |
603 | - if os.path.ismount(config['mountpoint']): |
604 | - mounts = dict(managed_mounts()) |
605 | - if mounts.get(config['mountpoint']) != config['device']: |
606 | - before_change() |
607 | - unmount_volume(config) |
608 | - mount_volume(config) |
609 | - after_change() |
610 | - else: |
611 | - before_change() |
612 | - mount_volume(config) |
613 | - after_change() |
614 | - return config['mountpoint'] |
615 | |
616 | === removed directory 'hooks/charmhelpers/contrib/hahelpers' |
617 | === removed file 'hooks/charmhelpers/contrib/hahelpers/IMPORT' |
618 | --- hooks/charmhelpers/contrib/hahelpers/IMPORT 2013-06-07 09:39:50 +0000 |
619 | +++ hooks/charmhelpers/contrib/hahelpers/IMPORT 1970-01-01 00:00:00 +0000 |
620 | @@ -1,7 +0,0 @@ |
621 | -Source: lp:~openstack-charmers/openstack-charm-helpers/ha-helpers |
622 | - |
623 | -ha-helpers/lib/apache_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/apache_utils.py |
624 | -ha-helpers/lib/cluster_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/cluster_utils.py |
625 | -ha-helpers/lib/ceph_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/ceph_utils.py |
626 | -ha-helpers/lib/haproxy_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/haproxy_utils.py |
627 | -ha-helpers/lib/utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/utils.py |
628 | |
629 | === removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py' |
630 | === removed file 'hooks/charmhelpers/contrib/hahelpers/apache_utils.py' |
631 | --- hooks/charmhelpers/contrib/hahelpers/apache_utils.py 2013-06-07 09:39:50 +0000 |
632 | +++ hooks/charmhelpers/contrib/hahelpers/apache_utils.py 1970-01-01 00:00:00 +0000 |
633 | @@ -1,196 +0,0 @@ |
634 | -# |
635 | -# Copyright 2012 Canonical Ltd. |
636 | -# |
637 | -# This file is sourced from lp:openstack-charm-helpers |
638 | -# |
639 | -# Authors: |
640 | -# James Page <james.page@ubuntu.com> |
641 | -# Adam Gandelman <adamg@ubuntu.com> |
642 | -# |
643 | - |
644 | -from hahelpers.utils import ( |
645 | - relation_ids, |
646 | - relation_list, |
647 | - relation_get, |
648 | - render_template, |
649 | - juju_log, |
650 | - config_get, |
651 | - install, |
652 | - get_host_ip, |
653 | - restart |
654 | - ) |
655 | -from hahelpers.cluster_utils import https |
656 | - |
657 | -import os |
658 | -import subprocess |
659 | -from base64 import b64decode |
660 | - |
661 | -APACHE_SITE_DIR = "/etc/apache2/sites-available" |
662 | -SITE_TEMPLATE = "apache2_site.tmpl" |
663 | -RELOAD_CHECK = "To activate the new configuration" |
664 | - |
665 | - |
666 | -def get_cert(): |
667 | - cert = config_get('ssl_cert') |
668 | - key = config_get('ssl_key') |
669 | - if not (cert and key): |
670 | - juju_log('INFO', |
671 | - "Inspecting identity-service relations for SSL certificate.") |
672 | - cert = key = None |
673 | - for r_id in relation_ids('identity-service'): |
674 | - for unit in relation_list(r_id): |
675 | - if not cert: |
676 | - cert = relation_get('ssl_cert', |
677 | - rid=r_id, unit=unit) |
678 | - if not key: |
679 | - key = relation_get('ssl_key', |
680 | - rid=r_id, unit=unit) |
681 | - return (cert, key) |
682 | - |
683 | - |
684 | -def get_ca_cert(): |
685 | - ca_cert = None |
686 | - juju_log('INFO', |
687 | - "Inspecting identity-service relations for CA SSL certificate.") |
688 | - for r_id in relation_ids('identity-service'): |
689 | - for unit in relation_list(r_id): |
690 | - if not ca_cert: |
691 | - ca_cert = relation_get('ca_cert', |
692 | - rid=r_id, unit=unit) |
693 | - return ca_cert |
694 | - |
695 | - |
696 | -def install_ca_cert(ca_cert): |
697 | - if ca_cert: |
698 | - with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', |
699 | - 'w') as crt: |
700 | - crt.write(ca_cert) |
701 | - subprocess.check_call(['update-ca-certificates', '--fresh']) |
702 | - |
703 | - |
704 | -def enable_https(port_maps, namespace, cert, key, ca_cert=None): |
705 | - ''' |
706 | - For a given number of port mappings, configures apache2 |
707 | - HTTPs local reverse proxying using certficates and keys provided in |
708 | - either configuration data (preferred) or relation data. Assumes ports |
709 | - are not in use (calling charm should ensure that). |
710 | - |
711 | - port_maps: dict: external to internal port mappings |
712 | - namespace: str: name of charm |
713 | - ''' |
714 | - def _write_if_changed(path, new_content): |
715 | - content = None |
716 | - if os.path.exists(path): |
717 | - with open(path, 'r') as f: |
718 | - content = f.read().strip() |
719 | - if content != new_content: |
720 | - with open(path, 'w') as f: |
721 | - f.write(new_content) |
722 | - return True |
723 | - else: |
724 | - return False |
725 | - |
726 | - juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps)) |
727 | - http_restart = False |
728 | - |
729 | - if cert: |
730 | - cert = b64decode(cert) |
731 | - if key: |
732 | - key = b64decode(key) |
733 | - if ca_cert: |
734 | - ca_cert = b64decode(ca_cert) |
735 | - |
736 | - if not cert and not key: |
737 | - juju_log('ERROR', |
738 | - "Expected but could not find SSL certificate data, not " |
739 | - "configuring HTTPS!") |
740 | - return False |
741 | - |
742 | - install('apache2') |
743 | - if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl', |
744 | - 'proxy', 'proxy_http']): |
745 | - http_restart = True |
746 | - |
747 | - ssl_dir = os.path.join('/etc/apache2/ssl', namespace) |
748 | - if not os.path.exists(ssl_dir): |
749 | - os.makedirs(ssl_dir) |
750 | - |
751 | - if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)): |
752 | - http_restart = True |
753 | - if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)): |
754 | - http_restart = True |
755 | - os.chmod(os.path.join(ssl_dir, 'key'), 0600) |
756 | - |
757 | - install_ca_cert(ca_cert) |
758 | - |
759 | - sites_dir = '/etc/apache2/sites-available' |
760 | - for ext_port, int_port in port_maps.items(): |
761 | - juju_log('INFO', |
762 | - 'Creating apache2 reverse proxy vhost' |
763 | - ' for {}:{}'.format(ext_port, |
764 | - int_port)) |
765 | - site = "{}_{}".format(namespace, ext_port) |
766 | - site_path = os.path.join(sites_dir, site) |
767 | - with open(site_path, 'w') as fsite: |
768 | - context = { |
769 | - "ext": ext_port, |
770 | - "int": int_port, |
771 | - "namespace": namespace, |
772 | - "private_address": get_host_ip() |
773 | - } |
774 | - fsite.write(render_template(SITE_TEMPLATE, |
775 | - context)) |
776 | - |
777 | - if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]): |
778 | - http_restart = True |
779 | - |
780 | - if http_restart: |
781 | - restart('apache2') |
782 | - |
783 | - return True |
784 | - |
785 | - |
786 | -def disable_https(port_maps, namespace): |
787 | - ''' |
788 | - Ensure HTTPS reverse proxying is disables for given port mappings |
789 | - |
790 | - port_maps: dict: of ext -> int port mappings |
791 | - namespace: str: name of chamr |
792 | - ''' |
793 | - juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps)) |
794 | - |
795 | - if (not os.path.exists('/etc/apache2') or |
796 | - not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))): |
797 | - return |
798 | - |
799 | - http_restart = False |
800 | - for ext_port in port_maps.keys(): |
801 | - if os.path.exists(os.path.join(APACHE_SITE_DIR, |
802 | - "{}_{}".format(namespace, |
803 | - ext_port))): |
804 | - juju_log('INFO', |
805 | - "Disabling HTTPS reverse proxy" |
806 | - " for {} {}.".format(namespace, |
807 | - ext_port)) |
808 | - if (RELOAD_CHECK in |
809 | - subprocess.check_output(['a2dissite', |
810 | - '{}_{}'.format(namespace, |
811 | - ext_port)])): |
812 | - http_restart = True |
813 | - |
814 | - if http_restart: |
815 | - restart(['apache2']) |
816 | - |
817 | - |
818 | -def setup_https(port_maps, namespace, cert, key, ca_cert=None): |
819 | - ''' |
820 | - Ensures HTTPS is either enabled or disabled for given port |
821 | - mapping. |
822 | - |
823 | - port_maps: dict: of ext -> int port mappings |
824 | - namespace: str: name of charm |
825 | - ''' |
826 | - if not https: |
827 | - disable_https(port_maps, namespace) |
828 | - else: |
829 | - enable_https(port_maps, namespace, cert, key, ca_cert) |
830 | |
831 | === removed file 'hooks/charmhelpers/contrib/hahelpers/ceph_utils.py' |
832 | --- hooks/charmhelpers/contrib/hahelpers/ceph_utils.py 2013-06-07 09:39:50 +0000 |
833 | +++ hooks/charmhelpers/contrib/hahelpers/ceph_utils.py 1970-01-01 00:00:00 +0000 |
834 | @@ -1,256 +0,0 @@ |
835 | -# |
836 | -# Copyright 2012 Canonical Ltd. |
837 | -# |
838 | -# This file is sourced from lp:openstack-charm-helpers |
839 | -# |
840 | -# Authors: |
841 | -# James Page <james.page@ubuntu.com> |
842 | -# Adam Gandelman <adamg@ubuntu.com> |
843 | -# |
844 | - |
845 | -import commands |
846 | -import subprocess |
847 | -import os |
848 | -import shutil |
849 | -import hahelpers.utils as utils |
850 | - |
851 | -KEYRING = '/etc/ceph/ceph.client.%s.keyring' |
852 | -KEYFILE = '/etc/ceph/ceph.client.%s.key' |
853 | - |
854 | -CEPH_CONF = """[global] |
855 | - auth supported = %(auth)s |
856 | - keyring = %(keyring)s |
857 | - mon host = %(mon_hosts)s |
858 | -""" |
859 | - |
860 | - |
861 | -def execute(cmd): |
862 | - subprocess.check_call(cmd) |
863 | - |
864 | - |
865 | -def execute_shell(cmd): |
866 | - subprocess.check_call(cmd, shell=True) |
867 | - |
868 | - |
869 | -def install(): |
870 | - ceph_dir = "/etc/ceph" |
871 | - if not os.path.isdir(ceph_dir): |
872 | - os.mkdir(ceph_dir) |
873 | - utils.install('ceph-common') |
874 | - |
875 | - |
876 | -def rbd_exists(service, pool, rbd_img): |
877 | - (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\ |
878 | - (service, pool)) |
879 | - return rbd_img in out |
880 | - |
881 | - |
882 | -def create_rbd_image(service, pool, image, sizemb): |
883 | - cmd = [ |
884 | - 'rbd', |
885 | - 'create', |
886 | - image, |
887 | - '--size', |
888 | - str(sizemb), |
889 | - '--id', |
890 | - service, |
891 | - '--pool', |
892 | - pool |
893 | - ] |
894 | - execute(cmd) |
895 | - |
896 | - |
897 | -def pool_exists(service, name): |
898 | - (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) |
899 | - return name in out |
900 | - |
901 | - |
902 | -def create_pool(service, name): |
903 | - cmd = [ |
904 | - 'rados', |
905 | - '--id', |
906 | - service, |
907 | - 'mkpool', |
908 | - name |
909 | - ] |
910 | - execute(cmd) |
911 | - |
912 | - |
913 | -def keyfile_path(service): |
914 | - return KEYFILE % service |
915 | - |
916 | - |
917 | -def keyring_path(service): |
918 | - return KEYRING % service |
919 | - |
920 | - |
921 | -def create_keyring(service, key): |
922 | - keyring = keyring_path(service) |
923 | - if os.path.exists(keyring): |
924 | - utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring) |
925 | - cmd = [ |
926 | - 'ceph-authtool', |
927 | - keyring, |
928 | - '--create-keyring', |
929 | - '--name=client.%s' % service, |
930 | - '--add-key=%s' % key |
931 | - ] |
932 | - execute(cmd) |
933 | - utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring) |
934 | - |
935 | - |
936 | -def create_key_file(service, key): |
937 | - # create a file containing the key |
938 | - keyfile = keyfile_path(service) |
939 | - if os.path.exists(keyfile): |
940 | - utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile) |
941 | - fd = open(keyfile, 'w') |
942 | - fd.write(key) |
943 | - fd.close() |
944 | - utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile) |
945 | - |
946 | - |
947 | -def get_ceph_nodes(): |
948 | - hosts = [] |
949 | - for r_id in utils.relation_ids('ceph'): |
950 | - for unit in utils.relation_list(r_id): |
951 | - hosts.append(utils.relation_get('private-address', |
952 | - unit=unit, rid=r_id)) |
953 | - return hosts |
954 | - |
955 | - |
956 | -def configure(service, key, auth): |
957 | - create_keyring(service, key) |
958 | - create_key_file(service, key) |
959 | - hosts = get_ceph_nodes() |
960 | - mon_hosts = ",".join(map(str, hosts)) |
961 | - keyring = keyring_path(service) |
962 | - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: |
963 | - ceph_conf.write(CEPH_CONF % locals()) |
964 | - modprobe_kernel_module('rbd') |
965 | - |
966 | - |
967 | -def image_mapped(image_name): |
968 | - (rc, out) = commands.getstatusoutput('rbd showmapped') |
969 | - return image_name in out |
970 | - |
971 | - |
972 | -def map_block_storage(service, pool, image): |
973 | - cmd = [ |
974 | - 'rbd', |
975 | - 'map', |
976 | - '%s/%s' % (pool, image), |
977 | - '--user', |
978 | - service, |
979 | - '--secret', |
980 | - keyfile_path(service), |
981 | - ] |
982 | - execute(cmd) |
983 | - |
984 | - |
985 | -def filesystem_mounted(fs): |
986 | - return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0 |
987 | - |
988 | - |
989 | -def make_filesystem(blk_device, fstype='ext4'): |
990 | - utils.juju_log('INFO', |
991 | - 'ceph: Formatting block device %s as filesystem %s.' %\ |
992 | - (blk_device, fstype)) |
993 | - cmd = ['mkfs', '-t', fstype, blk_device] |
994 | - execute(cmd) |
995 | - |
996 | - |
997 | -def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): |
998 | - # mount block device into /mnt |
999 | - cmd = ['mount', '-t', fstype, blk_device, '/mnt'] |
1000 | - execute(cmd) |
1001 | - |
1002 | - # copy data to /mnt |
1003 | - try: |
1004 | - copy_files(data_src_dst, '/mnt') |
1005 | - except: |
1006 | - pass |
1007 | - |
1008 | - # umount block device |
1009 | - cmd = ['umount', '/mnt'] |
1010 | - execute(cmd) |
1011 | - |
1012 | - _dir = os.stat(data_src_dst) |
1013 | - uid = _dir.st_uid |
1014 | - gid = _dir.st_gid |
1015 | - |
1016 | - # re-mount where the data should originally be |
1017 | - cmd = ['mount', '-t', fstype, blk_device, data_src_dst] |
1018 | - execute(cmd) |
1019 | - |
1020 | - # ensure original ownership of new mount. |
1021 | - cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] |
1022 | - execute(cmd) |
1023 | - |
1024 | - |
1025 | -# TODO: re-use |
1026 | -def modprobe_kernel_module(module): |
1027 | - utils.juju_log('INFO', 'Loading kernel module') |
1028 | - cmd = ['modprobe', module] |
1029 | - execute(cmd) |
1030 | - cmd = 'echo %s >> /etc/modules' % module |
1031 | - execute_shell(cmd) |
1032 | - |
1033 | - |
1034 | -def copy_files(src, dst, symlinks=False, ignore=None): |
1035 | - for item in os.listdir(src): |
1036 | - s = os.path.join(src, item) |
1037 | - d = os.path.join(dst, item) |
1038 | - if os.path.isdir(s): |
1039 | - shutil.copytree(s, d, symlinks, ignore) |
1040 | - else: |
1041 | - shutil.copy2(s, d) |
1042 | - |
1043 | - |
1044 | -def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, |
1045 | - blk_device, fstype, system_services=[]): |
1046 | - """ |
1047 | - To be called from the current cluster leader. |
1048 | - Ensures given pool and RBD image exists, is mapped to a block device, |
1049 | - and the device is formatted and mounted at the given mount_point. |
1050 | - |
1051 | - If formatting a device for the first time, data existing at mount_point |
1052 | - will be migrated to the RBD device before being remounted. |
1053 | - |
1054 | - All services listed in system_services will be stopped prior to data |
1055 | - migration and restarted when complete. |
1056 | - """ |
1057 | - # Ensure pool, RBD image, RBD mappings are in place. |
1058 | - if not pool_exists(service, pool): |
1059 | - utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool) |
1060 | - create_pool(service, pool) |
1061 | - |
1062 | - if not rbd_exists(service, pool, rbd_img): |
1063 | - utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img) |
1064 | - create_rbd_image(service, pool, rbd_img, sizemb) |
1065 | - |
1066 | - if not image_mapped(rbd_img): |
1067 | - utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.') |
1068 | - map_block_storage(service, pool, rbd_img) |
1069 | - |
1070 | - # make file system |
1071 | - # TODO: What happens if for whatever reason this is run again and |
1072 | - # the data is already in the rbd device and/or is mounted?? |
1073 | - # When it is mounted already, it will fail to make the fs |
1074 | - # XXX: This is really sketchy! Need to at least add an fstab entry |
1075 | - # otherwise this hook will blow away existing data if its executed |
1076 | - # after a reboot. |
1077 | - if not filesystem_mounted(mount_point): |
1078 | - make_filesystem(blk_device, fstype) |
1079 | - |
1080 | - for svc in system_services: |
1081 | - if utils.running(svc): |
1082 | - utils.juju_log('INFO', |
1083 | - 'Stopping services %s prior to migrating '\ |
1084 | - 'data' % svc) |
1085 | - utils.stop(svc) |
1086 | - |
1087 | - place_data_on_ceph(service, blk_device, mount_point, fstype) |
1088 | - |
1089 | - for svc in system_services: |
1090 | - utils.start(svc) |
1091 | |
1092 | === removed file 'hooks/charmhelpers/contrib/hahelpers/cluster_utils.py' |
1093 | --- hooks/charmhelpers/contrib/hahelpers/cluster_utils.py 2013-06-07 09:39:50 +0000 |
1094 | +++ hooks/charmhelpers/contrib/hahelpers/cluster_utils.py 1970-01-01 00:00:00 +0000 |
1095 | @@ -1,130 +0,0 @@ |
1096 | -# |
1097 | -# Copyright 2012 Canonical Ltd. |
1098 | -# |
1099 | -# This file is sourced from lp:openstack-charm-helpers |
1100 | -# |
1101 | -# Authors: |
1102 | -# James Page <james.page@ubuntu.com> |
1103 | -# Adam Gandelman <adamg@ubuntu.com> |
1104 | -# |
1105 | - |
1106 | -from hahelpers.utils import ( |
1107 | - juju_log, |
1108 | - relation_ids, |
1109 | - relation_list, |
1110 | - relation_get, |
1111 | - get_unit_hostname, |
1112 | - config_get |
1113 | - ) |
1114 | -import subprocess |
1115 | -import os |
1116 | - |
1117 | - |
1118 | -def is_clustered(): |
1119 | - for r_id in (relation_ids('ha') or []): |
1120 | - for unit in (relation_list(r_id) or []): |
1121 | - clustered = relation_get('clustered', |
1122 | - rid=r_id, |
1123 | - unit=unit) |
1124 | - if clustered: |
1125 | - return True |
1126 | - return False |
1127 | - |
1128 | - |
1129 | -def is_leader(resource): |
1130 | - cmd = [ |
1131 | - "crm", "resource", |
1132 | - "show", resource |
1133 | - ] |
1134 | - try: |
1135 | - status = subprocess.check_output(cmd) |
1136 | - except subprocess.CalledProcessError: |
1137 | - return False |
1138 | - else: |
1139 | - if get_unit_hostname() in status: |
1140 | - return True |
1141 | - else: |
1142 | - return False |
1143 | - |
1144 | - |
1145 | -def peer_units(): |
1146 | - peers = [] |
1147 | - for r_id in (relation_ids('cluster') or []): |
1148 | - for unit in (relation_list(r_id) or []): |
1149 | - peers.append(unit) |
1150 | - return peers |
1151 | - |
1152 | - |
1153 | -def oldest_peer(peers): |
1154 | - local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) |
1155 | - for peer in peers: |
1156 | - remote_unit_no = int(peer.split('/')[1]) |
1157 | - if remote_unit_no < local_unit_no: |
1158 | - return False |
1159 | - return True |
1160 | - |
1161 | - |
1162 | -def eligible_leader(resource): |
1163 | - if is_clustered(): |
1164 | - if not is_leader(resource): |
1165 | - juju_log('INFO', 'Deferring action to CRM leader.') |
1166 | - return False |
1167 | - else: |
1168 | - peers = peer_units() |
1169 | - if peers and not oldest_peer(peers): |
1170 | - juju_log('INFO', 'Deferring action to oldest service unit.') |
1171 | - return False |
1172 | - return True |
1173 | - |
1174 | - |
1175 | -def https(): |
1176 | - ''' |
1177 | - Determines whether enough data has been provided in configuration |
1178 | - or relation data to configure HTTPS |
1179 | - . |
1180 | - returns: boolean |
1181 | - ''' |
1182 | - if config_get('use-https') == "yes": |
1183 | - return True |
1184 | - if config_get('ssl_cert') and config_get('ssl_key'): |
1185 | - return True |
1186 | - for r_id in relation_ids('identity-service'): |
1187 | - for unit in relation_list(r_id): |
1188 | - if (relation_get('https_keystone', rid=r_id, unit=unit) and |
1189 | - relation_get('ssl_cert', rid=r_id, unit=unit) and |
1190 | - relation_get('ssl_key', rid=r_id, unit=unit) and |
1191 | - relation_get('ca_cert', rid=r_id, unit=unit)): |
1192 | - return True |
1193 | - return False |
1194 | - |
1195 | - |
1196 | -def determine_api_port(public_port): |
1197 | - ''' |
1198 | - Determine correct API server listening port based on |
1199 | - existence of HTTPS reverse proxy and/or haproxy. |
1200 | - |
1201 | - public_port: int: standard public port for given service |
1202 | - |
1203 | - returns: int: the correct listening port for the API service |
1204 | - ''' |
1205 | - i = 0 |
1206 | - if len(peer_units()) > 0 or is_clustered(): |
1207 | - i += 1 |
1208 | - if https(): |
1209 | - i += 1 |
1210 | - return public_port - (i * 10) |
1211 | - |
1212 | - |
1213 | -def determine_haproxy_port(public_port): |
1214 | - ''' |
1215 | - Description: Determine correct proxy listening port based on public IP + |
1216 | - existence of HTTPS reverse proxy. |
1217 | - |
1218 | - public_port: int: standard public port for given service |
1219 | - |
1220 | - returns: int: the correct listening port for the HAProxy service |
1221 | - ''' |
1222 | - i = 0 |
1223 | - if https(): |
1224 | - i += 1 |
1225 | - return public_port - (i * 10) |
1226 | |
1227 | === removed file 'hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py' |
1228 | --- hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py 2013-06-07 09:39:50 +0000 |
1229 | +++ hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py 1970-01-01 00:00:00 +0000 |
1230 | @@ -1,55 +0,0 @@ |
1231 | -# |
1232 | -# Copyright 2012 Canonical Ltd. |
1233 | -# |
1234 | -# This file is sourced from lp:openstack-charm-helpers |
1235 | -# |
1236 | -# Authors: |
1237 | -# James Page <james.page@ubuntu.com> |
1238 | -# Adam Gandelman <adamg@ubuntu.com> |
1239 | -# |
1240 | - |
1241 | -from lib.utils import ( |
1242 | - relation_ids, |
1243 | - relation_list, |
1244 | - relation_get, |
1245 | - unit_get, |
1246 | - reload, |
1247 | - render_template |
1248 | - ) |
1249 | -import os |
1250 | - |
1251 | -HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' |
1252 | -HAPROXY_DEFAULT = '/etc/default/haproxy' |
1253 | - |
1254 | - |
1255 | -def configure_haproxy(service_ports): |
1256 | - ''' |
1257 | - Configure HAProxy based on the current peers in the service |
1258 | - cluster using the provided port map: |
1259 | - |
1260 | - "swift": [ 8080, 8070 ] |
1261 | - |
1262 | - HAproxy will also be reloaded/started if required |
1263 | - |
1264 | - service_ports: dict: dict of lists of [ frontend, backend ] |
1265 | - ''' |
1266 | - cluster_hosts = {} |
1267 | - cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \ |
1268 | - unit_get('private-address') |
1269 | - for r_id in relation_ids('cluster'): |
1270 | - for unit in relation_list(r_id): |
1271 | - cluster_hosts[unit.replace('/', '-')] = \ |
1272 | - relation_get(attribute='private-address', |
1273 | - rid=r_id, |
1274 | - unit=unit) |
1275 | - context = { |
1276 | - 'units': cluster_hosts, |
1277 | - 'service_ports': service_ports |
1278 | - } |
1279 | - with open(HAPROXY_CONF, 'w') as f: |
1280 | - f.write(render_template(os.path.basename(HAPROXY_CONF), |
1281 | - context)) |
1282 | - with open(HAPROXY_DEFAULT, 'w') as f: |
1283 | - f.write('ENABLED=1') |
1284 | - |
1285 | - reload('haproxy') |
1286 | |
1287 | === removed file 'hooks/charmhelpers/contrib/hahelpers/utils.py' |
1288 | --- hooks/charmhelpers/contrib/hahelpers/utils.py 2013-06-07 09:39:50 +0000 |
1289 | +++ hooks/charmhelpers/contrib/hahelpers/utils.py 1970-01-01 00:00:00 +0000 |
1290 | @@ -1,332 +0,0 @@ |
1291 | -# |
1292 | -# Copyright 2012 Canonical Ltd. |
1293 | -# |
1294 | -# This file is sourced from lp:openstack-charm-helpers |
1295 | -# |
1296 | -# Authors: |
1297 | -# James Page <james.page@ubuntu.com> |
1298 | -# Paul Collins <paul.collins@canonical.com> |
1299 | -# Adam Gandelman <adamg@ubuntu.com> |
1300 | -# |
1301 | - |
1302 | -import json |
1303 | -import os |
1304 | -import subprocess |
1305 | -import socket |
1306 | -import sys |
1307 | - |
1308 | - |
1309 | -def do_hooks(hooks): |
1310 | - hook = os.path.basename(sys.argv[0]) |
1311 | - |
1312 | - try: |
1313 | - hook_func = hooks[hook] |
1314 | - except KeyError: |
1315 | - juju_log('INFO', |
1316 | - "This charm doesn't know how to handle '{}'.".format(hook)) |
1317 | - else: |
1318 | - hook_func() |
1319 | - |
1320 | - |
1321 | -def install(*pkgs): |
1322 | - cmd = [ |
1323 | - 'apt-get', |
1324 | - '-y', |
1325 | - 'install' |
1326 | - ] |
1327 | - for pkg in pkgs: |
1328 | - cmd.append(pkg) |
1329 | - subprocess.check_call(cmd) |
1330 | - |
1331 | -TEMPLATES_DIR = 'templates' |
1332 | - |
1333 | -try: |
1334 | - import jinja2 |
1335 | -except ImportError: |
1336 | - install('python-jinja2') |
1337 | - import jinja2 |
1338 | - |
1339 | -try: |
1340 | - import dns.resolver |
1341 | -except ImportError: |
1342 | - install('python-dnspython') |
1343 | - import dns.resolver |
1344 | - |
1345 | - |
1346 | -def render_template(template_name, context, template_dir=TEMPLATES_DIR): |
1347 | - templates = jinja2.Environment( |
1348 | - loader=jinja2.FileSystemLoader(template_dir) |
1349 | - ) |
1350 | - template = templates.get_template(template_name) |
1351 | - return template.render(context) |
1352 | - |
1353 | -CLOUD_ARCHIVE = \ |
1354 | -""" # Ubuntu Cloud Archive |
1355 | -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
1356 | -""" |
1357 | - |
1358 | -CLOUD_ARCHIVE_POCKETS = { |
1359 | - 'folsom': 'precise-updates/folsom', |
1360 | - 'folsom/updates': 'precise-updates/folsom', |
1361 | - 'folsom/proposed': 'precise-proposed/folsom', |
1362 | - 'grizzly': 'precise-updates/grizzly', |
1363 | - 'grizzly/updates': 'precise-updates/grizzly', |
1364 | - 'grizzly/proposed': 'precise-proposed/grizzly' |
1365 | - } |
1366 | - |
1367 | - |
1368 | -def configure_source(): |
1369 | - source = str(config_get('openstack-origin')) |
1370 | - if not source: |
1371 | - return |
1372 | - if source.startswith('ppa:'): |
1373 | - cmd = [ |
1374 | - 'add-apt-repository', |
1375 | - source |
1376 | - ] |
1377 | - subprocess.check_call(cmd) |
1378 | - if source.startswith('cloud:'): |
1379 | - # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg: |
1380 | - # cloud:precise-folsom/updates or cloud:precise-folsom/proposed |
1381 | - install('ubuntu-cloud-keyring') |
1382 | - pocket = source.split(':')[1] |
1383 | - pocket = pocket.split('-')[1] |
1384 | - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
1385 | - apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])) |
1386 | - if source.startswith('deb'): |
1387 | - l = len(source.split('|')) |
1388 | - if l == 2: |
1389 | - (apt_line, key) = source.split('|') |
1390 | - cmd = [ |
1391 | - 'apt-key', |
1392 | - 'adv', '--keyserver keyserver.ubuntu.com', |
1393 | - '--recv-keys', key |
1394 | - ] |
1395 | - subprocess.check_call(cmd) |
1396 | - elif l == 1: |
1397 | - apt_line = source |
1398 | - |
1399 | - with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: |
1400 | - apt.write(apt_line + "\n") |
1401 | - cmd = [ |
1402 | - 'apt-get', |
1403 | - 'update' |
1404 | - ] |
1405 | - subprocess.check_call(cmd) |
1406 | - |
1407 | -# Protocols |
1408 | -TCP = 'TCP' |
1409 | -UDP = 'UDP' |
1410 | - |
1411 | - |
1412 | -def expose(port, protocol='TCP'): |
1413 | - cmd = [ |
1414 | - 'open-port', |
1415 | - '{}/{}'.format(port, protocol) |
1416 | - ] |
1417 | - subprocess.check_call(cmd) |
1418 | - |
1419 | - |
1420 | -def juju_log(severity, message): |
1421 | - cmd = [ |
1422 | - 'juju-log', |
1423 | - '--log-level', severity, |
1424 | - message |
1425 | - ] |
1426 | - subprocess.check_call(cmd) |
1427 | - |
1428 | - |
1429 | -cache = {} |
1430 | - |
1431 | - |
1432 | -def cached(func): |
1433 | - def wrapper(*args, **kwargs): |
1434 | - global cache |
1435 | - key = str((func, args, kwargs)) |
1436 | - try: |
1437 | - return cache[key] |
1438 | - except KeyError: |
1439 | - res = func(*args, **kwargs) |
1440 | - cache[key] = res |
1441 | - return res |
1442 | - return wrapper |
1443 | - |
1444 | - |
1445 | -@cached |
1446 | -def relation_ids(relation): |
1447 | - cmd = [ |
1448 | - 'relation-ids', |
1449 | - relation |
1450 | - ] |
1451 | - result = str(subprocess.check_output(cmd)).split() |
1452 | - if result == "": |
1453 | - return None |
1454 | - else: |
1455 | - return result |
1456 | - |
1457 | - |
1458 | -@cached |
1459 | -def relation_list(rid): |
1460 | - cmd = [ |
1461 | - 'relation-list', |
1462 | - '-r', rid, |
1463 | - ] |
1464 | - result = str(subprocess.check_output(cmd)).split() |
1465 | - if result == "": |
1466 | - return None |
1467 | - else: |
1468 | - return result |
1469 | - |
1470 | - |
1471 | -@cached |
1472 | -def relation_get(attribute, unit=None, rid=None): |
1473 | - cmd = [ |
1474 | - 'relation-get', |
1475 | - ] |
1476 | - if rid: |
1477 | - cmd.append('-r') |
1478 | - cmd.append(rid) |
1479 | - cmd.append(attribute) |
1480 | - if unit: |
1481 | - cmd.append(unit) |
1482 | - value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
1483 | - if value == "": |
1484 | - return None |
1485 | - else: |
1486 | - return value |
1487 | - |
1488 | - |
1489 | -@cached |
1490 | -def relation_get_dict(relation_id=None, remote_unit=None): |
1491 | - """Obtain all relation data as dict by way of JSON""" |
1492 | - cmd = [ |
1493 | - 'relation-get', '--format=json' |
1494 | - ] |
1495 | - if relation_id: |
1496 | - cmd.append('-r') |
1497 | - cmd.append(relation_id) |
1498 | - if remote_unit: |
1499 | - remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None) |
1500 | - os.environ['JUJU_REMOTE_UNIT'] = remote_unit |
1501 | - j = subprocess.check_output(cmd) |
1502 | - if remote_unit and remote_unit_orig: |
1503 | - os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig |
1504 | - d = json.loads(j) |
1505 | - settings = {} |
1506 | - # convert unicode to strings |
1507 | - for k, v in d.iteritems(): |
1508 | - settings[str(k)] = str(v) |
1509 | - return settings |
1510 | - |
1511 | - |
1512 | -def relation_set(**kwargs): |
1513 | - cmd = [ |
1514 | - 'relation-set' |
1515 | - ] |
1516 | - args = [] |
1517 | - for k, v in kwargs.items(): |
1518 | - if k == 'rid': |
1519 | - if v: |
1520 | - cmd.append('-r') |
1521 | - cmd.append(v) |
1522 | - else: |
1523 | - args.append('{}={}'.format(k, v)) |
1524 | - cmd += args |
1525 | - subprocess.check_call(cmd) |
1526 | - |
1527 | - |
1528 | -@cached |
1529 | -def unit_get(attribute): |
1530 | - cmd = [ |
1531 | - 'unit-get', |
1532 | - attribute |
1533 | - ] |
1534 | - value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
1535 | - if value == "": |
1536 | - return None |
1537 | - else: |
1538 | - return value |
1539 | - |
1540 | - |
1541 | -@cached |
1542 | -def config_get(attribute): |
1543 | - cmd = [ |
1544 | - 'config-get', |
1545 | - '--format', |
1546 | - 'json', |
1547 | - ] |
1548 | - out = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
1549 | - cfg = json.loads(out) |
1550 | - |
1551 | - try: |
1552 | - return cfg[attribute] |
1553 | - except KeyError: |
1554 | - return None |
1555 | - |
1556 | - |
1557 | -@cached |
1558 | -def get_unit_hostname(): |
1559 | - return socket.gethostname() |
1560 | - |
1561 | - |
1562 | -@cached |
1563 | -def get_host_ip(hostname=unit_get('private-address')): |
1564 | - try: |
1565 | - # Test to see if already an IPv4 address |
1566 | - socket.inet_aton(hostname) |
1567 | - return hostname |
1568 | - except socket.error: |
1569 | - answers = dns.resolver.query(hostname, 'A') |
1570 | - if answers: |
1571 | - return answers[0].address |
1572 | - return None |
1573 | - |
1574 | - |
1575 | -def _svc_control(service, action): |
1576 | - subprocess.check_call(['service', service, action]) |
1577 | - |
1578 | - |
1579 | -def restart(*services): |
1580 | - for service in services: |
1581 | - _svc_control(service, 'restart') |
1582 | - |
1583 | - |
1584 | -def stop(*services): |
1585 | - for service in services: |
1586 | - _svc_control(service, 'stop') |
1587 | - |
1588 | - |
1589 | -def start(*services): |
1590 | - for service in services: |
1591 | - _svc_control(service, 'start') |
1592 | - |
1593 | - |
1594 | -def reload(*services): |
1595 | - for service in services: |
1596 | - try: |
1597 | - _svc_control(service, 'reload') |
1598 | - except subprocess.CalledProcessError: |
1599 | - # Reload failed - either service does not support reload |
1600 | - # or it was not running - restart will fixup most things |
1601 | - _svc_control(service, 'restart') |
1602 | - |
1603 | - |
1604 | -def running(service): |
1605 | - try: |
1606 | - output = subprocess.check_output(['service', service, 'status']) |
1607 | - except subprocess.CalledProcessError: |
1608 | - return False |
1609 | - else: |
1610 | - if ("start/running" in output or |
1611 | - "is running" in output): |
1612 | - return True |
1613 | - else: |
1614 | - return False |
1615 | - |
1616 | - |
1617 | -def is_relation_made(relation, key='private-address'): |
1618 | - for r_id in (relation_ids(relation) or []): |
1619 | - for unit in (relation_list(r_id) or []): |
1620 | - if relation_get(key, rid=r_id, unit=unit): |
1621 | - return True |
1622 | - return False |
1623 | |
1624 | === removed directory 'hooks/charmhelpers/contrib/jujugui' |
1625 | === removed file 'hooks/charmhelpers/contrib/jujugui/IMPORT' |
1626 | --- hooks/charmhelpers/contrib/jujugui/IMPORT 2013-06-07 09:39:50 +0000 |
1627 | +++ hooks/charmhelpers/contrib/jujugui/IMPORT 1970-01-01 00:00:00 +0000 |
1628 | @@ -1,4 +0,0 @@ |
1629 | -Source: lp:charms/juju-gui |
1630 | - |
1631 | -juju-gui/hooks/utils.py -> charm-helpers/charmhelpers/contrib/jujugui/utils.py |
1632 | -juju-gui/tests/test_utils.py -> charm-helpers/tests/contrib/jujugui/test_utils.py |
1633 | |
1634 | === removed file 'hooks/charmhelpers/contrib/jujugui/__init__.py' |
1635 | === removed file 'hooks/charmhelpers/contrib/jujugui/utils.py' |
1636 | --- hooks/charmhelpers/contrib/jujugui/utils.py 2013-06-07 09:39:50 +0000 |
1637 | +++ hooks/charmhelpers/contrib/jujugui/utils.py 1970-01-01 00:00:00 +0000 |
1638 | @@ -1,602 +0,0 @@ |
1639 | -"""Juju GUI charm utilities.""" |
1640 | - |
1641 | -__all__ = [ |
1642 | - 'AGENT', |
1643 | - 'APACHE', |
1644 | - 'API_PORT', |
1645 | - 'CURRENT_DIR', |
1646 | - 'HAPROXY', |
1647 | - 'IMPROV', |
1648 | - 'JUJU_DIR', |
1649 | - 'JUJU_GUI_DIR', |
1650 | - 'JUJU_GUI_SITE', |
1651 | - 'JUJU_PEM', |
1652 | - 'WEB_PORT', |
1653 | - 'bzr_checkout', |
1654 | - 'chain', |
1655 | - 'cmd_log', |
1656 | - 'fetch_api', |
1657 | - 'fetch_gui', |
1658 | - 'find_missing_packages', |
1659 | - 'first_path_in_dir', |
1660 | - 'get_api_address', |
1661 | - 'get_npm_cache_archive_url', |
1662 | - 'get_release_file_url', |
1663 | - 'get_staging_dependencies', |
1664 | - 'get_zookeeper_address', |
1665 | - 'legacy_juju', |
1666 | - 'log_hook', |
1667 | - 'merge', |
1668 | - 'parse_source', |
1669 | - 'prime_npm_cache', |
1670 | - 'render_to_file', |
1671 | - 'save_or_create_certificates', |
1672 | - 'setup_apache', |
1673 | - 'setup_gui', |
1674 | - 'start_agent', |
1675 | - 'start_gui', |
1676 | - 'start_improv', |
1677 | - 'write_apache_config', |
1678 | -] |
1679 | - |
1680 | -from contextlib import contextmanager |
1681 | -import errno |
1682 | -import json |
1683 | -import os |
1684 | -import logging |
1685 | -import shutil |
1686 | -from subprocess import CalledProcessError |
1687 | -import tempfile |
1688 | -from urlparse import urlparse |
1689 | - |
1690 | -import apt |
1691 | -import tempita |
1692 | - |
1693 | -from launchpadlib.launchpad import Launchpad |
1694 | -from shelltoolbox import ( |
1695 | - Serializer, |
1696 | - apt_get_install, |
1697 | - command, |
1698 | - environ, |
1699 | - install_extra_repositories, |
1700 | - run, |
1701 | - script_name, |
1702 | - search_file, |
1703 | - su, |
1704 | -) |
1705 | -from charmhelpers.core.host import ( |
1706 | - service_start, |
1707 | -) |
1708 | -from charmhelpers.core.hookenv import ( |
1709 | - log, |
1710 | - config, |
1711 | - unit_get, |
1712 | -) |
1713 | - |
1714 | - |
1715 | -AGENT = 'juju-api-agent' |
1716 | -APACHE = 'apache2' |
1717 | -IMPROV = 'juju-api-improv' |
1718 | -HAPROXY = 'haproxy' |
1719 | - |
1720 | -API_PORT = 8080 |
1721 | -WEB_PORT = 8000 |
1722 | - |
1723 | -CURRENT_DIR = os.getcwd() |
1724 | -JUJU_DIR = os.path.join(CURRENT_DIR, 'juju') |
1725 | -JUJU_GUI_DIR = os.path.join(CURRENT_DIR, 'juju-gui') |
1726 | -JUJU_GUI_SITE = '/etc/apache2/sites-available/juju-gui' |
1727 | -JUJU_GUI_PORTS = '/etc/apache2/ports.conf' |
1728 | -JUJU_PEM = 'juju.includes-private-key.pem' |
1729 | -BUILD_REPOSITORIES = ('ppa:chris-lea/node.js-legacy',) |
1730 | -DEB_BUILD_DEPENDENCIES = ( |
1731 | - 'bzr', 'imagemagick', 'make', 'nodejs', 'npm', |
1732 | -) |
1733 | -DEB_STAGE_DEPENDENCIES = ( |
1734 | - 'zookeeper', |
1735 | -) |
1736 | - |
1737 | - |
1738 | -# Store the configuration from on invocation to the next. |
1739 | -config_json = Serializer('/tmp/config.json') |
1740 | -# Bazaar checkout command. |
1741 | -bzr_checkout = command('bzr', 'co', '--lightweight') |
1742 | -# Whether or not the charm is deployed using juju-core. |
1743 | -# If juju-core has been used to deploy the charm, an agent.conf file must |
1744 | -# be present in the charm parent directory. |
1745 | -legacy_juju = lambda: not os.path.exists( |
1746 | - os.path.join(CURRENT_DIR, '..', 'agent.conf')) |
1747 | - |
1748 | - |
1749 | -def _get_build_dependencies(): |
1750 | - """Install deb dependencies for building.""" |
1751 | - log('Installing build dependencies.') |
1752 | - cmd_log(install_extra_repositories(*BUILD_REPOSITORIES)) |
1753 | - cmd_log(apt_get_install(*DEB_BUILD_DEPENDENCIES)) |
1754 | - |
1755 | - |
1756 | -def get_api_address(unit_dir): |
1757 | - """Return the Juju API address stored in the uniter agent.conf file.""" |
1758 | - import yaml # python-yaml is only installed if juju-core is used. |
1759 | - # XXX 2013-03-27 frankban bug=1161443: |
1760 | - # currently the uniter agent.conf file does not include the API |
1761 | - # address. For now retrieve it from the machine agent file. |
1762 | - base_dir = os.path.abspath(os.path.join(unit_dir, '..')) |
1763 | - for dirname in os.listdir(base_dir): |
1764 | - if dirname.startswith('machine-'): |
1765 | - agent_conf = os.path.join(base_dir, dirname, 'agent.conf') |
1766 | - break |
1767 | - else: |
1768 | - raise IOError('Juju agent configuration file not found.') |
1769 | - contents = yaml.load(open(agent_conf)) |
1770 | - return contents['apiinfo']['addrs'][0] |
1771 | - |
1772 | - |
1773 | -def get_staging_dependencies(): |
1774 | - """Install deb dependencies for the stage (improv) environment.""" |
1775 | - log('Installing stage dependencies.') |
1776 | - cmd_log(apt_get_install(*DEB_STAGE_DEPENDENCIES)) |
1777 | - |
1778 | - |
1779 | -def first_path_in_dir(directory): |
1780 | - """Return the full path of the first file/dir in *directory*.""" |
1781 | - return os.path.join(directory, os.listdir(directory)[0]) |
1782 | - |
1783 | - |
1784 | -def _get_by_attr(collection, attr, value): |
1785 | - """Return the first item in collection having attr == value. |
1786 | - |
1787 | - Return None if the item is not found. |
1788 | - """ |
1789 | - for item in collection: |
1790 | - if getattr(item, attr) == value: |
1791 | - return item |
1792 | - |
1793 | - |
1794 | -def get_release_file_url(project, series_name, release_version): |
1795 | - """Return the URL of the release file hosted in Launchpad. |
1796 | - |
1797 | - The returned URL points to a release file for the given project, series |
1798 | - name and release version. |
1799 | - The argument *project* is a project object as returned by launchpadlib. |
1800 | - The arguments *series_name* and *release_version* are strings. If |
1801 | - *release_version* is None, the URL of the latest release will be returned. |
1802 | - """ |
1803 | - series = _get_by_attr(project.series, 'name', series_name) |
1804 | - if series is None: |
1805 | - raise ValueError('%r: series not found' % series_name) |
1806 | - # Releases are returned by Launchpad in reverse date order. |
1807 | - releases = list(series.releases) |
1808 | - if not releases: |
1809 | - raise ValueError('%r: series does not contain releases' % series_name) |
1810 | - if release_version is not None: |
1811 | - release = _get_by_attr(releases, 'version', release_version) |
1812 | - if release is None: |
1813 | - raise ValueError('%r: release not found' % release_version) |
1814 | - releases = [release] |
1815 | - for release in releases: |
1816 | - for file_ in release.files: |
1817 | - if str(file_).endswith('.tgz'): |
1818 | - return file_.file_link |
1819 | - raise ValueError('%r: file not found' % release_version) |
1820 | - |
1821 | - |
1822 | -def get_zookeeper_address(agent_file_path): |
1823 | - """Retrieve the Zookeeper address contained in the given *agent_file_path*. |
1824 | - |
1825 | - The *agent_file_path* is a path to a file containing a line similar to the |
1826 | - following:: |
1827 | - |
1828 | - env JUJU_ZOOKEEPER="address" |
1829 | - """ |
1830 | - line = search_file('JUJU_ZOOKEEPER', agent_file_path).strip() |
1831 | - return line.split('=')[1].strip('"') |
1832 | - |
1833 | - |
1834 | -@contextmanager |
1835 | -def log_hook(): |
1836 | - """Log when a hook starts and stops its execution. |
1837 | - |
1838 | - Also log to stdout possible CalledProcessError exceptions raised executing |
1839 | - the hook. |
1840 | - """ |
1841 | - script = script_name() |
1842 | - log(">>> Entering {}".format(script)) |
1843 | - try: |
1844 | - yield |
1845 | - except CalledProcessError as err: |
1846 | - log('Exception caught:') |
1847 | - log(err.output) |
1848 | - raise |
1849 | - finally: |
1850 | - log("<<< Exiting {}".format(script)) |
1851 | - |
1852 | - |
1853 | -def parse_source(source): |
1854 | - """Parse the ``juju-gui-source`` option. |
1855 | - |
1856 | - Return a tuple of two elements representing info on how to deploy Juju GUI. |
1857 | - Examples: |
1858 | - - ('stable', None): latest stable release; |
1859 | - - ('stable', '0.1.0'): stable release v0.1.0; |
1860 | - - ('trunk', None): latest trunk release; |
1861 | - - ('trunk', '0.1.0+build.1'): trunk release v0.1.0 bzr revision 1; |
1862 | - - ('branch', 'lp:juju-gui'): release is made from a branch; |
1863 | - - ('url', 'http://example.com/gui'): release from a downloaded file. |
1864 | - """ |
1865 | - if source.startswith('url:'): |
1866 | - source = source[4:] |
1867 | - # Support file paths, including relative paths. |
1868 | - if urlparse(source).scheme == '': |
1869 | - if not source.startswith('/'): |
1870 | - source = os.path.join(os.path.abspath(CURRENT_DIR), source) |
1871 | - source = "file://%s" % source |
1872 | - return 'url', source |
1873 | - if source in ('stable', 'trunk'): |
1874 | - return source, None |
1875 | - if source.startswith('lp:') or source.startswith('http://'): |
1876 | - return 'branch', source |
1877 | - if 'build' in source: |
1878 | - return 'trunk', source |
1879 | - return 'stable', source |
1880 | - |
1881 | - |
1882 | -def render_to_file(template_name, context, destination): |
1883 | - """Render the given *template_name* into *destination* using *context*. |
1884 | - |
1885 | - The tempita template language is used to render contents |
1886 | - (see http://pythonpaste.org/tempita/). |
1887 | - The argument *template_name* is the name or path of the template file: |
1888 | - it may be either a path relative to ``../config`` or an absolute path. |
1889 | - The argument *destination* is a file path. |
1890 | - The argument *context* is a dict-like object. |
1891 | - """ |
1892 | - template_path = os.path.abspath(template_name) |
1893 | - template = tempita.Template.from_filename(template_path) |
1894 | - with open(destination, 'w') as stream: |
1895 | - stream.write(template.substitute(context)) |
1896 | - |
1897 | - |
1898 | -results_log = None |
1899 | - |
1900 | - |
1901 | -def _setupLogging(): |
1902 | - global results_log |
1903 | - if results_log is not None: |
1904 | - return |
1905 | - cfg = config() |
1906 | - logging.basicConfig( |
1907 | - filename=cfg['command-log-file'], |
1908 | - level=logging.INFO, |
1909 | - format="%(asctime)s: %(name)s@%(levelname)s %(message)s") |
1910 | - results_log = logging.getLogger('juju-gui') |
1911 | - |
1912 | - |
1913 | -def cmd_log(results): |
1914 | - global results_log |
1915 | - if not results: |
1916 | - return |
1917 | - if results_log is None: |
1918 | - _setupLogging() |
1919 | - # Since 'results' may be multi-line output, start it on a separate line |
1920 | - # from the logger timestamp, etc. |
1921 | - results_log.info('\n' + results) |
1922 | - |
1923 | - |
1924 | -def start_improv(staging_env, ssl_cert_path, |
1925 | - config_path='/etc/init/juju-api-improv.conf'): |
1926 | - """Start a simulated juju environment using ``improv.py``.""" |
1927 | - log('Setting up staging start up script.') |
1928 | - context = { |
1929 | - 'juju_dir': JUJU_DIR, |
1930 | - 'keys': ssl_cert_path, |
1931 | - 'port': API_PORT, |
1932 | - 'staging_env': staging_env, |
1933 | - } |
1934 | - render_to_file('config/juju-api-improv.conf.template', context, config_path) |
1935 | - log('Starting the staging backend.') |
1936 | - with su('root'): |
1937 | - service_start(IMPROV) |
1938 | - |
1939 | - |
1940 | -def start_agent( |
1941 | - ssl_cert_path, config_path='/etc/init/juju-api-agent.conf', |
1942 | - read_only=False): |
1943 | - """Start the Juju agent and connect to the current environment.""" |
1944 | - # Retrieve the Zookeeper address from the start up script. |
1945 | - unit_dir = os.path.realpath(os.path.join(CURRENT_DIR, '..')) |
1946 | - agent_file = '/etc/init/juju-{0}.conf'.format(os.path.basename(unit_dir)) |
1947 | - zookeeper = get_zookeeper_address(agent_file) |
1948 | - log('Setting up API agent start up script.') |
1949 | - context = { |
1950 | - 'juju_dir': JUJU_DIR, |
1951 | - 'keys': ssl_cert_path, |
1952 | - 'port': API_PORT, |
1953 | - 'zookeeper': zookeeper, |
1954 | - 'read_only': read_only |
1955 | - } |
1956 | - render_to_file('config/juju-api-agent.conf.template', context, config_path) |
1957 | - log('Starting API agent.') |
1958 | - with su('root'): |
1959 | - service_start(AGENT) |
1960 | - |
1961 | - |
1962 | -def start_gui( |
1963 | - console_enabled, login_help, readonly, in_staging, ssl_cert_path, |
1964 | - charmworld_url, serve_tests, haproxy_path='/etc/haproxy/haproxy.cfg', |
1965 | - config_js_path=None, secure=True, sandbox=False): |
1966 | - """Set up and start the Juju GUI server.""" |
1967 | - with su('root'): |
1968 | - run('chown', '-R', 'ubuntu:', JUJU_GUI_DIR) |
1969 | - # XXX 2013-02-05 frankban bug=1116320: |
1970 | - # External insecure resources are still loaded when testing in the |
1971 | - # debug environment. For now, switch to the production environment if |
1972 | - # the charm is configured to serve tests. |
1973 | - if in_staging and not serve_tests: |
1974 | - build_dirname = 'build-debug' |
1975 | - else: |
1976 | - build_dirname = 'build-prod' |
1977 | - build_dir = os.path.join(JUJU_GUI_DIR, build_dirname) |
1978 | - log('Generating the Juju GUI configuration file.') |
1979 | - is_legacy_juju = legacy_juju() |
1980 | - user, password = None, None |
1981 | - if (is_legacy_juju and in_staging) or sandbox: |
1982 | - user, password = 'admin', 'admin' |
1983 | - else: |
1984 | - user, password = None, None |
1985 | - |
1986 | - api_backend = 'python' if is_legacy_juju else 'go' |
1987 | - if secure: |
1988 | - protocol = 'wss' |
1989 | - else: |
1990 | - log('Running in insecure mode! Port 80 will serve unencrypted.') |
1991 | - protocol = 'ws' |
1992 | - |
1993 | - context = { |
1994 | - 'raw_protocol': protocol, |
1995 | - 'address': unit_get('public-address'), |
1996 | - 'console_enabled': json.dumps(console_enabled), |
1997 | - 'login_help': json.dumps(login_help), |
1998 | - 'password': json.dumps(password), |
1999 | - 'api_backend': json.dumps(api_backend), |
2000 | - 'readonly': json.dumps(readonly), |
2001 | - 'user': json.dumps(user), |
2002 | - 'protocol': json.dumps(protocol), |
2003 | - 'sandbox': json.dumps(sandbox), |
2004 | - 'charmworld_url': json.dumps(charmworld_url), |
2005 | - } |
2006 | - if config_js_path is None: |
2007 | - config_js_path = os.path.join( |
2008 | - build_dir, 'juju-ui', 'assets', 'config.js') |
2009 | - render_to_file('config/config.js.template', context, config_js_path) |
2010 | - |
2011 | - write_apache_config(build_dir, serve_tests) |
2012 | - |
2013 | - log('Generating haproxy configuration file.') |
2014 | - if is_legacy_juju: |
2015 | - # The PyJuju API agent is listening on localhost. |
2016 | - api_address = '127.0.0.1:{0}'.format(API_PORT) |
2017 | - else: |
2018 | - # Retrieve the juju-core API server address. |
2019 | - api_address = get_api_address(os.path.join(CURRENT_DIR, '..')) |
2020 | - context = { |
2021 | - 'api_address': api_address, |
2022 | - 'api_pem': JUJU_PEM, |
2023 | - 'legacy_juju': is_legacy_juju, |
2024 | - 'ssl_cert_path': ssl_cert_path, |
2025 | - # In PyJuju environments, use the same certificate for both HTTPS and |
2026 | - # WebSocket connections. In juju-core the system already has the proper |
2027 | - # certificate installed. |
2028 | - 'web_pem': JUJU_PEM, |
2029 | - 'web_port': WEB_PORT, |
2030 | - 'secure': secure |
2031 | - } |
2032 | - render_to_file('config/haproxy.cfg.template', context, haproxy_path) |
2033 | - log('Starting Juju GUI.') |
2034 | - |
2035 | - |
2036 | -def write_apache_config(build_dir, serve_tests=False): |
2037 | - log('Generating the apache site configuration file.') |
2038 | - context = { |
2039 | - 'port': WEB_PORT, |
2040 | - 'serve_tests': serve_tests, |
2041 | - 'server_root': build_dir, |
2042 | - 'tests_root': os.path.join(JUJU_GUI_DIR, 'test', ''), |
2043 | - } |
2044 | - render_to_file('config/apache-ports.template', context, JUJU_GUI_PORTS) |
2045 | - render_to_file('config/apache-site.template', context, JUJU_GUI_SITE) |
2046 | - |
2047 | - |
2048 | -def get_npm_cache_archive_url(Launchpad=Launchpad): |
2049 | - """Figure out the URL of the most recent NPM cache archive on Launchpad.""" |
2050 | - launchpad = Launchpad.login_anonymously('Juju GUI charm', 'production') |
2051 | - project = launchpad.projects['juju-gui'] |
2052 | - # Find the URL of the most recently created NPM cache archive. |
2053 | - npm_cache_url = get_release_file_url(project, 'npm-cache', None) |
2054 | - return npm_cache_url |
2055 | - |
2056 | - |
2057 | -def prime_npm_cache(npm_cache_url): |
2058 | - """Download NPM cache archive and prime the NPM cache with it.""" |
2059 | - # Download the cache archive and then uncompress it into the NPM cache. |
2060 | - npm_cache_archive = os.path.join(CURRENT_DIR, 'npm-cache.tgz') |
2061 | - cmd_log(run('curl', '-L', '-o', npm_cache_archive, npm_cache_url)) |
2062 | - npm_cache_dir = os.path.expanduser('~/.npm') |
2063 | - # The NPM cache directory probably does not exist, so make it if not. |
2064 | - try: |
2065 | - os.mkdir(npm_cache_dir) |
2066 | - except OSError, e: |
2067 | - # If the directory already exists then ignore the error. |
2068 | - if e.errno != errno.EEXIST: # File exists. |
2069 | - raise |
2070 | - uncompress = command('tar', '-x', '-z', '-C', npm_cache_dir, '-f') |
2071 | - cmd_log(uncompress(npm_cache_archive)) |
2072 | - |
2073 | - |
2074 | -def fetch_gui(juju_gui_source, logpath): |
2075 | - """Retrieve the Juju GUI release/branch.""" |
2076 | - # Retrieve a Juju GUI release. |
2077 | - origin, version_or_branch = parse_source(juju_gui_source) |
2078 | - if origin == 'branch': |
2079 | - # Make sure we have the dependencies necessary for us to actually make |
2080 | - # a build. |
2081 | - _get_build_dependencies() |
2082 | - # Create a release starting from a branch. |
2083 | - juju_gui_source_dir = os.path.join(CURRENT_DIR, 'juju-gui-source') |
2084 | - log('Retrieving Juju GUI source checkout from %s.' % version_or_branch) |
2085 | - cmd_log(run('rm', '-rf', juju_gui_source_dir)) |
2086 | - cmd_log(bzr_checkout(version_or_branch, juju_gui_source_dir)) |
2087 | - log('Preparing a Juju GUI release.') |
2088 | - logdir = os.path.dirname(logpath) |
2089 | - fd, name = tempfile.mkstemp(prefix='make-distfile-', dir=logdir) |
2090 | - log('Output from "make distfile" sent to %s' % name) |
2091 | - with environ(NO_BZR='1'): |
2092 | - run('make', '-C', juju_gui_source_dir, 'distfile', |
2093 | - stdout=fd, stderr=fd) |
2094 | - release_tarball = first_path_in_dir( |
2095 | - os.path.join(juju_gui_source_dir, 'releases')) |
2096 | - else: |
2097 | - log('Retrieving Juju GUI release.') |
2098 | - if origin == 'url': |
2099 | - file_url = version_or_branch |
2100 | - else: |
2101 | - # Retrieve a release from Launchpad. |
2102 | - launchpad = Launchpad.login_anonymously( |
2103 | - 'Juju GUI charm', 'production') |
2104 | - project = launchpad.projects['juju-gui'] |
2105 | - file_url = get_release_file_url(project, origin, version_or_branch) |
2106 | - log('Downloading release file from %s.' % file_url) |
2107 | - release_tarball = os.path.join(CURRENT_DIR, 'release.tgz') |
2108 | - cmd_log(run('curl', '-L', '-o', release_tarball, file_url)) |
2109 | - return release_tarball |
2110 | - |
2111 | - |
2112 | -def fetch_api(juju_api_branch): |
2113 | - """Retrieve the Juju branch.""" |
2114 | - # Retrieve Juju API source checkout. |
2115 | - log('Retrieving Juju API source checkout.') |
2116 | - cmd_log(run('rm', '-rf', JUJU_DIR)) |
2117 | - cmd_log(bzr_checkout(juju_api_branch, JUJU_DIR)) |
2118 | - |
2119 | - |
2120 | -def setup_gui(release_tarball): |
2121 | - """Set up Juju GUI.""" |
2122 | - # Uncompress the release tarball. |
2123 | - log('Installing Juju GUI.') |
2124 | - release_dir = os.path.join(CURRENT_DIR, 'release') |
2125 | - cmd_log(run('rm', '-rf', release_dir)) |
2126 | - os.mkdir(release_dir) |
2127 | - uncompress = command('tar', '-x', '-z', '-C', release_dir, '-f') |
2128 | - cmd_log(uncompress(release_tarball)) |
2129 | - # Link the Juju GUI dir to the contents of the release tarball. |
2130 | - cmd_log(run('ln', '-sf', first_path_in_dir(release_dir), JUJU_GUI_DIR)) |
2131 | - |
2132 | - |
2133 | -def setup_apache(): |
2134 | - """Set up apache.""" |
2135 | - log('Setting up apache.') |
2136 | - if not os.path.exists(JUJU_GUI_SITE): |
2137 | - cmd_log(run('touch', JUJU_GUI_SITE)) |
2138 | - cmd_log(run('chown', 'ubuntu:', JUJU_GUI_SITE)) |
2139 | - cmd_log( |
2140 | - run('ln', '-s', JUJU_GUI_SITE, |
2141 | - '/etc/apache2/sites-enabled/juju-gui')) |
2142 | - |
2143 | - if not os.path.exists(JUJU_GUI_PORTS): |
2144 | - cmd_log(run('touch', JUJU_GUI_PORTS)) |
2145 | - cmd_log(run('chown', 'ubuntu:', JUJU_GUI_PORTS)) |
2146 | - |
2147 | - with su('root'): |
2148 | - run('a2dissite', 'default') |
2149 | - run('a2ensite', 'juju-gui') |
2150 | - |
2151 | - |
2152 | -def save_or_create_certificates( |
2153 | - ssl_cert_path, ssl_cert_contents, ssl_key_contents): |
2154 | - """Generate the SSL certificates. |
2155 | - |
2156 | - If both *ssl_cert_contents* and *ssl_key_contents* are provided, use them |
2157 | - as certificates; otherwise, generate them. |
2158 | - |
2159 | - Also create a pem file, suitable for use in the haproxy configuration, |
2160 | - concatenating the key and the certificate files. |
2161 | - """ |
2162 | - crt_path = os.path.join(ssl_cert_path, 'juju.crt') |
2163 | - key_path = os.path.join(ssl_cert_path, 'juju.key') |
2164 | - if not os.path.exists(ssl_cert_path): |
2165 | - os.makedirs(ssl_cert_path) |
2166 | - if ssl_cert_contents and ssl_key_contents: |
2167 | - # Save the provided certificates. |
2168 | - with open(crt_path, 'w') as cert_file: |
2169 | - cert_file.write(ssl_cert_contents) |
2170 | - with open(key_path, 'w') as key_file: |
2171 | - key_file.write(ssl_key_contents) |
2172 | - else: |
2173 | - # Generate certificates. |
2174 | - # See http://superuser.com/questions/226192/openssl-without-prompt |
2175 | - cmd_log(run( |
2176 | - 'openssl', 'req', '-new', '-newkey', 'rsa:4096', |
2177 | - '-days', '365', '-nodes', '-x509', '-subj', |
2178 | - # These are arbitrary test values for the certificate. |
2179 | - '/C=GB/ST=Juju/L=GUI/O=Ubuntu/CN=juju.ubuntu.com', |
2180 | - '-keyout', key_path, '-out', crt_path)) |
2181 | - # Generate the pem file. |
2182 | - pem_path = os.path.join(ssl_cert_path, JUJU_PEM) |
2183 | - if os.path.exists(pem_path): |
2184 | - os.remove(pem_path) |
2185 | - with open(pem_path, 'w') as pem_file: |
2186 | - shutil.copyfileobj(open(key_path), pem_file) |
2187 | - shutil.copyfileobj(open(crt_path), pem_file) |
2188 | - |
2189 | - |
2190 | -def find_missing_packages(*packages): |
2191 | - """Given a list of packages, return the packages which are not installed. |
2192 | - """ |
2193 | - cache = apt.Cache() |
2194 | - missing = set() |
2195 | - for pkg_name in packages: |
2196 | - try: |
2197 | - pkg = cache[pkg_name] |
2198 | - except KeyError: |
2199 | - missing.add(pkg_name) |
2200 | - continue |
2201 | - if pkg.is_installed: |
2202 | - continue |
2203 | - missing.add(pkg_name) |
2204 | - return missing |
2205 | - |
2206 | - |
2207 | -## Backend support decorators |
2208 | - |
2209 | -def chain(name): |
2210 | - """Helper method to compose a set of mixin objects into a callable. |
2211 | - |
2212 | - Each method is called in the context of its mixin instance, and its |
2213 | - argument is the Backend instance. |
2214 | - """ |
2215 | - # Chain method calls through all implementing mixins. |
2216 | - def method(self): |
2217 | - for mixin in self.mixins: |
2218 | - a_callable = getattr(type(mixin), name, None) |
2219 | - if a_callable: |
2220 | - a_callable(mixin, self) |
2221 | - |
2222 | - method.__name__ = name |
2223 | - return method |
2224 | - |
2225 | - |
2226 | -def merge(name): |
2227 | - """Helper to merge a property from a set of strategy objects |
2228 | - into a unified set. |
2229 | - """ |
2230 | - # Return merged property from every providing mixin as a set. |
2231 | - @property |
2232 | - def method(self): |
2233 | - result = set() |
2234 | - for mixin in self.mixins: |
2235 | - segment = getattr(type(mixin), name, None) |
2236 | - if segment and isinstance(segment, (list, tuple, set)): |
2237 | - result |= set(segment) |
2238 | - |
2239 | - return result |
2240 | - return method |
2241 | |
2242 | === removed directory 'hooks/charmhelpers/contrib/openstack' |
2243 | === removed file 'hooks/charmhelpers/contrib/openstack/IMPORT' |
2244 | --- hooks/charmhelpers/contrib/openstack/IMPORT 2013-06-07 09:39:50 +0000 |
2245 | +++ hooks/charmhelpers/contrib/openstack/IMPORT 1970-01-01 00:00:00 +0000 |
2246 | @@ -1,9 +0,0 @@ |
2247 | -Source: lp:~openstack-charmers/openstack-charm-helpers/ha-helpers |
2248 | - |
2249 | -ha-helpers/lib/openstack-common -> charm-helpers/charmhelpers/contrib/openstackhelpers/openstack-common |
2250 | -ha-helpers/lib/openstack_common.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/openstack_common.py |
2251 | -ha-helpers/lib/nova -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova |
2252 | -ha-helpers/lib/nova/nova-common -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/nova-common |
2253 | -ha-helpers/lib/nova/grizzly -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/grizzly |
2254 | -ha-helpers/lib/nova/essex -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/essex |
2255 | -ha-helpers/lib/nova/folsom -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/folsom |
2256 | |
2257 | === removed file 'hooks/charmhelpers/contrib/openstack/__init__.py' |
2258 | === removed directory 'hooks/charmhelpers/contrib/openstack/nova' |
2259 | === removed file 'hooks/charmhelpers/contrib/openstack/nova/essex' |
2260 | --- hooks/charmhelpers/contrib/openstack/nova/essex 2013-06-07 09:39:50 +0000 |
2261 | +++ hooks/charmhelpers/contrib/openstack/nova/essex 1970-01-01 00:00:00 +0000 |
2262 | @@ -1,43 +0,0 @@ |
2263 | -#!/bin/bash -e |
2264 | - |
2265 | -# Essex-specific functions |
2266 | - |
2267 | -nova_set_or_update() { |
2268 | - # Set a config option in nova.conf or api-paste.ini, depending |
2269 | - # Defaults to updating nova.conf |
2270 | - local key=$1 |
2271 | - local value=$2 |
2272 | - local conf_file=$3 |
2273 | - local pattern="" |
2274 | - |
2275 | - local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} |
2276 | - local api_conf=${API_CONF:-/etc/nova/api-paste.ini} |
2277 | - local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} |
2278 | - [[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1 |
2279 | - [[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1 |
2280 | - [[ -z "$conf_file" ]] && conf_file=$nova_conf |
2281 | - |
2282 | - case "$conf_file" in |
2283 | - "$nova_conf") match="\-\-$key=" |
2284 | - pattern="--$key=" |
2285 | - out=$pattern |
2286 | - ;; |
2287 | - "$api_conf"|"$libvirtd_conf") match="^$key = " |
2288 | - pattern="$match" |
2289 | - out="$key = " |
2290 | - ;; |
2291 | - *) error_out "ERROR: set_or_update: Invalid conf_file ($conf_file)" |
2292 | - esac |
2293 | - |
2294 | - cat $conf_file | grep "$match$value" >/dev/null && |
2295 | - juju-log "$CHARM: $key=$value already in set in $conf_file" \ |
2296 | - && return 0 |
2297 | - if cat $conf_file | grep "$match" >/dev/null ; then |
2298 | - juju-log "$CHARM: Updating $conf_file, $key=$value" |
2299 | - sed -i "s|\($pattern\).*|\1$value|" $conf_file |
2300 | - else |
2301 | - juju-log "$CHARM: Setting new option $key=$value in $conf_file" |
2302 | - echo "$out$value" >>$conf_file |
2303 | - fi |
2304 | - CONFIG_CHANGED=True |
2305 | -} |
2306 | |
2307 | === removed file 'hooks/charmhelpers/contrib/openstack/nova/folsom' |
2308 | --- hooks/charmhelpers/contrib/openstack/nova/folsom 2013-06-07 09:39:50 +0000 |
2309 | +++ hooks/charmhelpers/contrib/openstack/nova/folsom 1970-01-01 00:00:00 +0000 |
2310 | @@ -1,81 +0,0 @@ |
2311 | -#!/bin/bash -e |
2312 | - |
2313 | -# Folsom-specific functions |
2314 | - |
2315 | -nova_set_or_update() { |
2316 | - # TODO: This needs to be shared among folsom, grizzly and beyond. |
2317 | - # Set a config option in nova.conf or api-paste.ini, depending |
2318 | - # Defaults to updating nova.conf |
2319 | - local key="$1" |
2320 | - local value="$2" |
2321 | - local conf_file="$3" |
2322 | - local section="${4:-DEFAULT}" |
2323 | - |
2324 | - local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} |
2325 | - local api_conf=${API_CONF:-/etc/nova/api-paste.ini} |
2326 | - local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf} |
2327 | - local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini} |
2328 | - local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini} |
2329 | - local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} |
2330 | - |
2331 | - [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1 |
2332 | - [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1 |
2333 | - |
2334 | - [[ -z "$conf_file" ]] && conf_file=$nova_conf |
2335 | - |
2336 | - local pattern="" |
2337 | - case "$conf_file" in |
2338 | - "$nova_conf") match="^$key=" |
2339 | - pattern="$key=" |
2340 | - out=$pattern |
2341 | - ;; |
2342 | - "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \ |
2343 | - "$libvirtd_conf") |
2344 | - match="^$key = " |
2345 | - pattern="$match" |
2346 | - out="$key = " |
2347 | - ;; |
2348 | - *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)" |
2349 | - esac |
2350 | - |
2351 | - cat $conf_file | grep "$match$value" >/dev/null && |
2352 | - juju-log "$CHARM: $key=$value already in set in $conf_file" \ |
2353 | - && return 0 |
2354 | - |
2355 | - case $conf_file in |
2356 | - "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf") |
2357 | - python -c " |
2358 | -import ConfigParser |
2359 | -config = ConfigParser.RawConfigParser() |
2360 | -config.read('$conf_file') |
2361 | -config.set('$section','$key','$value') |
2362 | -with open('$conf_file', 'wb') as configfile: |
2363 | - config.write(configfile) |
2364 | -" |
2365 | - ;; |
2366 | - *) |
2367 | - if cat $conf_file | grep "$match" >/dev/null ; then |
2368 | - juju-log "$CHARM: Updating $conf_file, $key=$value" |
2369 | - sed -i "s|\($pattern\).*|\1$value|" $conf_file |
2370 | - else |
2371 | - juju-log "$CHARM: Setting new option $key=$value in $conf_file" |
2372 | - echo "$out$value" >>$conf_file |
2373 | - fi |
2374 | - ;; |
2375 | - esac |
2376 | - CONFIG_CHANGED="True" |
2377 | -} |
2378 | - |
2379 | -# Upgrade Helpers |
2380 | -nova_pre_upgrade() { |
2381 | - # Pre-upgrade helper. Caller should pass the version of OpenStack we are |
2382 | - # upgrading from. |
2383 | - return 0 # Nothing to do here, yet. |
2384 | -} |
2385 | - |
2386 | -nova_post_upgrade() { |
2387 | - # Post-upgrade helper. Caller should pass the version of OpenStack we are |
2388 | - # upgrading from. |
2389 | - juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom." |
2390 | - # nothing to do here yet. |
2391 | -} |
2392 | |
2393 | === removed symlink 'hooks/charmhelpers/contrib/openstack/nova/grizzly' |
2394 | === target was u'folsom' |
2395 | === removed file 'hooks/charmhelpers/contrib/openstack/nova/nova-common' |
2396 | --- hooks/charmhelpers/contrib/openstack/nova/nova-common 2013-06-07 09:39:50 +0000 |
2397 | +++ hooks/charmhelpers/contrib/openstack/nova/nova-common 1970-01-01 00:00:00 +0000 |
2398 | @@ -1,147 +0,0 @@ |
2399 | -#!/bin/bash -e |
2400 | - |
2401 | -# Common utility functions used across all nova charms. |
2402 | - |
2403 | -CONFIG_CHANGED=False |
2404 | - |
2405 | -# Load the common OpenStack helper library. |
2406 | -if [[ -e $CHARM_DIR/lib/openstack-common ]] ; then |
2407 | - . $CHARM_DIR/lib/openstack-common |
2408 | -else |
2409 | - juju-log "Couldn't load $CHARM_DIR/lib/opentack-common." && exit 1 |
2410 | -fi |
2411 | - |
2412 | -set_or_update() { |
2413 | - # Update config flags in nova.conf or api-paste.ini. |
2414 | - # Config layout changed in Folsom, so this is now OpenStack release specific. |
2415 | - local rel=$(get_os_codename_package "nova-common") |
2416 | - . $CHARM_DIR/lib/nova/$rel |
2417 | - nova_set_or_update $@ |
2418 | -} |
2419 | - |
2420 | -function set_config_flags() { |
2421 | - # Set user-defined nova.conf flags from deployment config |
2422 | - juju-log "$CHARM: Processing config-flags." |
2423 | - flags=$(config-get config-flags) |
2424 | - if [[ "$flags" != "None" && -n "$flags" ]] ; then |
2425 | - for f in $(echo $flags | sed -e 's/,/ /g') ; do |
2426 | - k=$(echo $f | cut -d= -f1) |
2427 | - v=$(echo $f | cut -d= -f2) |
2428 | - set_or_update "$k" "$v" |
2429 | - done |
2430 | - fi |
2431 | -} |
2432 | - |
2433 | -configure_volume_service() { |
2434 | - local svc="$1" |
2435 | - local cur_vers="$(get_os_codename_package "nova-common")" |
2436 | - case "$svc" in |
2437 | - "cinder") |
2438 | - set_or_update "volume_api_class" "nova.volume.cinder.API" ;; |
2439 | - "nova-volume") |
2440 | - # nova-volume only supported before grizzly. |
2441 | - [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] && |
2442 | - set_or_update "volume_api_class" "nova.volume.api.API" |
2443 | - ;; |
2444 | - *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc" |
2445 | - return 1 ;; |
2446 | - esac |
2447 | -} |
2448 | - |
2449 | -function configure_network_manager { |
2450 | - local manager="$1" |
2451 | - echo "$CHARM: configuring $manager network manager" |
2452 | - case $1 in |
2453 | - "FlatManager") |
2454 | - set_or_update "network_manager" "nova.network.manager.FlatManager" |
2455 | - ;; |
2456 | - "FlatDHCPManager") |
2457 | - set_or_update "network_manager" "nova.network.manager.FlatDHCPManager" |
2458 | - |
2459 | - if [[ "$CHARM" == "nova-compute" ]] ; then |
2460 | - local flat_interface=$(config-get flat-interface) |
2461 | - local ec2_host=$(relation-get ec2_host) |
2462 | - set_or_update flat_inteface "$flat_interface" |
2463 | - set_or_update ec2_dmz_host "$ec2_host" |
2464 | - |
2465 | - # Ensure flat_interface has link. |
2466 | - if ip link show $flat_interface >/dev/null 2>&1 ; then |
2467 | - ip link set $flat_interface up |
2468 | - fi |
2469 | - |
2470 | - # work around (LP: #1035172) |
2471 | - if [[ -e /dev/vhost-net ]] ; then |
2472 | - iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \ |
2473 | - --checksum-fill |
2474 | - fi |
2475 | - fi |
2476 | - |
2477 | - ;; |
2478 | - "Quantum") |
2479 | - local local_ip=$(get_ip `unit-get private-address`) |
2480 | - [[ -n $local_ip ]] || { |
2481 | - juju-log "Unable to resolve local IP address" |
2482 | - exit 1 |
2483 | - } |
2484 | - set_or_update "network_api_class" "nova.network.quantumv2.api.API" |
2485 | - set_or_update "quantum_auth_strategy" "keystone" |
2486 | - set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF" |
2487 | - set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF" |
2488 | - if [ "$QUANTUM_PLUGIN" == "ovs" ]; then |
2489 | - set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS" |
2490 | - set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS" |
2491 | - set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS" |
2492 | - set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS" |
2493 | - fi |
2494 | - ;; |
2495 | - *) juju-log "ERROR: Invalid network manager $1" && exit 1 ;; |
2496 | - esac |
2497 | -} |
2498 | - |
2499 | -function trigger_remote_service_restarts() { |
2500 | - # Trigger a service restart on all other nova nodes that have a relation |
2501 | - # via the cloud-controller interface. |
2502 | - |
2503 | - # possible relations to other nova services. |
2504 | - local relations="cloud-compute nova-volume-service" |
2505 | - |
2506 | - for rel in $relations; do |
2507 | - local r_ids=$(relation-ids $rel) |
2508 | - for r_id in $r_ids ; do |
2509 | - juju-log "$CHARM: Triggering a service restart on relation $r_id." |
2510 | - relation-set -r $r_id restart-trigger=$(uuid) |
2511 | - done |
2512 | - done |
2513 | -} |
2514 | - |
2515 | -do_openstack_upgrade() { |
2516 | - # update openstack components to those provided by a new installation source |
2517 | - # it is assumed the calling hook has confirmed that the upgrade is sane. |
2518 | - local rel="$1" |
2519 | - shift |
2520 | - local packages=$@ |
2521 | - |
2522 | - orig_os_rel=$(get_os_codename_package "nova-common") |
2523 | - new_rel=$(get_os_codename_install_source "$rel") |
2524 | - |
2525 | - # Backup the config directory. |
2526 | - local stamp=$(date +"%Y%m%d%M%S") |
2527 | - tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR |
2528 | - |
2529 | - # load the release helper library for pre/post upgrade hooks specific to the |
2530 | - # release we are upgrading to. |
2531 | - . $CHARM_DIR/lib/nova/$new_rel |
2532 | - |
2533 | - # new release specific pre-upgrade hook |
2534 | - nova_pre_upgrade "$orig_os_rel" |
2535 | - |
2536 | - # Setup apt repository access and kick off the actual package upgrade. |
2537 | - configure_install_source "$rel" |
2538 | - apt-get update |
2539 | - DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \ |
2540 | - install --no-install-recommends $packages |
2541 | - |
2542 | - # new release sepcific post-upgrade hook |
2543 | - nova_post_upgrade "$orig_os_rel" |
2544 | - |
2545 | -} |
2546 | |
2547 | === removed file 'hooks/charmhelpers/contrib/openstack/openstack-common' |
2548 | --- hooks/charmhelpers/contrib/openstack/openstack-common 2013-06-07 09:39:50 +0000 |
2549 | +++ hooks/charmhelpers/contrib/openstack/openstack-common 1970-01-01 00:00:00 +0000 |
2550 | @@ -1,781 +0,0 @@ |
2551 | -#!/bin/bash -e |
2552 | - |
2553 | -# Common utility functions used across all OpenStack charms. |
2554 | - |
2555 | -error_out() { |
2556 | - juju-log "$CHARM ERROR: $@" |
2557 | - exit 1 |
2558 | -} |
2559 | - |
2560 | -function service_ctl_status { |
2561 | - # Return 0 if a service is running, 1 otherwise. |
2562 | - local svc="$1" |
2563 | - local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }') |
2564 | - case $status in |
2565 | - "start") return 0 ;; |
2566 | - "stop") return 1 ;; |
2567 | - *) error_out "Unexpected status of service $svc: $status" ;; |
2568 | - esac |
2569 | -} |
2570 | - |
2571 | -function service_ctl { |
2572 | - # control a specific service, or all (as defined by $SERVICES) |
2573 | - # service restarts will only occur depending on global $CONFIG_CHANGED, |
2574 | - # which should be updated in charm's set_or_update(). |
2575 | - local config_changed=${CONFIG_CHANGED:-True} |
2576 | - if [[ $1 == "all" ]] ; then |
2577 | - ctl="$SERVICES" |
2578 | - else |
2579 | - ctl="$1" |
2580 | - fi |
2581 | - action="$2" |
2582 | - if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then |
2583 | - error_out "ERROR service_ctl: Not enough arguments" |
2584 | - fi |
2585 | - |
2586 | - for i in $ctl ; do |
2587 | - case $action in |
2588 | - "start") |
2589 | - service_ctl_status $i || service $i start ;; |
2590 | - "stop") |
2591 | - service_ctl_status $i && service $i stop || return 0 ;; |
2592 | - "restart") |
2593 | - if [[ "$config_changed" == "True" ]] ; then |
2594 | - service_ctl_status $i && service $i restart || service $i start |
2595 | - fi |
2596 | - ;; |
2597 | - esac |
2598 | - if [[ $? != 0 ]] ; then |
2599 | - juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action" |
2600 | - fi |
2601 | - done |
2602 | - # all configs should have been reloaded on restart of all services, reset |
2603 | - # flag if its being used. |
2604 | - if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] && |
2605 | - [[ "$ctl" == "all" ]]; then |
2606 | - CONFIG_CHANGED="False" |
2607 | - fi |
2608 | -} |
2609 | - |
2610 | -function configure_install_source { |
2611 | - # Setup and configure installation source based on a config flag. |
2612 | - local src="$1" |
2613 | - |
2614 | - # Default to installing from the main Ubuntu archive. |
2615 | - [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0 |
2616 | - |
2617 | - . /etc/lsb-release |
2618 | - |
2619 | - # standard 'ppa:someppa/name' format. |
2620 | - if [[ "${src:0:4}" == "ppa:" ]] ; then |
2621 | - juju-log "$CHARM: Configuring installation from custom src ($src)" |
2622 | - add-apt-repository -y "$src" || error_out "Could not configure PPA access." |
2623 | - return 0 |
2624 | - fi |
2625 | - |
2626 | - # standard 'deb http://url/ubuntu main' entries. gpg key ids must |
2627 | - # be appended to the end of url after a |, ie: |
2628 | - # 'deb http://url/ubuntu main|$GPGKEYID' |
2629 | - if [[ "${src:0:3}" == "deb" ]] ; then |
2630 | - juju-log "$CHARM: Configuring installation from custom src URL ($src)" |
2631 | - if echo "$src" | grep -q "|" ; then |
2632 | - # gpg key id tagged to end of url folloed by a | |
2633 | - url=$(echo $src | cut -d'|' -f1) |
2634 | - key=$(echo $src | cut -d'|' -f2) |
2635 | - juju-log "$CHARM: Importing repository key: $key" |
2636 | - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \ |
2637 | - juju-log "$CHARM WARN: Could not import key from keyserver: $key" |
2638 | - else |
2639 | - juju-log "$CHARM No repository key specified." |
2640 | - url="$src" |
2641 | - fi |
2642 | - echo "$url" > /etc/apt/sources.list.d/juju_deb.list |
2643 | - return 0 |
2644 | - fi |
2645 | - |
2646 | - # Cloud Archive |
2647 | - if [[ "${src:0:6}" == "cloud:" ]] ; then |
2648 | - |
2649 | - # current os releases supported by the UCA. |
2650 | - local cloud_archive_versions="folsom grizzly" |
2651 | - |
2652 | - local ca_rel=$(echo $src | cut -d: -f2) |
2653 | - local u_rel=$(echo $ca_rel | cut -d- -f1) |
2654 | - local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1) |
2655 | - |
2656 | - [[ "$u_rel" != "$DISTRIB_CODENAME" ]] && |
2657 | - error_out "Cannot install from Cloud Archive pocket $src " \ |
2658 | - "on this Ubuntu version ($DISTRIB_CODENAME)!" |
2659 | - |
2660 | - valid_release="" |
2661 | - for rel in $cloud_archive_versions ; do |
2662 | - if [[ "$os_rel" == "$rel" ]] ; then |
2663 | - valid_release=1 |
2664 | - juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive." |
2665 | - fi |
2666 | - done |
2667 | - if [[ -z "$valid_release" ]] ; then |
2668 | - error_out "OpenStack release ($os_rel) not supported by "\ |
2669 | - "the Ubuntu Cloud Archive." |
2670 | - fi |
2671 | - |
2672 | - # CA staging repos are standard PPAs. |
2673 | - if echo $ca_rel | grep -q "staging" ; then |
2674 | - add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging |
2675 | - return 0 |
2676 | - fi |
2677 | - |
2678 | - # the others are LP-external deb repos. |
2679 | - case "$ca_rel" in |
2680 | - "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; |
2681 | - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; |
2682 | - "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; |
2683 | - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; |
2684 | - *) error_out "Invalid Cloud Archive repo specified: $src" |
2685 | - esac |
2686 | - |
2687 | - apt-get -y install ubuntu-cloud-keyring |
2688 | - entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main" |
2689 | - echo "$entry" \ |
2690 | - >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list |
2691 | - return 0 |
2692 | - fi |
2693 | - |
2694 | - error_out "Invalid installation source specified in config: $src" |
2695 | - |
2696 | -} |
2697 | - |
2698 | -get_os_codename_install_source() { |
2699 | - # derive the openstack release provided by a supported installation source. |
2700 | - local rel="$1" |
2701 | - local codename="unknown" |
2702 | - . /etc/lsb-release |
2703 | - |
2704 | - # map ubuntu releases to the openstack version shipped with it. |
2705 | - if [[ "$rel" == "distro" ]] ; then |
2706 | - case "$DISTRIB_CODENAME" in |
2707 | - "oneiric") codename="diablo" ;; |
2708 | - "precise") codename="essex" ;; |
2709 | - "quantal") codename="folsom" ;; |
2710 | - "raring") codename="grizzly" ;; |
2711 | - esac |
2712 | - fi |
2713 | - |
2714 | - # derive version from cloud archive strings. |
2715 | - if [[ "${rel:0:6}" == "cloud:" ]] ; then |
2716 | - rel=$(echo $rel | cut -d: -f2) |
2717 | - local u_rel=$(echo $rel | cut -d- -f1) |
2718 | - local ca_rel=$(echo $rel | cut -d- -f2) |
2719 | - if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then |
2720 | - case "$ca_rel" in |
2721 | - "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging") |
2722 | - codename="folsom" ;; |
2723 | - "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging") |
2724 | - codename="grizzly" ;; |
2725 | - esac |
2726 | - fi |
2727 | - fi |
2728 | - |
2729 | - # have a guess based on the deb string provided |
2730 | - if [[ "${rel:0:3}" == "deb" ]] || \ |
2731 | - [[ "${rel:0:3}" == "ppa" ]] ; then |
2732 | - CODENAMES="diablo essex folsom grizzly havana" |
2733 | - for cname in $CODENAMES; do |
2734 | - if echo $rel | grep -q $cname; then |
2735 | - codename=$cname |
2736 | - fi |
2737 | - done |
2738 | - fi |
2739 | - echo $codename |
2740 | -} |
2741 | - |
2742 | -get_os_codename_package() { |
2743 | - local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none" |
2744 | - pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs |
2745 | - case "${pkg_vers:0:6}" in |
2746 | - "2011.2") echo "diablo" ;; |
2747 | - "2012.1") echo "essex" ;; |
2748 | - "2012.2") echo "folsom" ;; |
2749 | - "2013.1") echo "grizzly" ;; |
2750 | - "2013.2") echo "havana" ;; |
2751 | - esac |
2752 | -} |
2753 | - |
2754 | -get_os_version_codename() { |
2755 | - case "$1" in |
2756 | - "diablo") echo "2011.2" ;; |
2757 | - "essex") echo "2012.1" ;; |
2758 | - "folsom") echo "2012.2" ;; |
2759 | - "grizzly") echo "2013.1" ;; |
2760 | - "havana") echo "2013.2" ;; |
2761 | - esac |
2762 | -} |
2763 | - |
2764 | -get_ip() { |
2765 | - dpkg -l | grep -q python-dnspython || { |
2766 | - apt-get -y install python-dnspython 2>&1 > /dev/null |
2767 | - } |
2768 | - hostname=$1 |
2769 | - python -c " |
2770 | -import dns.resolver |
2771 | -import socket |
2772 | -try: |
2773 | - # Test to see if already an IPv4 address |
2774 | - socket.inet_aton('$hostname') |
2775 | - print '$hostname' |
2776 | -except socket.error: |
2777 | - try: |
2778 | - answers = dns.resolver.query('$hostname', 'A') |
2779 | - if answers: |
2780 | - print answers[0].address |
2781 | - except dns.resolver.NXDOMAIN: |
2782 | - pass |
2783 | -" |
2784 | -} |
2785 | - |
2786 | -# Common storage routines used by cinder, nova-volume and swift-storage. |
2787 | -clean_storage() { |
2788 | - # if configured to overwrite existing storage, we unmount the block-dev |
2789 | - # if mounted and clear any previous pv signatures |
2790 | - local block_dev="$1" |
2791 | - juju-log "Cleaining storage '$block_dev'" |
2792 | - if grep -q "^$block_dev" /proc/mounts ; then |
2793 | - mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }') |
2794 | - juju-log "Unmounting $block_dev from $mp" |
2795 | - umount "$mp" || error_out "ERROR: Could not unmount storage from $mp" |
2796 | - fi |
2797 | - if pvdisplay "$block_dev" >/dev/null 2>&1 ; then |
2798 | - juju-log "Removing existing LVM PV signatures from $block_dev" |
2799 | - |
2800 | - # deactivate any volgroups that may be built on this dev |
2801 | - vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }') |
2802 | - if [[ -n "$vg" ]] ; then |
2803 | - juju-log "Deactivating existing volume group: $vg" |
2804 | - vgchange -an "$vg" || |
2805 | - error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?" |
2806 | - fi |
2807 | - echo "yes" | pvremove -ff "$block_dev" || |
2808 | - error_out "Could not pvremove $block_dev" |
2809 | - else |
2810 | - juju-log "Zapping disk of all GPT and MBR structures" |
2811 | - sgdisk --zap-all $block_dev || |
2812 | - error_out "Unable to zap $block_dev" |
2813 | - fi |
2814 | -} |
2815 | - |
2816 | -function get_block_device() { |
2817 | - # given a string, return full path to the block device for that |
2818 | - # if input is not a block device, find a loopback device |
2819 | - local input="$1" |
2820 | - |
2821 | - case "$input" in |
2822 | - /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist." |
2823 | - echo "$input"; return 0;; |
2824 | - /*) :;; |
2825 | - *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist." |
2826 | - echo "/dev/$input"; return 0;; |
2827 | - esac |
2828 | - |
2829 | - # this represents a file |
2830 | - # support "/path/to/file|5G" |
2831 | - local fpath size oifs="$IFS" |
2832 | - if [ "${input#*|}" != "${input}" ]; then |
2833 | - size=${input##*|} |
2834 | - fpath=${input%|*} |
2835 | - else |
2836 | - fpath=${input} |
2837 | - size=5G |
2838 | - fi |
2839 | - |
2840 | - ## loop devices are not namespaced. This is bad for containers. |
2841 | - ## it means that the output of 'losetup' may have the given $fpath |
2842 | - ## in it, but that may not represent this containers $fpath, but |
2843 | - ## another containers. To address that, we really need to |
2844 | - ## allow some uniq container-id to be expanded within path. |
2845 | - ## TODO: find a unique container-id that will be consistent for |
2846 | - ## this container throughout its lifetime and expand it |
2847 | - ## in the fpath. |
2848 | - # fpath=${fpath//%{id}/$THAT_ID} |
2849 | - |
2850 | - local found="" |
2851 | - # parse through 'losetup -a' output, looking for this file |
2852 | - # output is expected to look like: |
2853 | - # /dev/loop0: [0807]:961814 (/tmp/my.img) |
2854 | - found=$(losetup -a | |
2855 | - awk 'BEGIN { found=0; } |
2856 | - $3 == f { sub(/:$/,"",$1); print $1; found=found+1; } |
2857 | - END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \ |
2858 | - f="($fpath)") |
2859 | - |
2860 | - if [ $? -ne 0 ]; then |
2861 | - echo "multiple devices found for $fpath: $found" 1>&2 |
2862 | - return 1; |
2863 | - fi |
2864 | - |
2865 | - [ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; } |
2866 | - |
2867 | - if [ -n "$found" ]; then |
2868 | - echo "confused, $found is not a block device for $fpath"; |
2869 | - return 1; |
2870 | - fi |
2871 | - |
2872 | - # no existing device was found, create one |
2873 | - mkdir -p "${fpath%/*}" |
2874 | - truncate --size "$size" "$fpath" || |
2875 | - { echo "failed to create $fpath of size $size"; return 1; } |
2876 | - |
2877 | - found=$(losetup --find --show "$fpath") || |
2878 | - { echo "failed to setup loop device for $fpath" 1>&2; return 1; } |
2879 | - |
2880 | - echo "$found" |
2881 | - return 0 |
2882 | -} |
2883 | - |
2884 | -HAPROXY_CFG=/etc/haproxy/haproxy.cfg |
2885 | -HAPROXY_DEFAULT=/etc/default/haproxy |
2886 | -########################################################################## |
2887 | -# Description: Configures HAProxy services for Openstack API's |
2888 | -# Parameters: |
2889 | -# Space delimited list of service:port:mode combinations for which |
2890 | -# haproxy service configuration should be generated for. The function |
2891 | -# assumes the name of the peer relation is 'cluster' and that every |
2892 | -# service unit in the peer relation is running the same services. |
2893 | -# |
2894 | -# Services that do not specify :mode in parameter will default to http. |
2895 | -# |
2896 | -# Example |
2897 | -# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http |
2898 | -########################################################################## |
2899 | -configure_haproxy() { |
2900 | - local address=`unit-get private-address` |
2901 | - local name=${JUJU_UNIT_NAME////-} |
2902 | - cat > $HAPROXY_CFG << EOF |
2903 | -global |
2904 | - log 127.0.0.1 local0 |
2905 | - log 127.0.0.1 local1 notice |
2906 | - maxconn 20000 |
2907 | - user haproxy |
2908 | - group haproxy |
2909 | - spread-checks 0 |
2910 | - |
2911 | -defaults |
2912 | - log global |
2913 | - mode http |
2914 | - option httplog |
2915 | - option dontlognull |
2916 | - retries 3 |
2917 | - timeout queue 1000 |
2918 | - timeout connect 1000 |
2919 | - timeout client 30000 |
2920 | - timeout server 30000 |
2921 | - |
2922 | -listen stats :8888 |
2923 | - mode http |
2924 | - stats enable |
2925 | - stats hide-version |
2926 | - stats realm Haproxy\ Statistics |
2927 | - stats uri / |
2928 | - stats auth admin:password |
2929 | - |
2930 | -EOF |
2931 | - for service in $@; do |
2932 | - local service_name=$(echo $service | cut -d : -f 1) |
2933 | - local haproxy_listen_port=$(echo $service | cut -d : -f 2) |
2934 | - local api_listen_port=$(echo $service | cut -d : -f 3) |
2935 | - local mode=$(echo $service | cut -d : -f 4) |
2936 | - [[ -z "$mode" ]] && mode="http" |
2937 | - juju-log "Adding haproxy configuration entry for $service "\ |
2938 | - "($haproxy_listen_port -> $api_listen_port)" |
2939 | - cat >> $HAPROXY_CFG << EOF |
2940 | -listen $service_name 0.0.0.0:$haproxy_listen_port |
2941 | - balance roundrobin |
2942 | - mode $mode |
2943 | - option ${mode}log |
2944 | - server $name $address:$api_listen_port check |
2945 | -EOF |
2946 | - local r_id="" |
2947 | - local unit="" |
2948 | - for r_id in `relation-ids cluster`; do |
2949 | - for unit in `relation-list -r $r_id`; do |
2950 | - local unit_name=${unit////-} |
2951 | - local unit_address=`relation-get -r $r_id private-address $unit` |
2952 | - if [ -n "$unit_address" ]; then |
2953 | - echo " server $unit_name $unit_address:$api_listen_port check" \ |
2954 | - >> $HAPROXY_CFG |
2955 | - fi |
2956 | - done |
2957 | - done |
2958 | - done |
2959 | - echo "ENABLED=1" > $HAPROXY_DEFAULT |
2960 | - service haproxy restart |
2961 | -} |
2962 | - |
2963 | -########################################################################## |
2964 | -# Description: Query HA interface to determine is cluster is configured |
2965 | -# Returns: 0 if configured, 1 if not configured |
2966 | -########################################################################## |
2967 | -is_clustered() { |
2968 | - local r_id="" |
2969 | - local unit="" |
2970 | - for r_id in $(relation-ids ha); do |
2971 | - if [ -n "$r_id" ]; then |
2972 | - for unit in $(relation-list -r $r_id); do |
2973 | - clustered=$(relation-get -r $r_id clustered $unit) |
2974 | - if [ -n "$clustered" ]; then |
2975 | - juju-log "Unit is haclustered" |
2976 | - return 0 |
2977 | - fi |
2978 | - done |
2979 | - fi |
2980 | - done |
2981 | - juju-log "Unit is not haclustered" |
2982 | - return 1 |
2983 | -} |
2984 | - |
2985 | -########################################################################## |
2986 | -# Description: Return a list of all peers in cluster relations |
2987 | -########################################################################## |
2988 | -peer_units() { |
2989 | - local peers="" |
2990 | - local r_id="" |
2991 | - for r_id in $(relation-ids cluster); do |
2992 | - peers="$peers $(relation-list -r $r_id)" |
2993 | - done |
2994 | - echo $peers |
2995 | -} |
2996 | - |
2997 | -########################################################################## |
2998 | -# Description: Determines whether the current unit is the oldest of all |
2999 | -# its peers - supports partial leader election |
3000 | -# Returns: 0 if oldest, 1 if not |
3001 | -########################################################################## |
3002 | -oldest_peer() { |
3003 | - peers=$1 |
3004 | - local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2) |
3005 | - for peer in $peers; do |
3006 | - echo "Comparing $JUJU_UNIT_NAME with peers: $peers" |
3007 | - local r_unit_no=$(echo $peer | cut -d / -f 2) |
3008 | - if (($r_unit_no<$l_unit_no)); then |
3009 | - juju-log "Not oldest peer; deferring" |
3010 | - return 1 |
3011 | - fi |
3012 | - done |
3013 | - juju-log "Oldest peer; might take charge?" |
3014 | - return 0 |
3015 | -} |
3016 | - |
3017 | -########################################################################## |
3018 | -# Description: Determines whether the current service units is the |
3019 | -# leader within a) a cluster of its peers or b) across a |
3020 | -# set of unclustered peers. |
3021 | -# Parameters: CRM resource to check ownership of if clustered |
3022 | -# Returns: 0 if leader, 1 if not |
3023 | -########################################################################## |
3024 | -eligible_leader() { |
3025 | - if is_clustered; then |
3026 | - if ! is_leader $1; then |
3027 | - juju-log 'Deferring action to CRM leader' |
3028 | - return 1 |
3029 | - fi |
3030 | - else |
3031 | - peers=$(peer_units) |
3032 | - if [ -n "$peers" ] && ! oldest_peer "$peers"; then |
3033 | - juju-log 'Deferring action to oldest service unit.' |
3034 | - return 1 |
3035 | - fi |
3036 | - fi |
3037 | - return 0 |
3038 | -} |
3039 | - |
3040 | -########################################################################## |
3041 | -# Description: Query Cluster peer interface to see if peered |
3042 | -# Returns: 0 if peered, 1 if not peered |
3043 | -########################################################################## |
3044 | -is_peered() { |
3045 | - local r_id=$(relation-ids cluster) |
3046 | - if [ -n "$r_id" ]; then |
3047 | - if [ -n "$(relation-list -r $r_id)" ]; then |
3048 | - juju-log "Unit peered" |
3049 | - return 0 |
3050 | - fi |
3051 | - fi |
3052 | - juju-log "Unit not peered" |
3053 | - return 1 |
3054 | -} |
3055 | - |
3056 | -########################################################################## |
3057 | -# Description: Determines whether host is owner of clustered services |
3058 | -# Parameters: Name of CRM resource to check ownership of |
3059 | -# Returns: 0 if leader, 1 if not leader |
3060 | -########################################################################## |
3061 | -is_leader() { |
3062 | - hostname=`hostname` |
3063 | - if [ -x /usr/sbin/crm ]; then |
3064 | - if crm resource show $1 | grep -q $hostname; then |
3065 | - juju-log "$hostname is cluster leader." |
3066 | - return 0 |
3067 | - fi |
3068 | - fi |
3069 | - juju-log "$hostname is not cluster leader." |
3070 | - return 1 |
3071 | -} |
3072 | - |
3073 | -########################################################################## |
3074 | -# Description: Determines whether enough data has been provided in |
3075 | -# configuration or relation data to configure HTTPS. |
3076 | -# Parameters: None |
3077 | -# Returns: 0 if HTTPS can be configured, 1 if not. |
3078 | -########################################################################## |
3079 | -https() { |
3080 | - local r_id="" |
3081 | - if [[ -n "$(config-get ssl_cert)" ]] && |
3082 | - [[ -n "$(config-get ssl_key)" ]] ; then |
3083 | - return 0 |
3084 | - fi |
3085 | - for r_id in $(relation-ids identity-service) ; do |
3086 | - for unit in $(relation-list -r $r_id) ; do |
3087 | - if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] && |
3088 | - [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] && |
3089 | - [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] && |
3090 | - [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then |
3091 | - return 0 |
3092 | - fi |
3093 | - done |
3094 | - done |
3095 | - return 1 |
3096 | -} |
3097 | - |
3098 | -########################################################################## |
3099 | -# Description: For a given number of port mappings, configures apache2 |
3100 | -# HTTPs local reverse proxying using certficates and keys provided in |
3101 | -# either configuration data (preferred) or relation data. Assumes ports |
3102 | -# are not in use (calling charm should ensure that). |
3103 | -# Parameters: Variable number of proxy port mappings as |
3104 | -# $internal:$external. |
3105 | -# Returns: 0 if reverse proxy(s) have been configured, 0 if not. |
3106 | -########################################################################## |
3107 | -enable_https() { |
3108 | - local port_maps="$@" |
3109 | - local http_restart="" |
3110 | - juju-log "Enabling HTTPS for port mappings: $port_maps." |
3111 | - |
3112 | - # allow overriding of keystone provided certs with those set manually |
3113 | - # in config. |
3114 | - local cert=$(config-get ssl_cert) |
3115 | - local key=$(config-get ssl_key) |
3116 | - local ca_cert="" |
3117 | - if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then |
3118 | - juju-log "Inspecting identity-service relations for SSL certificate." |
3119 | - local r_id="" |
3120 | - cert="" |
3121 | - key="" |
3122 | - ca_cert="" |
3123 | - for r_id in $(relation-ids identity-service) ; do |
3124 | - for unit in $(relation-list -r $r_id) ; do |
3125 | - [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)" |
3126 | - [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)" |
3127 | - [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)" |
3128 | - done |
3129 | - done |
3130 | - [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di) |
3131 | - [[ -n "$key" ]] && key=$(echo $key | base64 -di) |
3132 | - [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di) |
3133 | - else |
3134 | - juju-log "Using SSL certificate provided in service config." |
3135 | - fi |
3136 | - |
3137 | - [[ -z "$cert" ]] || [[ -z "$key" ]] && |
3138 | - juju-log "Expected but could not find SSL certificate data, not "\ |
3139 | - "configuring HTTPS!" && return 1 |
3140 | - |
3141 | - apt-get -y install apache2 |
3142 | - a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" && |
3143 | - http_restart=1 |
3144 | - |
3145 | - mkdir -p /etc/apache2/ssl/$CHARM |
3146 | - echo "$cert" >/etc/apache2/ssl/$CHARM/cert |
3147 | - echo "$key" >/etc/apache2/ssl/$CHARM/key |
3148 | - if [[ -n "$ca_cert" ]] ; then |
3149 | - juju-log "Installing Keystone supplied CA cert." |
3150 | - echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt |
3151 | - update-ca-certificates --fresh |
3152 | - |
3153 | - # XXX TODO: Find a better way of exporting this? |
3154 | - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then |
3155 | - [[ -e /var/www/keystone_juju_ca_cert.crt ]] && |
3156 | - rm -rf /var/www/keystone_juju_ca_cert.crt |
3157 | - ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \ |
3158 | - /var/www/keystone_juju_ca_cert.crt |
3159 | - fi |
3160 | - |
3161 | - fi |
3162 | - for port_map in $port_maps ; do |
3163 | - local ext_port=$(echo $port_map | cut -d: -f1) |
3164 | - local int_port=$(echo $port_map | cut -d: -f2) |
3165 | - juju-log "Creating apache2 reverse proxy vhost for $port_map." |
3166 | - cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END |
3167 | -Listen $ext_port |
3168 | -NameVirtualHost *:$ext_port |
3169 | -<VirtualHost *:$ext_port> |
3170 | - ServerName $(unit-get private-address) |
3171 | - SSLEngine on |
3172 | - SSLCertificateFile /etc/apache2/ssl/$CHARM/cert |
3173 | - SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key |
3174 | - ProxyPass / http://localhost:$int_port/ |
3175 | - ProxyPassReverse / http://localhost:$int_port/ |
3176 | - ProxyPreserveHost on |
3177 | -</VirtualHost> |
3178 | -<Proxy *> |
3179 | - Order deny,allow |
3180 | - Allow from all |
3181 | -</Proxy> |
3182 | -<Location /> |
3183 | - Order allow,deny |
3184 | - Allow from all |
3185 | -</Location> |
3186 | -END |
3187 | - a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && |
3188 | - http_restart=1 |
3189 | - done |
3190 | - if [[ -n "$http_restart" ]] ; then |
3191 | - service apache2 restart |
3192 | - fi |
3193 | -} |
3194 | - |
3195 | -########################################################################## |
3196 | -# Description: Ensure HTTPS reverse proxying is disabled for given port |
3197 | -# mappings. |
3198 | -# Parameters: Variable number of proxy port mappings as |
3199 | -# $internal:$external. |
3200 | -# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error. |
3201 | -########################################################################## |
3202 | -disable_https() { |
3203 | - local port_maps="$@" |
3204 | - local http_restart="" |
3205 | - juju-log "Ensuring HTTPS disabled for $port_maps." |
3206 | - ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0 |
3207 | - for port_map in $port_maps ; do |
3208 | - local ext_port=$(echo $port_map | cut -d: -f1) |
3209 | - local int_port=$(echo $port_map | cut -d: -f2) |
3210 | - if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then |
3211 | - juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map." |
3212 | - a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && |
3213 | - http_restart=1 |
3214 | - fi |
3215 | - done |
3216 | - if [[ -n "$http_restart" ]] ; then |
3217 | - service apache2 restart |
3218 | - fi |
3219 | -} |
3220 | - |
3221 | - |
3222 | -########################################################################## |
3223 | -# Description: Ensures HTTPS is either enabled or disabled for given port |
3224 | -# mapping. |
3225 | -# Parameters: Variable number of proxy port mappings as |
3226 | -# $internal:$external. |
3227 | -# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not. |
3228 | -########################################################################## |
3229 | -setup_https() { |
3230 | - # configure https via apache reverse proxying either |
3231 | - # using certs provided by config or keystone. |
3232 | - [[ -z "$CHARM" ]] && |
3233 | - error_out "setup_https(): CHARM not set." |
3234 | - if ! https ; then |
3235 | - disable_https $@ |
3236 | - else |
3237 | - enable_https $@ |
3238 | - fi |
3239 | -} |
3240 | - |
3241 | -########################################################################## |
3242 | -# Description: Determine correct API server listening port based on |
3243 | -# existence of HTTPS reverse proxy and/or haproxy. |
3244 | -# Paremeters: The standard public port for given service. |
3245 | -# Returns: The correct listening port for API service. |
3246 | -########################################################################## |
3247 | -determine_api_port() { |
3248 | - local public_port="$1" |
3249 | - local i=0 |
3250 | - ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1] |
3251 | - https >/dev/null 2>&1 && i=$[$i + 1] |
3252 | - echo $[$public_port - $[$i * 10]] |
3253 | -} |
3254 | - |
3255 | -########################################################################## |
3256 | -# Description: Determine correct proxy listening port based on public IP + |
3257 | -# existence of HTTPS reverse proxy. |
3258 | -# Paremeters: The standard public port for given service. |
3259 | -# Returns: The correct listening port for haproxy service public address. |
3260 | -########################################################################## |
3261 | -determine_haproxy_port() { |
3262 | - local public_port="$1" |
3263 | - local i=0 |
3264 | - https >/dev/null 2>&1 && i=$[$i + 1] |
3265 | - echo $[$public_port - $[$i * 10]] |
3266 | -} |
3267 | - |
3268 | -########################################################################## |
3269 | -# Description: Print the value for a given config option in an OpenStack |
3270 | -# .ini style configuration file. |
3271 | -# Parameters: File path, option to retrieve, optional |
3272 | -# section name (default=DEFAULT) |
3273 | -# Returns: Prints value if set, prints nothing otherwise. |
3274 | -########################################################################## |
3275 | -local_config_get() { |
3276 | - # return config values set in openstack .ini config files. |
3277 | - # default placeholders starting (eg, %AUTH_HOST%) treated as |
3278 | - # unset values. |
3279 | - local file="$1" |
3280 | - local option="$2" |
3281 | - local section="$3" |
3282 | - [[ -z "$section" ]] && section="DEFAULT" |
3283 | - python -c " |
3284 | -import ConfigParser |
3285 | -config = ConfigParser.RawConfigParser() |
3286 | -config.read('$file') |
3287 | -try: |
3288 | - value = config.get('$section', '$option') |
3289 | -except: |
3290 | - print '' |
3291 | - exit(0) |
3292 | -if value.startswith('%'): exit(0) |
3293 | -print value |
3294 | -" |
3295 | -} |
3296 | - |
3297 | -########################################################################## |
3298 | -# Description: Creates an rc file exporting environment variables to a |
3299 | -# script_path local to the charm's installed directory. |
3300 | -# Any charm scripts run outside the juju hook environment can source this |
3301 | -# scriptrc to obtain updated config information necessary to perform health |
3302 | -# checks or service changes |
3303 | -# |
3304 | -# Parameters: |
3305 | -# An array of '=' delimited ENV_VAR:value combinations to export. |
3306 | -# If optional script_path key is not provided in the array, script_path |
3307 | -# defaults to scripts/scriptrc |
3308 | -########################################################################## |
3309 | -function save_script_rc { |
3310 | - if [ ! -n "$JUJU_UNIT_NAME" ]; then |
3311 | - echo "Error: Missing JUJU_UNIT_NAME environment variable" |
3312 | - exit 1 |
3313 | - fi |
3314 | - # our default unit_path |
3315 | - unit_path="/var/lib/juju/units/${JUJU_UNIT_NAME/\//-}/charm/scripts/scriptrc" |
3316 | - echo $unit_path |
3317 | - tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc" |
3318 | - |
3319 | - echo "#!/bin/bash" > $tmp_rc |
3320 | - for env_var in "${@}" |
3321 | - do |
3322 | - if `echo $env_var | grep -q script_path`; then |
3323 | - # well then we need to reset the new unit-local script path |
3324 | - unit_path="/var/lib/juju/units/${JUJU_UNIT_NAME/\//-}/charm/${env_var/script_path=/}" |
3325 | - else |
3326 | - echo "export $env_var" >> $tmp_rc |
3327 | - fi |
3328 | - done |
3329 | - chmod 755 $tmp_rc |
3330 | - mv $tmp_rc $unit_path |
3331 | -} |
3332 | |
3333 | === removed file 'hooks/charmhelpers/contrib/openstack/openstack_utils.py' |
3334 | --- hooks/charmhelpers/contrib/openstack/openstack_utils.py 2013-06-07 09:39:50 +0000 |
3335 | +++ hooks/charmhelpers/contrib/openstack/openstack_utils.py 1970-01-01 00:00:00 +0000 |
3336 | @@ -1,228 +0,0 @@ |
3337 | -#!/usr/bin/python |
3338 | - |
3339 | -# Common python helper functions used for OpenStack charms. |
3340 | - |
3341 | -import apt_pkg as apt |
3342 | -import subprocess |
3343 | -import os |
3344 | -import sys |
3345 | - |
3346 | -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" |
3347 | -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' |
3348 | - |
3349 | -ubuntu_openstack_release = { |
3350 | - 'oneiric': 'diablo', |
3351 | - 'precise': 'essex', |
3352 | - 'quantal': 'folsom', |
3353 | - 'raring': 'grizzly', |
3354 | -} |
3355 | - |
3356 | - |
3357 | -openstack_codenames = { |
3358 | - '2011.2': 'diablo', |
3359 | - '2012.1': 'essex', |
3360 | - '2012.2': 'folsom', |
3361 | - '2013.1': 'grizzly', |
3362 | - '2013.2': 'havana', |
3363 | -} |
3364 | - |
3365 | -# The ugly duckling |
3366 | -swift_codenames = { |
3367 | - '1.4.3': 'diablo', |
3368 | - '1.4.8': 'essex', |
3369 | - '1.7.4': 'folsom', |
3370 | - '1.7.6': 'grizzly', |
3371 | - '1.7.7': 'grizzly', |
3372 | - '1.8.0': 'grizzly', |
3373 | -} |
3374 | - |
3375 | - |
3376 | -def juju_log(msg): |
3377 | - subprocess.check_call(['juju-log', msg]) |
3378 | - |
3379 | - |
3380 | -def error_out(msg): |
3381 | - juju_log("FATAL ERROR: %s" % msg) |
3382 | - sys.exit(1) |
3383 | - |
3384 | - |
3385 | -def lsb_release(): |
3386 | - '''Return /etc/lsb-release in a dict''' |
3387 | - lsb = open('/etc/lsb-release', 'r') |
3388 | - d = {} |
3389 | - for l in lsb: |
3390 | - k, v = l.split('=') |
3391 | - d[k.strip()] = v.strip() |
3392 | - return d |
3393 | - |
3394 | - |
3395 | -def get_os_codename_install_source(src): |
3396 | - '''Derive OpenStack release codename from a given installation source.''' |
3397 | - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
3398 | - |
3399 | - rel = '' |
3400 | - if src == 'distro': |
3401 | - try: |
3402 | - rel = ubuntu_openstack_release[ubuntu_rel] |
3403 | - except KeyError: |
3404 | - e = 'Could not derive openstack release for '\ |
3405 | - 'this Ubuntu release: %s' % ubuntu_rel |
3406 | - error_out(e) |
3407 | - return rel |
3408 | - |
3409 | - if src.startswith('cloud:'): |
3410 | - ca_rel = src.split(':')[1] |
3411 | - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] |
3412 | - return ca_rel |
3413 | - |
3414 | - # Best guess match based on deb string provided |
3415 | - if src.startswith('deb') or src.startswith('ppa'): |
3416 | - for k, v in openstack_codenames.iteritems(): |
3417 | - if v in src: |
3418 | - return v |
3419 | - |
3420 | - |
3421 | -def get_os_codename_version(vers): |
3422 | - '''Determine OpenStack codename from version number.''' |
3423 | - try: |
3424 | - return openstack_codenames[vers] |
3425 | - except KeyError: |
3426 | - e = 'Could not determine OpenStack codename for version %s' % vers |
3427 | - error_out(e) |
3428 | - |
3429 | - |
3430 | -def get_os_version_codename(codename): |
3431 | - '''Determine OpenStack version number from codename.''' |
3432 | - for k, v in openstack_codenames.iteritems(): |
3433 | - if v == codename: |
3434 | - return k |
3435 | - e = 'Could not derive OpenStack version for '\ |
3436 | - 'codename: %s' % codename |
3437 | - error_out(e) |
3438 | - |
3439 | - |
3440 | -def get_os_codename_package(pkg): |
3441 | - '''Derive OpenStack release codename from an installed package.''' |
3442 | - apt.init() |
3443 | - cache = apt.Cache() |
3444 | - |
3445 | - try: |
3446 | - pkg = cache[pkg] |
3447 | - except: |
3448 | - e = 'Could not determine version of installed package: %s' % pkg |
3449 | - error_out(e) |
3450 | - |
3451 | - vers = apt.UpstreamVersion(pkg.current_ver.ver_str) |
3452 | - |
3453 | - try: |
3454 | - if 'swift' in pkg.name: |
3455 | - vers = vers[:5] |
3456 | - return swift_codenames[vers] |
3457 | - else: |
3458 | - vers = vers[:6] |
3459 | - return openstack_codenames[vers] |
3460 | - except KeyError: |
3461 | - e = 'Could not determine OpenStack codename for version %s' % vers |
3462 | - error_out(e) |
3463 | - |
3464 | - |
3465 | -def get_os_version_package(pkg): |
3466 | - '''Derive OpenStack version number from an installed package.''' |
3467 | - codename = get_os_codename_package(pkg) |
3468 | - |
3469 | - if 'swift' in pkg: |
3470 | - vers_map = swift_codenames |
3471 | - else: |
3472 | - vers_map = openstack_codenames |
3473 | - |
3474 | - for version, cname in vers_map.iteritems(): |
3475 | - if cname == codename: |
3476 | - return version |
3477 | - #e = "Could not determine OpenStack version for package: %s" % pkg |
3478 | - #error_out(e) |
3479 | - |
3480 | -def import_key(keyid): |
3481 | - cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ |
3482 | - "--recv-keys %s" % keyid |
3483 | - try: |
3484 | - subprocess.check_call(cmd.split(' ')) |
3485 | - except subprocess.CalledProcessError: |
3486 | - error_out("Error importing repo key %s" % keyid) |
3487 | - |
3488 | -def configure_installation_source(rel): |
3489 | - '''Configure apt installation source.''' |
3490 | - if rel == 'distro': |
3491 | - return |
3492 | - elif rel[:4] == "ppa:": |
3493 | - src = rel |
3494 | - subprocess.check_call(["add-apt-repository", "-y", src]) |
3495 | - elif rel[:3] == "deb": |
3496 | - l = len(rel.split('|')) |
3497 | - if l == 2: |
3498 | - src, key = rel.split('|') |
3499 | - juju_log("Importing PPA key from keyserver for %s" % src) |
3500 | - import_key(key) |
3501 | - elif l == 1: |
3502 | - src = rel |
3503 | - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: |
3504 | - f.write(src) |
3505 | - elif rel[:6] == 'cloud:': |
3506 | - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
3507 | - rel = rel.split(':')[1] |
3508 | - u_rel = rel.split('-')[0] |
3509 | - ca_rel = rel.split('-')[1] |
3510 | - |
3511 | - if u_rel != ubuntu_rel: |
3512 | - e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ |
3513 | - 'version (%s)' % (ca_rel, ubuntu_rel) |
3514 | - error_out(e) |
3515 | - |
3516 | - if 'staging' in ca_rel: |
3517 | - # staging is just a regular PPA. |
3518 | - os_rel = ca_rel.split('/')[0] |
3519 | - ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel |
3520 | - cmd = 'add-apt-repository -y %s' % ppa |
3521 | - subprocess.check_call(cmd.split(' ')) |
3522 | - return |
3523 | - |
3524 | - # map charm config options to actual archive pockets. |
3525 | - pockets = { |
3526 | - 'folsom': 'precise-updates/folsom', |
3527 | - 'folsom/updates': 'precise-updates/folsom', |
3528 | - 'folsom/proposed': 'precise-proposed/folsom', |
3529 | - 'grizzly': 'precise-updates/grizzly', |
3530 | - 'grizzly/updates': 'precise-updates/grizzly', |
3531 | - 'grizzly/proposed': 'precise-proposed/grizzly' |
3532 | - } |
3533 | - |
3534 | - try: |
3535 | - pocket = pockets[ca_rel] |
3536 | - except KeyError: |
3537 | - e = 'Invalid Cloud Archive release specified: %s' % rel |
3538 | - error_out(e) |
3539 | - |
3540 | - src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) |
3541 | - # TODO: Replace key import with cloud archive keyring pkg. |
3542 | - import_key(CLOUD_ARCHIVE_KEY_ID) |
3543 | - |
3544 | - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: |
3545 | - f.write(src) |
3546 | - else: |
3547 | - error_out("Invalid openstack-release specified: %s" % rel) |
3548 | - |
3549 | - |
3550 | -def save_script_rc(script_path="scripts/scriptrc", **env_vars): |
3551 | - """ |
3552 | - Write an rc file in the charm-delivered directory containing |
3553 | - exported environment variables provided by env_vars. Any charm scripts run |
3554 | - outside the juju hook environment can source this scriptrc to obtain |
3555 | - updated config information necessary to perform health checks or |
3556 | - service changes. |
3557 | - """ |
3558 | - unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-') |
3559 | - juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path) |
3560 | - with open(juju_rc_path, 'wb') as rc_script: |
3561 | - rc_script.write( |
3562 | - "#!/bin/bash\n") |
3563 | - [rc_script.write('export %s=%s\n' % (u, p)) |
3564 | - for u, p in env_vars.iteritems() if u != "script_path"] |
3565 | |
3566 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
3567 | --- hooks/charmhelpers/core/hookenv.py 2013-06-07 09:39:50 +0000 |
3568 | +++ hooks/charmhelpers/core/hookenv.py 2013-11-05 18:43:49 +0000 |
3569 | @@ -9,6 +9,7 @@ |
3570 | import yaml |
3571 | import subprocess |
3572 | import UserDict |
3573 | +from subprocess import CalledProcessError |
3574 | |
3575 | CRITICAL = "CRITICAL" |
3576 | ERROR = "ERROR" |
3577 | @@ -17,9 +18,47 @@ |
3578 | DEBUG = "DEBUG" |
3579 | MARKER = object() |
3580 | |
3581 | +cache = {} |
3582 | + |
3583 | + |
3584 | +def cached(func): |
3585 | + """Cache return values for multiple executions of func + args |
3586 | + |
3587 | + For example: |
3588 | + |
3589 | + @cached |
3590 | + def unit_get(attribute): |
3591 | + pass |
3592 | + |
3593 | + unit_get('test') |
3594 | + |
3595 | + will cache the result of unit_get + 'test' for future calls. |
3596 | + """ |
3597 | + def wrapper(*args, **kwargs): |
3598 | + global cache |
3599 | + key = str((func, args, kwargs)) |
3600 | + try: |
3601 | + return cache[key] |
3602 | + except KeyError: |
3603 | + res = func(*args, **kwargs) |
3604 | + cache[key] = res |
3605 | + return res |
3606 | + return wrapper |
3607 | + |
3608 | + |
3609 | +def flush(key): |
3610 | + """Flushes any entries from function cache where the |
3611 | + key is found in the function+args """ |
3612 | + flush_list = [] |
3613 | + for item in cache: |
3614 | + if key in item: |
3615 | + flush_list.append(item) |
3616 | + for item in flush_list: |
3617 | + del cache[item] |
3618 | + |
3619 | |
3620 | def log(message, level=None): |
3621 | - "Write a message to the juju log" |
3622 | + """Write a message to the juju log""" |
3623 | command = ['juju-log'] |
3624 | if level: |
3625 | command += ['-l', level] |
3626 | @@ -28,7 +67,7 @@ |
3627 | |
3628 | |
3629 | class Serializable(UserDict.IterableUserDict): |
3630 | - "Wrapper, an object that can be serialized to yaml or json" |
3631 | + """Wrapper, an object that can be serialized to yaml or json""" |
3632 | |
3633 | def __init__(self, obj): |
3634 | # wrap the object |
3635 | @@ -49,12 +88,20 @@ |
3636 | except KeyError: |
3637 | raise AttributeError(attr) |
3638 | |
3639 | + def __getstate__(self): |
3640 | + # Pickle as a standard dictionary. |
3641 | + return self.data |
3642 | + |
3643 | + def __setstate__(self, state): |
3644 | + # Unpickle into our wrapper. |
3645 | + self.data = state |
3646 | + |
3647 | def json(self): |
3648 | - "Serialize the object to json" |
3649 | + """Serialize the object to json""" |
3650 | return json.dumps(self.data) |
3651 | |
3652 | def yaml(self): |
3653 | - "Serialize the object to yaml" |
3654 | + """Serialize the object to yaml""" |
3655 | return yaml.dump(self.data) |
3656 | |
3657 | |
3658 | @@ -62,55 +109,62 @@ |
3659 | """A convenient bundling of the current execution context""" |
3660 | context = {} |
3661 | context['conf'] = config() |
3662 | - context['reltype'] = relation_type() |
3663 | - context['relid'] = relation_id() |
3664 | + if relation_id(): |
3665 | + context['reltype'] = relation_type() |
3666 | + context['relid'] = relation_id() |
3667 | + context['rel'] = relation_get() |
3668 | context['unit'] = local_unit() |
3669 | context['rels'] = relations() |
3670 | - context['rel'] = relation_get() |
3671 | context['env'] = os.environ |
3672 | return context |
3673 | |
3674 | |
3675 | def in_relation_hook(): |
3676 | - "Determine whether we're running in a relation hook" |
3677 | + """Determine whether we're running in a relation hook""" |
3678 | return 'JUJU_RELATION' in os.environ |
3679 | |
3680 | |
3681 | def relation_type(): |
3682 | - "The scope for the current relation hook" |
3683 | + """The scope for the current relation hook""" |
3684 | return os.environ.get('JUJU_RELATION', None) |
3685 | |
3686 | |
3687 | def relation_id(): |
3688 | - "The relation ID for the current relation hook" |
3689 | + """The relation ID for the current relation hook""" |
3690 | return os.environ.get('JUJU_RELATION_ID', None) |
3691 | |
3692 | |
3693 | def local_unit(): |
3694 | - "Local unit ID" |
3695 | + """Local unit ID""" |
3696 | return os.environ['JUJU_UNIT_NAME'] |
3697 | |
3698 | |
3699 | def remote_unit(): |
3700 | - "The remote unit for the current relation hook" |
3701 | + """The remote unit for the current relation hook""" |
3702 | return os.environ['JUJU_REMOTE_UNIT'] |
3703 | |
3704 | |
3705 | +def service_name(): |
3706 | + """The name of the service group this unit belongs to""" |
3707 | + return local_unit().split('/')[0] |
3708 | + |
3709 | + |
3710 | +@cached |
3711 | def config(scope=None): |
3712 | - "Juju charm configuration" |
3713 | + """Juju charm configuration""" |
3714 | config_cmd_line = ['config-get'] |
3715 | if scope is not None: |
3716 | config_cmd_line.append(scope) |
3717 | config_cmd_line.append('--format=json') |
3718 | try: |
3719 | - config_data = json.loads(subprocess.check_output(config_cmd_line)) |
3720 | - except (ValueError, OSError, subprocess.CalledProcessError) as err: |
3721 | - log(str(err), level=ERROR) |
3722 | - raise |
3723 | - return Serializable(config_data) |
3724 | - |
3725 | - |
3726 | + return json.loads(subprocess.check_output(config_cmd_line)) |
3727 | + except ValueError: |
3728 | + return None |
3729 | + |
3730 | + |
3731 | +@cached |
3732 | def relation_get(attribute=None, unit=None, rid=None): |
3733 | + """Get relation information""" |
3734 | _args = ['relation-get', '--format=json'] |
3735 | if rid: |
3736 | _args.append('-r') |
3737 | @@ -122,51 +176,63 @@ |
3738 | return json.loads(subprocess.check_output(_args)) |
3739 | except ValueError: |
3740 | return None |
3741 | + except CalledProcessError, e: |
3742 | + if e.returncode == 2: |
3743 | + return None |
3744 | + raise |
3745 | |
3746 | |
3747 | def relation_set(relation_id=None, relation_settings={}, **kwargs): |
3748 | + """Set relation information for the current unit""" |
3749 | relation_cmd_line = ['relation-set'] |
3750 | if relation_id is not None: |
3751 | relation_cmd_line.extend(('-r', relation_id)) |
3752 | - for k, v in relation_settings.items(): |
3753 | - relation_cmd_line.append('{}={}'.format(k, v)) |
3754 | - for k, v in kwargs.items(): |
3755 | - relation_cmd_line.append('{}={}'.format(k, v)) |
3756 | + for k, v in (relation_settings.items() + kwargs.items()): |
3757 | + if v is None: |
3758 | + relation_cmd_line.append('{}='.format(k)) |
3759 | + else: |
3760 | + relation_cmd_line.append('{}={}'.format(k, v)) |
3761 | subprocess.check_call(relation_cmd_line) |
3762 | - |
3763 | - |
3764 | + # Flush cache of any relation-gets for local unit |
3765 | + flush(local_unit()) |
3766 | + |
3767 | + |
3768 | +@cached |
3769 | def relation_ids(reltype=None): |
3770 | - "A list of relation_ids" |
3771 | + """A list of relation_ids""" |
3772 | reltype = reltype or relation_type() |
3773 | relid_cmd_line = ['relation-ids', '--format=json'] |
3774 | if reltype is not None: |
3775 | relid_cmd_line.append(reltype) |
3776 | - return json.loads(subprocess.check_output(relid_cmd_line)) |
3777 | + return json.loads(subprocess.check_output(relid_cmd_line)) or [] |
3778 | return [] |
3779 | |
3780 | |
3781 | +@cached |
3782 | def related_units(relid=None): |
3783 | - "A list of related units" |
3784 | + """A list of related units""" |
3785 | relid = relid or relation_id() |
3786 | units_cmd_line = ['relation-list', '--format=json'] |
3787 | if relid is not None: |
3788 | units_cmd_line.extend(('-r', relid)) |
3789 | - return json.loads(subprocess.check_output(units_cmd_line)) |
3790 | - |
3791 | - |
3792 | + return json.loads(subprocess.check_output(units_cmd_line)) or [] |
3793 | + |
3794 | + |
3795 | +@cached |
3796 | def relation_for_unit(unit=None, rid=None): |
3797 | - "Get the json represenation of a unit's relation" |
3798 | + """Get the json representation of a unit's relation""" |
3799 | unit = unit or remote_unit() |
3800 | relation = relation_get(unit=unit, rid=rid) |
3801 | for key in relation: |
3802 | if key.endswith('-list'): |
3803 | relation[key] = relation[key].split() |
3804 | relation['__unit__'] = unit |
3805 | - return Serializable(relation) |
3806 | - |
3807 | - |
3808 | + return relation |
3809 | + |
3810 | + |
3811 | +@cached |
3812 | def relations_for_id(relid=None): |
3813 | - "Get relations of a specific relation ID" |
3814 | + """Get relations of a specific relation ID""" |
3815 | relation_data = [] |
3816 | relid = relid or relation_ids() |
3817 | for unit in related_units(relid): |
3818 | @@ -176,8 +242,9 @@ |
3819 | return relation_data |
3820 | |
3821 | |
3822 | +@cached |
3823 | def relations_of_type(reltype=None): |
3824 | - "Get relations of a specific type" |
3825 | + """Get relations of a specific type""" |
3826 | relation_data = [] |
3827 | reltype = reltype or relation_type() |
3828 | for relid in relation_ids(reltype): |
3829 | @@ -187,13 +254,14 @@ |
3830 | return relation_data |
3831 | |
3832 | |
3833 | +@cached |
3834 | def relation_types(): |
3835 | - "Get a list of relation types supported by this charm" |
3836 | + """Get a list of relation types supported by this charm""" |
3837 | charmdir = os.environ.get('CHARM_DIR', '') |
3838 | mdf = open(os.path.join(charmdir, 'metadata.yaml')) |
3839 | md = yaml.safe_load(mdf) |
3840 | rel_types = [] |
3841 | - for key in ('provides','requires','peers'): |
3842 | + for key in ('provides', 'requires', 'peers'): |
3843 | section = md.get(key) |
3844 | if section: |
3845 | rel_types.extend(section.keys()) |
3846 | @@ -201,12 +269,14 @@ |
3847 | return rel_types |
3848 | |
3849 | |
3850 | +@cached |
3851 | def relations(): |
3852 | + """Get a nested dictionary of relation data for all related units""" |
3853 | rels = {} |
3854 | for reltype in relation_types(): |
3855 | relids = {} |
3856 | for relid in relation_ids(reltype): |
3857 | - units = {} |
3858 | + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} |
3859 | for unit in related_units(relid): |
3860 | reldata = relation_get(unit=unit, rid=relid) |
3861 | units[unit] = reldata |
3862 | @@ -216,41 +286,70 @@ |
3863 | |
3864 | |
3865 | def open_port(port, protocol="TCP"): |
3866 | - "Open a service network port" |
3867 | + """Open a service network port""" |
3868 | _args = ['open-port'] |
3869 | _args.append('{}/{}'.format(port, protocol)) |
3870 | subprocess.check_call(_args) |
3871 | |
3872 | |
3873 | def close_port(port, protocol="TCP"): |
3874 | - "Close a service network port" |
3875 | + """Close a service network port""" |
3876 | _args = ['close-port'] |
3877 | _args.append('{}/{}'.format(port, protocol)) |
3878 | subprocess.check_call(_args) |
3879 | |
3880 | |
3881 | +@cached |
3882 | def unit_get(attribute): |
3883 | - _args = ['unit-get', attribute] |
3884 | - return subprocess.check_output(_args).strip() |
3885 | + """Get the unit ID for the remote unit""" |
3886 | + _args = ['unit-get', '--format=json', attribute] |
3887 | + try: |
3888 | + return json.loads(subprocess.check_output(_args)) |
3889 | + except ValueError: |
3890 | + return None |
3891 | |
3892 | |
3893 | def unit_private_ip(): |
3894 | + """Get this unit's private IP address""" |
3895 | return unit_get('private-address') |
3896 | |
3897 | |
3898 | class UnregisteredHookError(Exception): |
3899 | + """Raised when an undefined hook is called""" |
3900 | pass |
3901 | |
3902 | |
3903 | class Hooks(object): |
3904 | + """A convenient handler for hook functions. |
3905 | + |
3906 | + Example: |
3907 | + hooks = Hooks() |
3908 | + |
3909 | + # register a hook, taking its name from the function name |
3910 | + @hooks.hook() |
3911 | + def install(): |
3912 | + ... |
3913 | + |
3914 | + # register a hook, providing a custom hook name |
3915 | + @hooks.hook("config-changed") |
3916 | + def config_changed(): |
3917 | + ... |
3918 | + |
3919 | + if __name__ == "__main__": |
3920 | + # execute a hook based on the name the program is called by |
3921 | + hooks.execute(sys.argv) |
3922 | + """ |
3923 | + |
3924 | def __init__(self): |
3925 | super(Hooks, self).__init__() |
3926 | self._hooks = {} |
3927 | |
3928 | def register(self, name, function): |
3929 | + """Register a hook""" |
3930 | self._hooks[name] = function |
3931 | |
3932 | def execute(self, args): |
3933 | + """Execute a registered hook based on args[0]""" |
3934 | hook_name = os.path.basename(args[0]) |
3935 | if hook_name in self._hooks: |
3936 | self._hooks[hook_name]() |
3937 | @@ -258,10 +357,19 @@ |
3938 | raise UnregisteredHookError(hook_name) |
3939 | |
3940 | def hook(self, *hook_names): |
3941 | + """Decorator, registering the decorated function under the given hook names""" |
3942 | def wrapper(decorated): |
3943 | for hook_name in hook_names: |
3944 | self.register(hook_name, decorated) |
3945 | else: |
3946 | self.register(decorated.__name__, decorated) |
3947 | + if '_' in decorated.__name__: |
3948 | + self.register( |
3949 | + decorated.__name__.replace('_', '-'), decorated) |
3950 | return decorated |
3951 | return wrapper |
3952 | + |
3953 | + |
3954 | +def charm_dir(): |
3955 | + """Return the root directory of the current charm""" |
3956 | + return os.environ.get('CHARM_DIR') |
3957 | |
3958 | === modified file 'hooks/charmhelpers/core/host.py' |
3959 | --- hooks/charmhelpers/core/host.py 2013-06-07 09:39:50 +0000 |
3960 | +++ hooks/charmhelpers/core/host.py 2013-11-05 18:43:49 +0000 |
3961 | @@ -8,46 +8,75 @@ |
3962 | import os |
3963 | import pwd |
3964 | import grp |
3965 | +import random |
3966 | +import string |
3967 | import subprocess |
3968 | - |
3969 | -from hookenv import log, execution_environment |
3970 | +import hashlib |
3971 | + |
3972 | +from collections import OrderedDict |
3973 | + |
3974 | +from hookenv import log |
3975 | |
3976 | |
3977 | def service_start(service_name): |
3978 | - service('start', service_name) |
3979 | + """Start a system service""" |
3980 | + return service('start', service_name) |
3981 | |
3982 | |
3983 | def service_stop(service_name): |
3984 | - service('stop', service_name) |
3985 | + """Stop a system service""" |
3986 | + return service('stop', service_name) |
3987 | + |
3988 | + |
3989 | +def service_restart(service_name): |
3990 | + """Restart a system service""" |
3991 | + return service('restart', service_name) |
3992 | + |
3993 | + |
3994 | +def service_reload(service_name, restart_on_failure=False): |
3995 | + """Reload a system service, optionally falling back to restart if reload fails""" |
3996 | + service_result = service('reload', service_name) |
3997 | + if not service_result and restart_on_failure: |
3998 | + service_result = service('restart', service_name) |
3999 | + return service_result |
4000 | |
4001 | |
4002 | def service(action, service_name): |
4003 | - cmd = None |
4004 | - if os.path.exists(os.path.join('/etc/init', '%s.conf' % service_name)): |
4005 | - cmd = ['initctl', action, service_name] |
4006 | - elif os.path.exists(os.path.join('/etc/init.d', service_name)): |
4007 | - cmd = [os.path.join('/etc/init.d', service_name), action] |
4008 | - if cmd: |
4009 | - return_value = subprocess.call(cmd) |
4010 | - return return_value == 0 |
4011 | - return False |
4012 | - |
4013 | - |
4014 | -def adduser(username, password, shell='/bin/bash'): |
4015 | - """Add a user""" |
4016 | - # TODO: generate a password if none is given |
4017 | + """Control a system service""" |
4018 | + cmd = ['service', service_name, action] |
4019 | + return subprocess.call(cmd) == 0 |
4020 | + |
4021 | + |
4022 | +def service_running(service): |
4023 | + """Determine whether a system service is running""" |
4024 | + try: |
4025 | + output = subprocess.check_output(['service', service, 'status']) |
4026 | + except subprocess.CalledProcessError: |
4027 | + return False |
4028 | + else: |
4029 | + if ("start/running" in output or "is running" in output): |
4030 | + return True |
4031 | + else: |
4032 | + return False |
4033 | + |
4034 | + |
4035 | +def adduser(username, password=None, shell='/bin/bash', system_user=False): |
4036 | + """Add a user to the system""" |
4037 | try: |
4038 | user_info = pwd.getpwnam(username) |
4039 | log('user {0} already exists!'.format(username)) |
4040 | except KeyError: |
4041 | log('creating user {0}'.format(username)) |
4042 | - cmd = [ |
4043 | - 'useradd', |
4044 | - '--create-home', |
4045 | - '--shell', shell, |
4046 | - '--password', password, |
4047 | - username |
4048 | - ] |
4049 | + cmd = ['useradd'] |
4050 | + if system_user or password is None: |
4051 | + cmd.append('--system') |
4052 | + else: |
4053 | + cmd.extend([ |
4054 | + '--create-home', |
4055 | + '--shell', shell, |
4056 | + '--password', password, |
4057 | + ]) |
4058 | + cmd.append(username) |
4059 | subprocess.check_call(cmd) |
4060 | user_info = pwd.getpwnam(username) |
4061 | return user_info |
4062 | @@ -66,36 +95,33 @@ |
4063 | |
4064 | def rsync(from_path, to_path, flags='-r', options=None): |
4065 | """Replicate the contents of a path""" |
4066 | - context = execution_environment() |
4067 | options = options or ['--delete', '--executability'] |
4068 | cmd = ['/usr/bin/rsync', flags] |
4069 | cmd.extend(options) |
4070 | - cmd.append(from_path.format(**context)) |
4071 | - cmd.append(to_path.format(**context)) |
4072 | + cmd.append(from_path) |
4073 | + cmd.append(to_path) |
4074 | log(" ".join(cmd)) |
4075 | return subprocess.check_output(cmd).strip() |
4076 | |
4077 | |
4078 | def symlink(source, destination): |
4079 | """Create a symbolic link""" |
4080 | - context = execution_environment() |
4081 | log("Symlinking {} as {}".format(source, destination)) |
4082 | cmd = [ |
4083 | 'ln', |
4084 | '-sf', |
4085 | - source.format(**context), |
4086 | - destination.format(**context) |
4087 | + source, |
4088 | + destination, |
4089 | ] |
4090 | subprocess.check_call(cmd) |
4091 | |
4092 | |
4093 | def mkdir(path, owner='root', group='root', perms=0555, force=False): |
4094 | """Create a directory""" |
4095 | - context = execution_environment() |
4096 | log("Making dir {} {}:{} {:o}".format(path, owner, group, |
4097 | perms)) |
4098 | - uid = pwd.getpwnam(owner.format(**context)).pw_uid |
4099 | - gid = grp.getgrnam(group.format(**context)).gr_gid |
4100 | + uid = pwd.getpwnam(owner).pw_uid |
4101 | + gid = grp.getgrnam(group).gr_gid |
4102 | realpath = os.path.abspath(path) |
4103 | if os.path.exists(realpath): |
4104 | if force and not os.path.isdir(realpath): |
4105 | @@ -106,50 +132,19 @@ |
4106 | os.chown(realpath, uid, gid) |
4107 | |
4108 | |
4109 | -def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs): |
4110 | +def write_file(path, content, owner='root', group='root', perms=0444): |
4111 | """Create or overwrite a file with the contents of a string""" |
4112 | - context = execution_environment() |
4113 | - context.update(kwargs) |
4114 | - log("Writing file {} {}:{} {:o}".format(path, owner, group, |
4115 | - perms)) |
4116 | - uid = pwd.getpwnam(owner.format(**context)).pw_uid |
4117 | - gid = grp.getgrnam(group.format(**context)).gr_gid |
4118 | - with open(path.format(**context), 'w') as target: |
4119 | + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) |
4120 | + uid = pwd.getpwnam(owner).pw_uid |
4121 | + gid = grp.getgrnam(group).gr_gid |
4122 | + with open(path, 'w') as target: |
4123 | os.fchown(target.fileno(), uid, gid) |
4124 | os.fchmod(target.fileno(), perms) |
4125 | - target.write(fmtstr.format(**context)) |
4126 | - |
4127 | - |
4128 | -def render_template_file(source, destination, **kwargs): |
4129 | - """Create or overwrite a file using a template""" |
4130 | - log("Rendering template {} for {}".format(source, |
4131 | - destination)) |
4132 | - context = execution_environment() |
4133 | - with open(source.format(**context), 'r') as template: |
4134 | - write_file(destination.format(**context), template.read(), |
4135 | - **kwargs) |
4136 | - |
4137 | - |
4138 | -def apt_install(packages, options=None, fatal=False): |
4139 | - """Install one or more packages""" |
4140 | - options = options or [] |
4141 | - cmd = ['apt-get', '-y'] |
4142 | - cmd.extend(options) |
4143 | - cmd.append('install') |
4144 | - if isinstance(packages, basestring): |
4145 | - cmd.append(packages) |
4146 | - else: |
4147 | - cmd.extend(packages) |
4148 | - log("Installing {} with options: {}".format(packages, |
4149 | - options)) |
4150 | - if fatal: |
4151 | - subprocess.check_call(cmd) |
4152 | - else: |
4153 | - subprocess.call(cmd) |
4154 | + target.write(content) |
4155 | |
4156 | |
4157 | def mount(device, mountpoint, options=None, persist=False): |
4158 | - '''Mount a filesystem''' |
4159 | + """Mount a filesystem at a particular mountpoint""" |
4160 | cmd_args = ['mount'] |
4161 | if options is not None: |
4162 | cmd_args.extend(['-o', options]) |
4163 | @@ -166,7 +161,7 @@ |
4164 | |
4165 | |
4166 | def umount(mountpoint, persist=False): |
4167 | - '''Unmount a filesystem''' |
4168 | + """Unmount a filesystem""" |
4169 | cmd_args = ['umount', mountpoint] |
4170 | try: |
4171 | subprocess.check_output(cmd_args) |
4172 | @@ -180,9 +175,73 @@ |
4173 | |
4174 | |
4175 | def mounts(): |
4176 | - '''List of all mounted volumes as [[mountpoint,device],[...]]''' |
4177 | + """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" |
4178 | with open('/proc/mounts') as f: |
4179 | # [['/mount/point','/dev/path'],[...]] |
4180 | system_mounts = [m[1::-1] for m in [l.strip().split() |
4181 | for l in f.readlines()]] |
4182 | return system_mounts |
4183 | + |
4184 | + |
4185 | +def file_hash(path): |
4186 | + """Generate an md5 hash of the contents of 'path' or None if not found""" |
4187 | + if os.path.exists(path): |
4188 | + h = hashlib.md5() |
4189 | + with open(path, 'r') as source: |
4190 | + h.update(source.read()) # IGNORE:E1101 - it does have update |
4191 | + return h.hexdigest() |
4192 | + else: |
4193 | + return None |
4194 | + |
4195 | + |
4196 | +def restart_on_change(restart_map): |
4197 | + """Restart services based on configuration files changing |
4198 | + |
4199 | + This function is used as a decorator, for example |
4200 | + |
4201 | + @restart_on_change({ |
4202 | + '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
4203 | + }) |
4204 | + def ceph_client_changed(): |
4205 | + ... |
4206 | + |
4207 | + In this example, the cinder-api and cinder-volume services |
4208 | + would be restarted if /etc/ceph/ceph.conf is changed by the |
4209 | + ceph_client_changed function. |
4210 | + """ |
4211 | + def wrap(f): |
4212 | + def wrapped_f(*args): |
4213 | + checksums = {} |
4214 | + for path in restart_map: |
4215 | + checksums[path] = file_hash(path) |
4216 | + f(*args) |
4217 | + restarts = [] |
4218 | + for path in restart_map: |
4219 | + if checksums[path] != file_hash(path): |
4220 | + restarts += restart_map[path] |
4221 | + for service_name in list(OrderedDict.fromkeys(restarts)): |
4222 | + service('restart', service_name) |
4223 | + return wrapped_f |
4224 | + return wrap |
4225 | + |
4226 | + |
4227 | +def lsb_release(): |
4228 | + """Return /etc/lsb-release in a dict""" |
4229 | + d = {} |
4230 | + with open('/etc/lsb-release', 'r') as lsb: |
4231 | + for l in lsb: |
4232 | + k, v = l.split('=') |
4233 | + d[k.strip()] = v.strip() |
4234 | + return d |
4235 | + |
4236 | + |
4237 | +def pwgen(length=None): |
4238 | + """Generate a random password.""" |
4239 | + if length is None: |
4240 | + length = random.choice(range(35, 45)) |
4241 | + alphanumeric_chars = [ |
4242 | + l for l in (string.letters + string.digits) |
4243 | + if l not in 'l0QD1vAEIOUaeiou'] |
4244 | + random_chars = [ |
4245 | + random.choice(alphanumeric_chars) for _ in range(length)] |
4246 | + return(''.join(random_chars)) |
4247 | |
4248 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
4249 | --- hooks/charmhelpers/fetch/__init__.py 2013-06-07 09:39:50 +0000 |
4250 | +++ hooks/charmhelpers/fetch/__init__.py 2013-11-05 18:43:49 +0000 |
4251 | @@ -1,15 +1,116 @@ |
4252 | +import importlib |
4253 | from yaml import safe_load |
4254 | -from core.hookenv import config_get |
4255 | -from subprocess import check_call |
4256 | +from charmhelpers.core.host import ( |
4257 | + lsb_release |
4258 | +) |
4259 | +from urlparse import ( |
4260 | + urlparse, |
4261 | + urlunparse, |
4262 | +) |
4263 | +import subprocess |
4264 | +from charmhelpers.core.hookenv import ( |
4265 | + config, |
4266 | + log, |
4267 | +) |
4268 | +import apt_pkg |
4269 | + |
4270 | +CLOUD_ARCHIVE = """# Ubuntu Cloud Archive |
4271 | +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
4272 | +""" |
4273 | +PROPOSED_POCKET = """# Proposed |
4274 | +deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted |
4275 | +""" |
4276 | + |
4277 | + |
4278 | +def filter_installed_packages(packages): |
4279 | + """Returns a list of packages that require installation""" |
4280 | + apt_pkg.init() |
4281 | + cache = apt_pkg.Cache() |
4282 | + _pkgs = [] |
4283 | + for package in packages: |
4284 | + try: |
4285 | + p = cache[package] |
4286 | + p.current_ver or _pkgs.append(package) |
4287 | + except KeyError: |
4288 | + log('Package {} has no installation candidate.'.format(package), |
4289 | + level='WARNING') |
4290 | + _pkgs.append(package) |
4291 | + return _pkgs |
4292 | + |
4293 | + |
4294 | +def apt_install(packages, options=None, fatal=False): |
4295 | + """Install one or more packages""" |
4296 | + options = options or [] |
4297 | + cmd = ['apt-get', '-y'] |
4298 | + cmd.extend(options) |
4299 | + cmd.append('install') |
4300 | + if isinstance(packages, basestring): |
4301 | + cmd.append(packages) |
4302 | + else: |
4303 | + cmd.extend(packages) |
4304 | + log("Installing {} with options: {}".format(packages, |
4305 | + options)) |
4306 | + if fatal: |
4307 | + subprocess.check_call(cmd) |
4308 | + else: |
4309 | + subprocess.call(cmd) |
4310 | + |
4311 | + |
4312 | +def apt_update(fatal=False): |
4313 | + """Update local apt cache""" |
4314 | + cmd = ['apt-get', 'update'] |
4315 | + if fatal: |
4316 | + subprocess.check_call(cmd) |
4317 | + else: |
4318 | + subprocess.call(cmd) |
4319 | + |
4320 | + |
4321 | +def apt_purge(packages, fatal=False): |
4322 | + """Purge one or more packages""" |
4323 | + cmd = ['apt-get', '-y', 'purge'] |
4324 | + if isinstance(packages, basestring): |
4325 | + cmd.append(packages) |
4326 | + else: |
4327 | + cmd.extend(packages) |
4328 | + log("Purging {}".format(packages)) |
4329 | + if fatal: |
4330 | + subprocess.check_call(cmd) |
4331 | + else: |
4332 | + subprocess.call(cmd) |
4333 | + |
4334 | + |
4335 | +def apt_hold(packages, fatal=False): |
4336 | + """Hold one or more packages""" |
4337 | + cmd = ['apt-mark', 'hold'] |
4338 | + if isinstance(packages, basestring): |
4339 | + cmd.append(packages) |
4340 | + else: |
4341 | + cmd.extend(packages) |
4342 | + log("Holding {}".format(packages)) |
4343 | + if fatal: |
4344 | + subprocess.check_call(cmd) |
4345 | + else: |
4346 | + subprocess.call(cmd) |
4347 | |
4348 | |
4349 | def add_source(source, key=None): |
4350 | - if ((source.startswith('ppa:') or |
4351 | - source.startswith('cloud:') or |
4352 | - source.startswith('http:'))): |
4353 | - check_call('add-apt-repository', source) |
4354 | + if (source.startswith('ppa:') or |
4355 | + source.startswith('http:') or |
4356 | + source.startswith('deb ') or |
4357 | + source.startswith('cloud-archive:')): |
4358 | + subprocess.check_call(['add-apt-repository', '--yes', source]) |
4359 | + elif source.startswith('cloud:'): |
4360 | + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), |
4361 | + fatal=True) |
4362 | + pocket = source.split(':')[-1] |
4363 | + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
4364 | + apt.write(CLOUD_ARCHIVE.format(pocket)) |
4365 | + elif source == 'proposed': |
4366 | + release = lsb_release()['DISTRIB_CODENAME'] |
4367 | + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
4368 | + apt.write(PROPOSED_POCKET.format(release)) |
4369 | if key: |
4370 | - check_call('apt-key', 'import', key) |
4371 | + subprocess.check_call(['apt-key', 'import', key]) |
4372 | |
4373 | |
4374 | class SourceConfigError(Exception): |
4375 | @@ -32,15 +133,96 @@ |
4376 | |
4377 | Note that 'null' (a.k.a. None) should not be quoted. |
4378 | """ |
4379 | - sources = safe_load(config_get(sources_var)) |
4380 | - keys = safe_load(config_get(keys_var)) |
4381 | - if isinstance(sources, basestring) and isinstance(keys, basestring): |
4382 | + sources = safe_load(config(sources_var)) |
4383 | + keys = config(keys_var) |
4384 | + if keys is not None: |
4385 | + keys = safe_load(keys) |
4386 | + if isinstance(sources, basestring) and ( |
4387 | + keys is None or isinstance(keys, basestring)): |
4388 | add_source(sources, keys) |
4389 | else: |
4390 | if not len(sources) == len(keys): |
4391 | msg = 'Install sources and keys lists are different lengths' |
4392 | raise SourceConfigError(msg) |
4393 | for src_num in range(len(sources)): |
4394 | - add_source(sources[src_num], sources[src_num]) |
4395 | + add_source(sources[src_num], keys[src_num]) |
4396 | if update: |
4397 | - check_call(('apt-get', 'update')) |
4398 | + apt_update(fatal=True) |
4399 | + |
4400 | +# The order of this list is very important. Handlers should be listed in from |
4401 | +# least- to most-specific URL matching. |
4402 | +FETCH_HANDLERS = ( |
4403 | + 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', |
4404 | + 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', |
4405 | +) |
4406 | + |
4407 | + |
4408 | +class UnhandledSource(Exception): |
4409 | + pass |
4410 | + |
4411 | + |
4412 | +def install_remote(source): |
4413 | + """ |
4414 | + Install a file tree from a remote source |
4415 | + |
4416 | + The specified source should be a url of the form: |
4417 | + scheme://[host]/path[#[option=value][&...]] |
4418 | + |
4419 | + Schemes supported are based on this modules submodules |
4420 | + Options supported are submodule-specific""" |
4421 | + # We ONLY check for True here because can_handle may return a string |
4422 | + # explaining why it can't handle a given source. |
4423 | + handlers = [h for h in plugins() if h.can_handle(source) is True] |
4424 | + installed_to = None |
4425 | + for handler in handlers: |
4426 | + try: |
4427 | + installed_to = handler.install(source) |
4428 | + except UnhandledSource: |
4429 | + pass |
4430 | + if not installed_to: |
4431 | + raise UnhandledSource("No handler found for source {}".format(source)) |
4432 | + return installed_to |
4433 | + |
4434 | + |
4435 | +def install_from_config(config_var_name): |
4436 | + charm_config = config() |
4437 | + source = charm_config[config_var_name] |
4438 | + return install_remote(source) |
4439 | + |
4440 | + |
4441 | +class BaseFetchHandler(object): |
4442 | + """Base class for FetchHandler implementations in fetch plugins""" |
4443 | + def can_handle(self, source): |
4444 | + """Returns True if the source can be handled. Otherwise returns |
4445 | + a string explaining why it cannot""" |
4446 | + return "Wrong source type" |
4447 | + |
4448 | + def install(self, source): |
4449 | + """Try to download and unpack the source. Return the path to the |
4450 | + unpacked files or raise UnhandledSource.""" |
4451 | + raise UnhandledSource("Wrong source type {}".format(source)) |
4452 | + |
4453 | + def parse_url(self, url): |
4454 | + return urlparse(url) |
4455 | + |
4456 | + def base_url(self, url): |
4457 | + """Return url without querystring or fragment""" |
4458 | + parts = list(self.parse_url(url)) |
4459 | + parts[4:] = ['' for i in parts[4:]] |
4460 | + return urlunparse(parts) |
4461 | + |
4462 | + |
4463 | +def plugins(fetch_handlers=None): |
4464 | + if not fetch_handlers: |
4465 | + fetch_handlers = FETCH_HANDLERS |
4466 | + plugin_list = [] |
4467 | + for handler_name in fetch_handlers: |
4468 | + package, classname = handler_name.rsplit('.', 1) |
4469 | + try: |
4470 | + handler_class = getattr(importlib.import_module(package), classname) |
4471 | + plugin_list.append(handler_class()) |
4472 | + except (ImportError, AttributeError): |
4473 | + # Skip missing plugins so that they can be omitted from |
4474 | + # installation if desired |
4475 | + log("FetchHandler {} not found, skipping plugin".format(handler_name)) |
4476 | + return plugin_list |
4477 | |
4478 | === added file 'hooks/charmhelpers/fetch/archiveurl.py' |
4479 | --- hooks/charmhelpers/fetch/archiveurl.py 1970-01-01 00:00:00 +0000 |
4480 | +++ hooks/charmhelpers/fetch/archiveurl.py 2013-11-05 18:43:49 +0000 |
4481 | @@ -0,0 +1,48 @@ |
4482 | +import os |
4483 | +import urllib2 |
4484 | +from charmhelpers.fetch import ( |
4485 | + BaseFetchHandler, |
4486 | + UnhandledSource |
4487 | +) |
4488 | +from charmhelpers.payload.archive import ( |
4489 | + get_archive_handler, |
4490 | + extract, |
4491 | +) |
4492 | +from charmhelpers.core.host import mkdir |
4493 | + |
4494 | + |
4495 | +class ArchiveUrlFetchHandler(BaseFetchHandler): |
4496 | + """Handler for archives via generic URLs""" |
4497 | + def can_handle(self, source): |
4498 | + url_parts = self.parse_url(source) |
4499 | + if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
4500 | + return "Wrong source type" |
4501 | + if get_archive_handler(self.base_url(source)): |
4502 | + return True |
4503 | + return False |
4504 | + |
4505 | + def download(self, source, dest): |
4506 | + # propagate all exceptions |
4507 | + # URLError, OSError, etc |
4508 | + response = urllib2.urlopen(source) |
4509 | + try: |
4510 | + with open(dest, 'w') as dest_file: |
4511 | + dest_file.write(response.read()) |
4512 | + except Exception as e: |
4513 | + if os.path.isfile(dest): |
4514 | + os.unlink(dest) |
4515 | + raise e |
4516 | + |
4517 | + def install(self, source): |
4518 | + url_parts = self.parse_url(source) |
4519 | + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') |
4520 | + if not os.path.exists(dest_dir): |
4521 | + mkdir(dest_dir, perms=0755) |
4522 | + dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) |
4523 | + try: |
4524 | + self.download(source, dld_file) |
4525 | + except urllib2.URLError as e: |
4526 | + raise UnhandledSource(e.reason) |
4527 | + except OSError as e: |
4528 | + raise UnhandledSource(e.strerror) |
4529 | + return extract(dld_file) |
4530 | |
4531 | === added file 'hooks/charmhelpers/fetch/bzrurl.py' |
4532 | --- hooks/charmhelpers/fetch/bzrurl.py 1970-01-01 00:00:00 +0000 |
4533 | +++ hooks/charmhelpers/fetch/bzrurl.py 2013-11-05 18:43:49 +0000 |
4534 | @@ -0,0 +1,49 @@ |
4535 | +import os |
4536 | +from charmhelpers.fetch import ( |
4537 | + BaseFetchHandler, |
4538 | + UnhandledSource |
4539 | +) |
4540 | +from charmhelpers.core.host import mkdir |
4541 | + |
4542 | +try: |
4543 | + from bzrlib.branch import Branch |
4544 | +except ImportError: |
4545 | + from charmhelpers.fetch import apt_install |
4546 | + apt_install("python-bzrlib") |
4547 | + from bzrlib.branch import Branch |
4548 | + |
4549 | +class BzrUrlFetchHandler(BaseFetchHandler): |
4550 | + """Handler for bazaar branches via generic and lp URLs""" |
4551 | + def can_handle(self, source): |
4552 | + url_parts = self.parse_url(source) |
4553 | + if url_parts.scheme not in ('bzr+ssh', 'lp'): |
4554 | + return False |
4555 | + else: |
4556 | + return True |
4557 | + |
4558 | + def branch(self, source, dest): |
4559 | + url_parts = self.parse_url(source) |
4560 | + # If we use lp:branchname scheme we need to load plugins |
4561 | + if not self.can_handle(source): |
4562 | + raise UnhandledSource("Cannot handle {}".format(source)) |
4563 | + if url_parts.scheme == "lp": |
4564 | + from bzrlib.plugin import load_plugins |
4565 | + load_plugins() |
4566 | + try: |
4567 | + remote_branch = Branch.open(source) |
4568 | + remote_branch.bzrdir.sprout(dest).open_branch() |
4569 | + except Exception as e: |
4570 | + raise e |
4571 | + |
4572 | + def install(self, source): |
4573 | + url_parts = self.parse_url(source) |
4574 | + branch_name = url_parts.path.strip("/").split("/")[-1] |
4575 | + dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) |
4576 | + if not os.path.exists(dest_dir): |
4577 | + mkdir(dest_dir, perms=0755) |
4578 | + try: |
4579 | + self.branch(source, dest_dir) |
4580 | + except OSError as e: |
4581 | + raise UnhandledSource(e.strerror) |
4582 | + return dest_dir |
4583 | + |
4584 | |
4585 | === removed directory 'hooks/charmhelpers/payload' |
4586 | === removed file 'hooks/charmhelpers/payload/__init__.py' |
4587 | --- hooks/charmhelpers/payload/__init__.py 2013-06-07 09:39:50 +0000 |
4588 | +++ hooks/charmhelpers/payload/__init__.py 1970-01-01 00:00:00 +0000 |
4589 | @@ -1,1 +0,0 @@ |
4590 | -"Tools for working with files injected into a charm just before deployment." |
4591 | |
4592 | === removed file 'hooks/charmhelpers/payload/execd.py' |
4593 | --- hooks/charmhelpers/payload/execd.py 2013-06-07 09:39:50 +0000 |
4594 | +++ hooks/charmhelpers/payload/execd.py 1970-01-01 00:00:00 +0000 |
4595 | @@ -1,40 +0,0 @@ |
4596 | -#!/usr/bin/env python |
4597 | - |
4598 | -import os |
4599 | -import sys |
4600 | -import subprocess |
4601 | -from charmhelpers.core import hookenv |
4602 | - |
4603 | - |
4604 | -def default_execd_dir(): |
4605 | - return os.path.join(os.environ['CHARM_DIR'],'exec.d') |
4606 | - |
4607 | - |
4608 | -def execd_module_paths(execd_dir=None): |
4609 | - if not execd_dir: |
4610 | - execd_dir = default_execd_dir() |
4611 | - for subpath in os.listdir(execd_dir): |
4612 | - module = os.path.join(execd_dir, subpath) |
4613 | - if os.path.isdir(module): |
4614 | - yield module |
4615 | - |
4616 | - |
4617 | -def execd_submodule_paths(submodule, execd_dir=None): |
4618 | - for module_path in execd_module_paths(execd_dir): |
4619 | - path = os.path.join(module_path, submodule) |
4620 | - if os.access(path, os.X_OK) and os.path.isfile(path): |
4621 | - yield path |
4622 | - |
4623 | - |
4624 | -def execd_run(submodule, execd_dir=None, die_on_error=False): |
4625 | - for submodule_path in execd_submodule_paths(submodule, execd_dir): |
4626 | - try: |
4627 | - subprocess.check_call(submodule_path, shell=True) |
4628 | - except subprocess.CalledProcessError as e: |
4629 | - hookenv.log(e.output) |
4630 | - if die_on_error: |
4631 | - sys.exit(e.returncode) |
4632 | - |
4633 | - |
4634 | -def execd_preinstall(execd_dir=None): |
4635 | - execd_run(execd_dir, 'charm-pre-install') |
4636 | |
4637 | === modified file 'hooks/hooks.py' |
4638 | --- hooks/hooks.py 2013-07-03 05:54:19 +0000 |
4639 | +++ hooks/hooks.py 2013-11-05 18:43:49 +0000 |
4640 | @@ -10,12 +10,15 @@ |
4641 | service_start, |
4642 | service_stop, |
4643 | adduser, |
4644 | - apt_install, |
4645 | log, |
4646 | mkdir, |
4647 | symlink, |
4648 | ) |
4649 | |
4650 | +from charmhelpers.fetch import ( |
4651 | + apt_install, |
4652 | +) |
4653 | + |
4654 | from charmhelpers.core.hookenv import ( |
4655 | Hooks, |
4656 | relation_get, |
4657 | @@ -69,7 +72,7 @@ |
4658 | |
4659 | def add_extra_repos(): |
4660 | extra_repos = config('extra_archives') |
4661 | - if extra_repos.data: #serialize cannot be cast as boolean |
4662 | + if extra_repos != None: |
4663 | repos_added = False |
4664 | extra_repos_added = set() |
4665 | for repo in extra_repos.split(): |
Thanks for this submission! LGTM!