Merge lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2 into lp:charms/etherpad-lite

Proposed by Erik B. Andersen
Status: Merged
Merged at revision: 14
Proposed branch: lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2
Merge into: lp:charms/etherpad-lite
Diff against target: 4665 lines (+588/-3619)
28 files modified
charm-helpers.yaml (+6/-0)
hooks/charmhelpers/contrib/charmhelpers/IMPORT (+0/-4)
hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-183)
hooks/charmhelpers/contrib/charmsupport/IMPORT (+0/-14)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-217)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-156)
hooks/charmhelpers/contrib/hahelpers/IMPORT (+0/-7)
hooks/charmhelpers/contrib/hahelpers/apache_utils.py (+0/-196)
hooks/charmhelpers/contrib/hahelpers/ceph_utils.py (+0/-256)
hooks/charmhelpers/contrib/hahelpers/cluster_utils.py (+0/-130)
hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py (+0/-55)
hooks/charmhelpers/contrib/hahelpers/utils.py (+0/-332)
hooks/charmhelpers/contrib/jujugui/IMPORT (+0/-4)
hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602)
hooks/charmhelpers/contrib/openstack/IMPORT (+0/-9)
hooks/charmhelpers/contrib/openstack/nova/essex (+0/-43)
hooks/charmhelpers/contrib/openstack/nova/folsom (+0/-81)
hooks/charmhelpers/contrib/openstack/nova/nova-common (+0/-147)
hooks/charmhelpers/contrib/openstack/openstack-common (+0/-781)
hooks/charmhelpers/contrib/openstack/openstack_utils.py (+0/-228)
hooks/charmhelpers/core/hookenv.py (+153/-45)
hooks/charmhelpers/core/host.py (+133/-74)
hooks/charmhelpers/fetch/__init__.py (+194/-12)
hooks/charmhelpers/fetch/archiveurl.py (+48/-0)
hooks/charmhelpers/fetch/bzrurl.py (+49/-0)
hooks/charmhelpers/payload/__init__.py (+0/-1)
hooks/charmhelpers/payload/execd.py (+0/-40)
hooks/hooks.py (+5/-2)
To merge this branch: bzr merge lp:~azendale/charms/precise/etherpad-lite/lp1247636-fix-try2
Reviewer Review Type Date Requested Status
Marco Ceppi (community) Approve
Review via email: mp+193986@code.launchpad.net

Description of the change

Updates hooks.py and adds charm-helpers.yaml so that the charm-helpers sync tool can be used.

Then runs a charm-helpers sync, which pulls in a version that should fix LP: #1247636.

To post a comment you must log in.
Revision history for this message
Marco Ceppi (marcoceppi) wrote :

Thanks for this submission! LGTM!

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== added file 'charm-helpers.yaml'
--- charm-helpers.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers.yaml 2013-11-05 18:43:49 +0000
@@ -0,0 +1,6 @@
1destination: "hooks/charmhelpers"
2branch: "lp:charm-helpers"
3include:
4 - core
5 - fetch
6
07
=== removed directory 'hooks/charmhelpers/contrib'
=== removed file 'hooks/charmhelpers/contrib/__init__.py'
=== removed directory 'hooks/charmhelpers/contrib/charmhelpers'
=== removed file 'hooks/charmhelpers/contrib/charmhelpers/IMPORT'
--- hooks/charmhelpers/contrib/charmhelpers/IMPORT 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/charmhelpers/IMPORT 1970-01-01 00:00:00 +0000
@@ -1,4 +0,0 @@
1Source lp:charm-tools/trunk
2
3charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py
4charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py
50
=== removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py'
--- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,183 +0,0 @@
1# Copyright 2012 Canonical Ltd. This software is licensed under the
2# GNU Affero General Public License version 3 (see the file LICENSE).
3
4import warnings
5warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning)
6
7"""Helper functions for writing Juju charms in Python."""
8
9__metaclass__ = type
10__all__ = [
11 #'get_config', # core.hookenv.config()
12 #'log', # core.hookenv.log()
13 #'log_entry', # core.hookenv.log()
14 #'log_exit', # core.hookenv.log()
15 #'relation_get', # core.hookenv.relation_get()
16 #'relation_set', # core.hookenv.relation_set()
17 #'relation_ids', # core.hookenv.relation_ids()
18 #'relation_list', # core.hookenv.relation_units()
19 #'config_get', # core.hookenv.config()
20 #'unit_get', # core.hookenv.unit_get()
21 #'open_port', # core.hookenv.open_port()
22 #'close_port', # core.hookenv.close_port()
23 #'service_control', # core.host.service()
24 'unit_info', # client-side, NOT IMPLEMENTED
25 'wait_for_machine', # client-side, NOT IMPLEMENTED
26 'wait_for_page_contents', # client-side, NOT IMPLEMENTED
27 'wait_for_relation', # client-side, NOT IMPLEMENTED
28 'wait_for_unit', # client-side, NOT IMPLEMENTED
29 ]
30
31import operator
32from shelltoolbox import (
33 command,
34)
35import tempfile
36import time
37import urllib2
38import yaml
39
40SLEEP_AMOUNT = 0.1
41# We create a juju_status Command here because it makes testing much,
42# much easier.
43juju_status = lambda: command('juju')('status')
44
45# re-implemented as charmhelpers.fetch.configure_sources()
46#def configure_source(update=False):
47# source = config_get('source')
48# if ((source.startswith('ppa:') or
49# source.startswith('cloud:') or
50# source.startswith('http:'))):
51# run('add-apt-repository', source)
52# if source.startswith("http:"):
53# run('apt-key', 'import', config_get('key'))
54# if update:
55# run('apt-get', 'update')
56
57# DEPRECATED: client-side only
58def make_charm_config_file(charm_config):
59 charm_config_file = tempfile.NamedTemporaryFile()
60 charm_config_file.write(yaml.dump(charm_config))
61 charm_config_file.flush()
62 # The NamedTemporaryFile instance is returned instead of just the name
63 # because we want to take advantage of garbage collection-triggered
64 # deletion of the temp file when it goes out of scope in the caller.
65 return charm_config_file
66
67
68# DEPRECATED: client-side only
69def unit_info(service_name, item_name, data=None, unit=None):
70 if data is None:
71 data = yaml.safe_load(juju_status())
72 service = data['services'].get(service_name)
73 if service is None:
74 # XXX 2012-02-08 gmb:
75 # This allows us to cope with the race condition that we
76 # have between deploying a service and having it come up in
77 # `juju status`. We could probably do with cleaning it up so
78 # that it fails a bit more noisily after a while.
79 return ''
80 units = service['units']
81 if unit is not None:
82 item = units[unit][item_name]
83 else:
84 # It might seem odd to sort the units here, but we do it to
85 # ensure that when no unit is specified, the first unit for the
86 # service (or at least the one with the lowest number) is the
87 # one whose data gets returned.
88 sorted_unit_names = sorted(units.keys())
89 item = units[sorted_unit_names[0]][item_name]
90 return item
91
92
93# DEPRECATED: client-side only
94def get_machine_data():
95 return yaml.safe_load(juju_status())['machines']
96
97
98# DEPRECATED: client-side only
99def wait_for_machine(num_machines=1, timeout=300):
100 """Wait `timeout` seconds for `num_machines` machines to come up.
101
102 This wait_for... function can be called by other wait_for functions
103 whose timeouts might be too short in situations where only a bare
104 Juju setup has been bootstrapped.
105
106 :return: A tuple of (num_machines, time_taken). This is used for
107 testing.
108 """
109 # You may think this is a hack, and you'd be right. The easiest way
110 # to tell what environment we're working in (LXC vs EC2) is to check
111 # the dns-name of the first machine. If it's localhost we're in LXC
112 # and we can just return here.
113 if get_machine_data()[0]['dns-name'] == 'localhost':
114 return 1, 0
115 start_time = time.time()
116 while True:
117 # Drop the first machine, since it's the Zookeeper and that's
118 # not a machine that we need to wait for. This will only work
119 # for EC2 environments, which is why we return early above if
120 # we're in LXC.
121 machine_data = get_machine_data()
122 non_zookeeper_machines = [
123 machine_data[key] for key in machine_data.keys()[1:]]
124 if len(non_zookeeper_machines) >= num_machines:
125 all_machines_running = True
126 for machine in non_zookeeper_machines:
127 if machine.get('instance-state') != 'running':
128 all_machines_running = False
129 break
130 if all_machines_running:
131 break
132 if time.time() - start_time >= timeout:
133 raise RuntimeError('timeout waiting for service to start')
134 time.sleep(SLEEP_AMOUNT)
135 return num_machines, time.time() - start_time
136
137
138# DEPRECATED: client-side only
139def wait_for_unit(service_name, timeout=480):
140 """Wait `timeout` seconds for a given service name to come up."""
141 wait_for_machine(num_machines=1)
142 start_time = time.time()
143 while True:
144 state = unit_info(service_name, 'agent-state')
145 if 'error' in state or state == 'started':
146 break
147 if time.time() - start_time >= timeout:
148 raise RuntimeError('timeout waiting for service to start')
149 time.sleep(SLEEP_AMOUNT)
150 if state != 'started':
151 raise RuntimeError('unit did not start, agent-state: ' + state)
152
153
154# DEPRECATED: client-side only
155def wait_for_relation(service_name, relation_name, timeout=120):
156 """Wait `timeout` seconds for a given relation to come up."""
157 start_time = time.time()
158 while True:
159 relation = unit_info(service_name, 'relations').get(relation_name)
160 if relation is not None and relation['state'] == 'up':
161 break
162 if time.time() - start_time >= timeout:
163 raise RuntimeError('timeout waiting for relation to be up')
164 time.sleep(SLEEP_AMOUNT)
165
166
167# DEPRECATED: client-side only
168def wait_for_page_contents(url, contents, timeout=120, validate=None):
169 if validate is None:
170 validate = operator.contains
171 start_time = time.time()
172 while True:
173 try:
174 stream = urllib2.urlopen(url)
175 except (urllib2.HTTPError, urllib2.URLError):
176 pass
177 else:
178 page = stream.read()
179 if validate(page, contents):
180 return page
181 if time.time() - start_time >= timeout:
182 raise RuntimeError('timeout waiting for contents of ' + url)
183 time.sleep(SLEEP_AMOUNT)
1840
=== removed directory 'hooks/charmhelpers/contrib/charmsupport'
=== removed file 'hooks/charmhelpers/contrib/charmsupport/IMPORT'
--- hooks/charmhelpers/contrib/charmsupport/IMPORT 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/charmsupport/IMPORT 1970-01-01 00:00:00 +0000
@@ -1,14 +0,0 @@
1Source: lp:charmsupport/trunk
2
3charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py
4charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py
5charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py
6charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py
7charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py
8
9charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py
10charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py
11charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py
12charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py
13
14charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport
150
=== removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
@@ -1,217 +0,0 @@
1"""Compatibility with the nrpe-external-master charm"""
2# Copyright 2012 Canonical Ltd.
3#
4# Authors:
5# Matthew Wedgwood <matthew.wedgwood@canonical.com>
6
7import subprocess
8import pwd
9import grp
10import os
11import re
12import shlex
13import yaml
14
15from charmhelpers.core.hookenv import (
16 config,
17 local_unit,
18 log,
19 relation_ids,
20 relation_set,
21 )
22from charmhelpers.core.host import service
23
24# This module adds compatibility with the nrpe-external-master and plain nrpe
25# subordinate charms. To use it in your charm:
26#
27# 1. Update metadata.yaml
28#
29# provides:
30# (...)
31# nrpe-external-master:
32# interface: nrpe-external-master
33# scope: container
34#
35# and/or
36#
37# provides:
38# (...)
39# local-monitors:
40# interface: local-monitors
41# scope: container
42
43#
44# 2. Add the following to config.yaml
45#
46# nagios_context:
47# default: "juju"
48# type: string
49# description: |
50# Used by the nrpe subordinate charms.
51# A string that will be prepended to instance name to set the host name
52# in nagios. So for instance the hostname would be something like:
53# juju-myservice-0
54# If you're running multiple environments with the same services in them
55# this allows you to differentiate between them.
56#
57# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
58#
59# 4. Update your hooks.py with something like this:
60#
61# from charmsupport.nrpe import NRPE
62# (...)
63# def update_nrpe_config():
64# nrpe_compat = NRPE()
65# nrpe_compat.add_check(
66# shortname = "myservice",
67# description = "Check MyService",
68# check_cmd = "check_http -w 2 -c 10 http://localhost"
69# )
70# nrpe_compat.add_check(
71# "myservice_other",
72# "Check for widget failures",
73# check_cmd = "/srv/myapp/scripts/widget_check"
74# )
75# nrpe_compat.write()
76#
77# def config_changed():
78# (...)
79# update_nrpe_config()
80#
81# def nrpe_external_master_relation_changed():
82# update_nrpe_config()
83#
84# def local_monitors_relation_changed():
85# update_nrpe_config()
86#
87# 5. ln -s hooks.py nrpe-external-master-relation-changed
88# ln -s hooks.py local-monitors-relation-changed
89
90
91class CheckException(Exception):
92 pass
93
94
95class Check(object):
96 shortname_re = '[A-Za-z0-9-_]+$'
97 service_template = ("""
98#---------------------------------------------------
99# This file is Juju managed
100#---------------------------------------------------
101define service {{
102 use active-service
103 host_name {nagios_hostname}
104 service_description {nagios_hostname}[{shortname}] """
105 """{description}
106 check_command check_nrpe!{command}
107 servicegroups {nagios_servicegroup}
108}}
109""")
110
111 def __init__(self, shortname, description, check_cmd):
112 super(Check, self).__init__()
113 # XXX: could be better to calculate this from the service name
114 if not re.match(self.shortname_re, shortname):
115 raise CheckException("shortname must match {}".format(
116 Check.shortname_re))
117 self.shortname = shortname
118 self.command = "check_{}".format(shortname)
119 # Note: a set of invalid characters is defined by the
120 # Nagios server config
121 # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
122 self.description = description
123 self.check_cmd = self._locate_cmd(check_cmd)
124
125 def _locate_cmd(self, check_cmd):
126 search_path = (
127 '/',
128 os.path.join(os.environ['CHARM_DIR'],
129 'files/nrpe-external-master'),
130 '/usr/lib/nagios/plugins',
131 )
132 parts = shlex.split(check_cmd)
133 for path in search_path:
134 if os.path.exists(os.path.join(path, parts[0])):
135 command = os.path.join(path, parts[0])
136 if len(parts) > 1:
137 command += " " + " ".join(parts[1:])
138 return command
139 log('Check command not found: {}'.format(parts[0]))
140 return ''
141
142 def write(self, nagios_context, hostname):
143 nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
144 self.command)
145 with open(nrpe_check_file, 'w') as nrpe_check_config:
146 nrpe_check_config.write("# check {}\n".format(self.shortname))
147 nrpe_check_config.write("command[{}]={}\n".format(
148 self.command, self.check_cmd))
149
150 if not os.path.exists(NRPE.nagios_exportdir):
151 log('Not writing service config as {} is not accessible'.format(
152 NRPE.nagios_exportdir))
153 else:
154 self.write_service_config(nagios_context, hostname)
155
156 def write_service_config(self, nagios_context, hostname):
157 for f in os.listdir(NRPE.nagios_exportdir):
158 if re.search('.*{}.cfg'.format(self.command), f):
159 os.remove(os.path.join(NRPE.nagios_exportdir, f))
160
161 templ_vars = {
162 'nagios_hostname': hostname,
163 'nagios_servicegroup': nagios_context,
164 'description': self.description,
165 'shortname': self.shortname,
166 'command': self.command,
167 }
168 nrpe_service_text = Check.service_template.format(**templ_vars)
169 nrpe_service_file = '{}/service__{}_{}.cfg'.format(
170 NRPE.nagios_exportdir, hostname, self.command)
171 with open(nrpe_service_file, 'w') as nrpe_service_config:
172 nrpe_service_config.write(str(nrpe_service_text))
173
174 def run(self):
175 subprocess.call(self.check_cmd)
176
177
178class NRPE(object):
179 nagios_logdir = '/var/log/nagios'
180 nagios_exportdir = '/var/lib/nagios/export'
181 nrpe_confdir = '/etc/nagios/nrpe.d'
182
183 def __init__(self):
184 super(NRPE, self).__init__()
185 self.config = config()
186 self.nagios_context = self.config['nagios_context']
187 self.unit_name = local_unit().replace('/', '-')
188 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
189 self.checks = []
190
191 def add_check(self, *args, **kwargs):
192 self.checks.append(Check(*args, **kwargs))
193
194 def write(self):
195 try:
196 nagios_uid = pwd.getpwnam('nagios').pw_uid
197 nagios_gid = grp.getgrnam('nagios').gr_gid
198 except:
199 log("Nagios user not set up, nrpe checks not updated")
200 return
201
202 if not os.path.exists(NRPE.nagios_logdir):
203 os.mkdir(NRPE.nagios_logdir)
204 os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
205
206 nrpe_monitors = {}
207 monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
208 for nrpecheck in self.checks:
209 nrpecheck.write(self.nagios_context, self.hostname)
210 nrpe_monitors[nrpecheck.shortname] = {
211 "command": nrpecheck.command,
212 }
213
214 service('restart', 'nagios-nrpe-server')
215
216 for rid in relation_ids("local-monitors"):
217 relation_set(relation_id=rid, monitors=yaml.dump(monitors))
2180
=== removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
--- hooks/charmhelpers/contrib/charmsupport/volumes.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
@@ -1,156 +0,0 @@
1'''
2Functions for managing volumes in juju units. One volume is supported per unit.
3Subordinates may have their own storage, provided it is on its own partition.
4
5Configuration stanzas:
6 volume-ephemeral:
7 type: boolean
8 default: true
9 description: >
10 If false, a volume is mounted as sepecified in "volume-map"
11 If true, ephemeral storage will be used, meaning that log data
12 will only exist as long as the machine. YOU HAVE BEEN WARNED.
13 volume-map:
14 type: string
15 default: {}
16 description: >
17 YAML map of units to device names, e.g:
18 "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
19 Service units will raise a configure-error if volume-ephemeral
20 is 'true' and no volume-map value is set. Use 'juju set' to set a
21 value and 'juju resolved' to complete configuration.
22
23Usage:
24 from charmsupport.volumes import configure_volume, VolumeConfigurationError
25 from charmsupport.hookenv import log, ERROR
26 def post_mount_hook():
27 stop_service('myservice')
28 def post_mount_hook():
29 start_service('myservice')
30
31 if __name__ == '__main__':
32 try:
33 configure_volume(before_change=pre_mount_hook,
34 after_change=post_mount_hook)
35 except VolumeConfigurationError:
36 log('Storage could not be configured', ERROR)
37'''
38
39# XXX: Known limitations
40# - fstab is neither consulted nor updated
41
42import os
43import hookenv
44import host
45import yaml
46
47
48MOUNT_BASE = '/srv/juju/volumes'
49
50
51class VolumeConfigurationError(Exception):
52 '''Volume configuration data is missing or invalid'''
53 pass
54
55
56def get_config():
57 '''Gather and sanity-check volume configuration data'''
58 volume_config = {}
59 config = hookenv.config()
60
61 errors = False
62
63 if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
64 volume_config['ephemeral'] = True
65 else:
66 volume_config['ephemeral'] = False
67
68 try:
69 volume_map = yaml.safe_load(config.get('volume-map', '{}'))
70 except yaml.YAMLError as e:
71 hookenv.log("Error parsing YAML volume-map: {}".format(e),
72 hookenv.ERROR)
73 errors = True
74 if volume_map is None:
75 # probably an empty string
76 volume_map = {}
77 elif isinstance(volume_map, dict):
78 hookenv.log("Volume-map should be a dictionary, not {}".format(
79 type(volume_map)))
80 errors = True
81
82 volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
83 if volume_config['device'] and volume_config['ephemeral']:
84 # asked for ephemeral storage but also defined a volume ID
85 hookenv.log('A volume is defined for this unit, but ephemeral '
86 'storage was requested', hookenv.ERROR)
87 errors = True
88 elif not volume_config['device'] and not volume_config['ephemeral']:
89 # asked for permanent storage but did not define volume ID
90 hookenv.log('Ephemeral storage was requested, but there is no volume '
91 'defined for this unit.', hookenv.ERROR)
92 errors = True
93
94 unit_mount_name = hookenv.local_unit().replace('/', '-')
95 volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
96
97 if errors:
98 return None
99 return volume_config
100
101
102def mount_volume(config):
103 if os.path.exists(config['mountpoint']):
104 if not os.path.isdir(config['mountpoint']):
105 hookenv.log('Not a directory: {}'.format(config['mountpoint']))
106 raise VolumeConfigurationError()
107 else:
108 host.mkdir(config['mountpoint'])
109 if os.path.ismount(config['mountpoint']):
110 unmount_volume(config)
111 if not host.mount(config['device'], config['mountpoint'], persist=True):
112 raise VolumeConfigurationError()
113
114
115def unmount_volume(config):
116 if os.path.ismount(config['mountpoint']):
117 if not host.umount(config['mountpoint'], persist=True):
118 raise VolumeConfigurationError()
119
120
121def managed_mounts():
122 '''List of all mounted managed volumes'''
123 return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
124
125
126def configure_volume(before_change=lambda: None, after_change=lambda: None):
127 '''Set up storage (or don't) according to the charm's volume configuration.
128 Returns the mount point or "ephemeral". before_change and after_change
129 are optional functions to be called if the volume configuration changes.
130 '''
131
132 config = get_config()
133 if not config:
134 hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
135 raise VolumeConfigurationError()
136
137 if config['ephemeral']:
138 if os.path.ismount(config['mountpoint']):
139 before_change()
140 unmount_volume(config)
141 after_change()
142 return 'ephemeral'
143 else:
144 # persistent storage
145 if os.path.ismount(config['mountpoint']):
146 mounts = dict(managed_mounts())
147 if mounts.get(config['mountpoint']) != config['device']:
148 before_change()
149 unmount_volume(config)
150 mount_volume(config)
151 after_change()
152 else:
153 before_change()
154 mount_volume(config)
155 after_change()
156 return config['mountpoint']
1570
=== removed directory 'hooks/charmhelpers/contrib/hahelpers'
=== removed file 'hooks/charmhelpers/contrib/hahelpers/IMPORT'
--- hooks/charmhelpers/contrib/hahelpers/IMPORT 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/hahelpers/IMPORT 1970-01-01 00:00:00 +0000
@@ -1,7 +0,0 @@
1Source: lp:~openstack-charmers/openstack-charm-helpers/ha-helpers
2
3ha-helpers/lib/apache_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/apache_utils.py
4ha-helpers/lib/cluster_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/cluster_utils.py
5ha-helpers/lib/ceph_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/ceph_utils.py
6ha-helpers/lib/haproxy_utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/haproxy_utils.py
7ha-helpers/lib/utils.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/utils.py
80
=== removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/hahelpers/apache_utils.py'
--- hooks/charmhelpers/contrib/hahelpers/apache_utils.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/hahelpers/apache_utils.py 1970-01-01 00:00:00 +0000
@@ -1,196 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11from hahelpers.utils import (
12 relation_ids,
13 relation_list,
14 relation_get,
15 render_template,
16 juju_log,
17 config_get,
18 install,
19 get_host_ip,
20 restart
21 )
22from hahelpers.cluster_utils import https
23
24import os
25import subprocess
26from base64 import b64decode
27
28APACHE_SITE_DIR = "/etc/apache2/sites-available"
29SITE_TEMPLATE = "apache2_site.tmpl"
30RELOAD_CHECK = "To activate the new configuration"
31
32
33def get_cert():
34 cert = config_get('ssl_cert')
35 key = config_get('ssl_key')
36 if not (cert and key):
37 juju_log('INFO',
38 "Inspecting identity-service relations for SSL certificate.")
39 cert = key = None
40 for r_id in relation_ids('identity-service'):
41 for unit in relation_list(r_id):
42 if not cert:
43 cert = relation_get('ssl_cert',
44 rid=r_id, unit=unit)
45 if not key:
46 key = relation_get('ssl_key',
47 rid=r_id, unit=unit)
48 return (cert, key)
49
50
51def get_ca_cert():
52 ca_cert = None
53 juju_log('INFO',
54 "Inspecting identity-service relations for CA SSL certificate.")
55 for r_id in relation_ids('identity-service'):
56 for unit in relation_list(r_id):
57 if not ca_cert:
58 ca_cert = relation_get('ca_cert',
59 rid=r_id, unit=unit)
60 return ca_cert
61
62
63def install_ca_cert(ca_cert):
64 if ca_cert:
65 with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
66 'w') as crt:
67 crt.write(ca_cert)
68 subprocess.check_call(['update-ca-certificates', '--fresh'])
69
70
71def enable_https(port_maps, namespace, cert, key, ca_cert=None):
72 '''
73 For a given number of port mappings, configures apache2
74 HTTPs local reverse proxying using certficates and keys provided in
75 either configuration data (preferred) or relation data. Assumes ports
76 are not in use (calling charm should ensure that).
77
78 port_maps: dict: external to internal port mappings
79 namespace: str: name of charm
80 '''
81 def _write_if_changed(path, new_content):
82 content = None
83 if os.path.exists(path):
84 with open(path, 'r') as f:
85 content = f.read().strip()
86 if content != new_content:
87 with open(path, 'w') as f:
88 f.write(new_content)
89 return True
90 else:
91 return False
92
93 juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
94 http_restart = False
95
96 if cert:
97 cert = b64decode(cert)
98 if key:
99 key = b64decode(key)
100 if ca_cert:
101 ca_cert = b64decode(ca_cert)
102
103 if not cert and not key:
104 juju_log('ERROR',
105 "Expected but could not find SSL certificate data, not "
106 "configuring HTTPS!")
107 return False
108
109 install('apache2')
110 if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
111 'proxy', 'proxy_http']):
112 http_restart = True
113
114 ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
115 if not os.path.exists(ssl_dir):
116 os.makedirs(ssl_dir)
117
118 if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
119 http_restart = True
120 if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
121 http_restart = True
122 os.chmod(os.path.join(ssl_dir, 'key'), 0600)
123
124 install_ca_cert(ca_cert)
125
126 sites_dir = '/etc/apache2/sites-available'
127 for ext_port, int_port in port_maps.items():
128 juju_log('INFO',
129 'Creating apache2 reverse proxy vhost'
130 ' for {}:{}'.format(ext_port,
131 int_port))
132 site = "{}_{}".format(namespace, ext_port)
133 site_path = os.path.join(sites_dir, site)
134 with open(site_path, 'w') as fsite:
135 context = {
136 "ext": ext_port,
137 "int": int_port,
138 "namespace": namespace,
139 "private_address": get_host_ip()
140 }
141 fsite.write(render_template(SITE_TEMPLATE,
142 context))
143
144 if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
145 http_restart = True
146
147 if http_restart:
148 restart('apache2')
149
150 return True
151
152
153def disable_https(port_maps, namespace):
154 '''
155 Ensure HTTPS reverse proxying is disables for given port mappings
156
157 port_maps: dict: of ext -> int port mappings
158 namespace: str: name of chamr
159 '''
160 juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))
161
162 if (not os.path.exists('/etc/apache2') or
163 not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
164 return
165
166 http_restart = False
167 for ext_port in port_maps.keys():
168 if os.path.exists(os.path.join(APACHE_SITE_DIR,
169 "{}_{}".format(namespace,
170 ext_port))):
171 juju_log('INFO',
172 "Disabling HTTPS reverse proxy"
173 " for {} {}.".format(namespace,
174 ext_port))
175 if (RELOAD_CHECK in
176 subprocess.check_output(['a2dissite',
177 '{}_{}'.format(namespace,
178 ext_port)])):
179 http_restart = True
180
181 if http_restart:
182 restart(['apache2'])
183
184
185def setup_https(port_maps, namespace, cert, key, ca_cert=None):
186 '''
187 Ensures HTTPS is either enabled or disabled for given port
188 mapping.
189
190 port_maps: dict: of ext -> int port mappings
191 namespace: str: name of charm
192 '''
193 if not https:
194 disable_https(port_maps, namespace)
195 else:
196 enable_https(port_maps, namespace, cert, key, ca_cert)
1970
=== removed file 'hooks/charmhelpers/contrib/hahelpers/ceph_utils.py'
--- hooks/charmhelpers/contrib/hahelpers/ceph_utils.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/hahelpers/ceph_utils.py 1970-01-01 00:00:00 +0000
@@ -1,256 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import commands
12import subprocess
13import os
14import shutil
15import hahelpers.utils as utils
16
17KEYRING = '/etc/ceph/ceph.client.%s.keyring'
18KEYFILE = '/etc/ceph/ceph.client.%s.key'
19
20CEPH_CONF = """[global]
21 auth supported = %(auth)s
22 keyring = %(keyring)s
23 mon host = %(mon_hosts)s
24"""
25
26
27def execute(cmd):
28 subprocess.check_call(cmd)
29
30
31def execute_shell(cmd):
32 subprocess.check_call(cmd, shell=True)
33
34
35def install():
36 ceph_dir = "/etc/ceph"
37 if not os.path.isdir(ceph_dir):
38 os.mkdir(ceph_dir)
39 utils.install('ceph-common')
40
41
42def rbd_exists(service, pool, rbd_img):
43 (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\
44 (service, pool))
45 return rbd_img in out
46
47
48def create_rbd_image(service, pool, image, sizemb):
49 cmd = [
50 'rbd',
51 'create',
52 image,
53 '--size',
54 str(sizemb),
55 '--id',
56 service,
57 '--pool',
58 pool
59 ]
60 execute(cmd)
61
62
63def pool_exists(service, name):
64 (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
65 return name in out
66
67
68def create_pool(service, name):
69 cmd = [
70 'rados',
71 '--id',
72 service,
73 'mkpool',
74 name
75 ]
76 execute(cmd)
77
78
79def keyfile_path(service):
80 return KEYFILE % service
81
82
83def keyring_path(service):
84 return KEYRING % service
85
86
87def create_keyring(service, key):
88 keyring = keyring_path(service)
89 if os.path.exists(keyring):
90 utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
91 cmd = [
92 'ceph-authtool',
93 keyring,
94 '--create-keyring',
95 '--name=client.%s' % service,
96 '--add-key=%s' % key
97 ]
98 execute(cmd)
99 utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
100
101
102def create_key_file(service, key):
103 # create a file containing the key
104 keyfile = keyfile_path(service)
105 if os.path.exists(keyfile):
106 utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
107 fd = open(keyfile, 'w')
108 fd.write(key)
109 fd.close()
110 utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
111
112
113def get_ceph_nodes():
114 hosts = []
115 for r_id in utils.relation_ids('ceph'):
116 for unit in utils.relation_list(r_id):
117 hosts.append(utils.relation_get('private-address',
118 unit=unit, rid=r_id))
119 return hosts
120
121
122def configure(service, key, auth):
123 create_keyring(service, key)
124 create_key_file(service, key)
125 hosts = get_ceph_nodes()
126 mon_hosts = ",".join(map(str, hosts))
127 keyring = keyring_path(service)
128 with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
129 ceph_conf.write(CEPH_CONF % locals())
130 modprobe_kernel_module('rbd')
131
132
133def image_mapped(image_name):
134 (rc, out) = commands.getstatusoutput('rbd showmapped')
135 return image_name in out
136
137
138def map_block_storage(service, pool, image):
139 cmd = [
140 'rbd',
141 'map',
142 '%s/%s' % (pool, image),
143 '--user',
144 service,
145 '--secret',
146 keyfile_path(service),
147 ]
148 execute(cmd)
149
150
151def filesystem_mounted(fs):
152 return subprocess.call(['grep', '-wqs', fs, '/proc/mounts']) == 0
153
154
155def make_filesystem(blk_device, fstype='ext4'):
156 utils.juju_log('INFO',
157 'ceph: Formatting block device %s as filesystem %s.' %\
158 (blk_device, fstype))
159 cmd = ['mkfs', '-t', fstype, blk_device]
160 execute(cmd)
161
162
163def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
164 # mount block device into /mnt
165 cmd = ['mount', '-t', fstype, blk_device, '/mnt']
166 execute(cmd)
167
168 # copy data to /mnt
169 try:
170 copy_files(data_src_dst, '/mnt')
171 except:
172 pass
173
174 # umount block device
175 cmd = ['umount', '/mnt']
176 execute(cmd)
177
178 _dir = os.stat(data_src_dst)
179 uid = _dir.st_uid
180 gid = _dir.st_gid
181
182 # re-mount where the data should originally be
183 cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
184 execute(cmd)
185
186 # ensure original ownership of new mount.
187 cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
188 execute(cmd)
189
190
191# TODO: re-use
192def modprobe_kernel_module(module):
193 utils.juju_log('INFO', 'Loading kernel module')
194 cmd = ['modprobe', module]
195 execute(cmd)
196 cmd = 'echo %s >> /etc/modules' % module
197 execute_shell(cmd)
198
199
200def copy_files(src, dst, symlinks=False, ignore=None):
201 for item in os.listdir(src):
202 s = os.path.join(src, item)
203 d = os.path.join(dst, item)
204 if os.path.isdir(s):
205 shutil.copytree(s, d, symlinks, ignore)
206 else:
207 shutil.copy2(s, d)
208
209
210def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
211 blk_device, fstype, system_services=[]):
212 """
213 To be called from the current cluster leader.
214 Ensures given pool and RBD image exists, is mapped to a block device,
215 and the device is formatted and mounted at the given mount_point.
216
217 If formatting a device for the first time, data existing at mount_point
218 will be migrated to the RBD device before being remounted.
219
220 All services listed in system_services will be stopped prior to data
221 migration and restarted when complete.
222 """
223 # Ensure pool, RBD image, RBD mappings are in place.
224 if not pool_exists(service, pool):
225 utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
226 create_pool(service, pool)
227
228 if not rbd_exists(service, pool, rbd_img):
229 utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
230 create_rbd_image(service, pool, rbd_img, sizemb)
231
232 if not image_mapped(rbd_img):
233 utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
234 map_block_storage(service, pool, rbd_img)
235
236 # make file system
237 # TODO: What happens if for whatever reason this is run again and
238 # the data is already in the rbd device and/or is mounted??
239 # When it is mounted already, it will fail to make the fs
240 # XXX: This is really sketchy! Need to at least add an fstab entry
241 # otherwise this hook will blow away existing data if its executed
242 # after a reboot.
243 if not filesystem_mounted(mount_point):
244 make_filesystem(blk_device, fstype)
245
246 for svc in system_services:
247 if utils.running(svc):
248 utils.juju_log('INFO',
249 'Stopping services %s prior to migrating '\
250 'data' % svc)
251 utils.stop(svc)
252
253 place_data_on_ceph(service, blk_device, mount_point, fstype)
254
255 for svc in system_services:
256 utils.start(svc)
2570
=== removed file 'hooks/charmhelpers/contrib/hahelpers/cluster_utils.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster_utils.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster_utils.py 1970-01-01 00:00:00 +0000
@@ -1,130 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11from hahelpers.utils import (
12 juju_log,
13 relation_ids,
14 relation_list,
15 relation_get,
16 get_unit_hostname,
17 config_get
18 )
19import subprocess
20import os
21
22
23def is_clustered():
24 for r_id in (relation_ids('ha') or []):
25 for unit in (relation_list(r_id) or []):
26 clustered = relation_get('clustered',
27 rid=r_id,
28 unit=unit)
29 if clustered:
30 return True
31 return False
32
33
34def is_leader(resource):
35 cmd = [
36 "crm", "resource",
37 "show", resource
38 ]
39 try:
40 status = subprocess.check_output(cmd)
41 except subprocess.CalledProcessError:
42 return False
43 else:
44 if get_unit_hostname() in status:
45 return True
46 else:
47 return False
48
49
50def peer_units():
51 peers = []
52 for r_id in (relation_ids('cluster') or []):
53 for unit in (relation_list(r_id) or []):
54 peers.append(unit)
55 return peers
56
57
58def oldest_peer(peers):
59 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
60 for peer in peers:
61 remote_unit_no = int(peer.split('/')[1])
62 if remote_unit_no < local_unit_no:
63 return False
64 return True
65
66
67def eligible_leader(resource):
68 if is_clustered():
69 if not is_leader(resource):
70 juju_log('INFO', 'Deferring action to CRM leader.')
71 return False
72 else:
73 peers = peer_units()
74 if peers and not oldest_peer(peers):
75 juju_log('INFO', 'Deferring action to oldest service unit.')
76 return False
77 return True
78
79
80def https():
81 '''
82 Determines whether enough data has been provided in configuration
83 or relation data to configure HTTPS
84 .
85 returns: boolean
86 '''
87 if config_get('use-https') == "yes":
88 return True
89 if config_get('ssl_cert') and config_get('ssl_key'):
90 return True
91 for r_id in relation_ids('identity-service'):
92 for unit in relation_list(r_id):
93 if (relation_get('https_keystone', rid=r_id, unit=unit) and
94 relation_get('ssl_cert', rid=r_id, unit=unit) and
95 relation_get('ssl_key', rid=r_id, unit=unit) and
96 relation_get('ca_cert', rid=r_id, unit=unit)):
97 return True
98 return False
99
100
101def determine_api_port(public_port):
102 '''
103 Determine correct API server listening port based on
104 existence of HTTPS reverse proxy and/or haproxy.
105
106 public_port: int: standard public port for given service
107
108 returns: int: the correct listening port for the API service
109 '''
110 i = 0
111 if len(peer_units()) > 0 or is_clustered():
112 i += 1
113 if https():
114 i += 1
115 return public_port - (i * 10)
116
117
118def determine_haproxy_port(public_port):
119 '''
120 Description: Determine correct proxy listening port based on public IP +
121 existence of HTTPS reverse proxy.
122
123 public_port: int: standard public port for given service
124
125 returns: int: the correct listening port for the HAProxy service
126 '''
127 i = 0
128 if https():
129 i += 1
130 return public_port - (i * 10)
1310
=== removed file 'hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py'
--- hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py 1970-01-01 00:00:00 +0000
@@ -1,55 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11from lib.utils import (
12 relation_ids,
13 relation_list,
14 relation_get,
15 unit_get,
16 reload,
17 render_template
18 )
19import os
20
21HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
22HAPROXY_DEFAULT = '/etc/default/haproxy'
23
24
25def configure_haproxy(service_ports):
26 '''
27 Configure HAProxy based on the current peers in the service
28 cluster using the provided port map:
29
30 "swift": [ 8080, 8070 ]
31
32 HAproxy will also be reloaded/started if required
33
34 service_ports: dict: dict of lists of [ frontend, backend ]
35 '''
36 cluster_hosts = {}
37 cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
38 unit_get('private-address')
39 for r_id in relation_ids('cluster'):
40 for unit in relation_list(r_id):
41 cluster_hosts[unit.replace('/', '-')] = \
42 relation_get(attribute='private-address',
43 rid=r_id,
44 unit=unit)
45 context = {
46 'units': cluster_hosts,
47 'service_ports': service_ports
48 }
49 with open(HAPROXY_CONF, 'w') as f:
50 f.write(render_template(os.path.basename(HAPROXY_CONF),
51 context))
52 with open(HAPROXY_DEFAULT, 'w') as f:
53 f.write('ENABLED=1')
54
55 reload('haproxy')
560
=== removed file 'hooks/charmhelpers/contrib/hahelpers/utils.py'
--- hooks/charmhelpers/contrib/hahelpers/utils.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/hahelpers/utils.py 1970-01-01 00:00:00 +0000
@@ -1,332 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Paul Collins <paul.collins@canonical.com>
9# Adam Gandelman <adamg@ubuntu.com>
10#
11
12import json
13import os
14import subprocess
15import socket
16import sys
17
18
19def do_hooks(hooks):
20 hook = os.path.basename(sys.argv[0])
21
22 try:
23 hook_func = hooks[hook]
24 except KeyError:
25 juju_log('INFO',
26 "This charm doesn't know how to handle '{}'.".format(hook))
27 else:
28 hook_func()
29
30
31def install(*pkgs):
32 cmd = [
33 'apt-get',
34 '-y',
35 'install'
36 ]
37 for pkg in pkgs:
38 cmd.append(pkg)
39 subprocess.check_call(cmd)
40
41TEMPLATES_DIR = 'templates'
42
43try:
44 import jinja2
45except ImportError:
46 install('python-jinja2')
47 import jinja2
48
49try:
50 import dns.resolver
51except ImportError:
52 install('python-dnspython')
53 import dns.resolver
54
55
56def render_template(template_name, context, template_dir=TEMPLATES_DIR):
57 templates = jinja2.Environment(
58 loader=jinja2.FileSystemLoader(template_dir)
59 )
60 template = templates.get_template(template_name)
61 return template.render(context)
62
63CLOUD_ARCHIVE = \
64""" # Ubuntu Cloud Archive
65deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
66"""
67
68CLOUD_ARCHIVE_POCKETS = {
69 'folsom': 'precise-updates/folsom',
70 'folsom/updates': 'precise-updates/folsom',
71 'folsom/proposed': 'precise-proposed/folsom',
72 'grizzly': 'precise-updates/grizzly',
73 'grizzly/updates': 'precise-updates/grizzly',
74 'grizzly/proposed': 'precise-proposed/grizzly'
75 }
76
77
78def configure_source():
79 source = str(config_get('openstack-origin'))
80 if not source:
81 return
82 if source.startswith('ppa:'):
83 cmd = [
84 'add-apt-repository',
85 source
86 ]
87 subprocess.check_call(cmd)
88 if source.startswith('cloud:'):
89 # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
90 # cloud:precise-folsom/updates or cloud:precise-folsom/proposed
91 install('ubuntu-cloud-keyring')
92 pocket = source.split(':')[1]
93 pocket = pocket.split('-')[1]
94 with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
95 apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
96 if source.startswith('deb'):
97 l = len(source.split('|'))
98 if l == 2:
99 (apt_line, key) = source.split('|')
100 cmd = [
101 'apt-key',
102 'adv', '--keyserver keyserver.ubuntu.com',
103 '--recv-keys', key
104 ]
105 subprocess.check_call(cmd)
106 elif l == 1:
107 apt_line = source
108
109 with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
110 apt.write(apt_line + "\n")
111 cmd = [
112 'apt-get',
113 'update'
114 ]
115 subprocess.check_call(cmd)
116
117# Protocols
118TCP = 'TCP'
119UDP = 'UDP'
120
121
122def expose(port, protocol='TCP'):
123 cmd = [
124 'open-port',
125 '{}/{}'.format(port, protocol)
126 ]
127 subprocess.check_call(cmd)
128
129
130def juju_log(severity, message):
131 cmd = [
132 'juju-log',
133 '--log-level', severity,
134 message
135 ]
136 subprocess.check_call(cmd)
137
138
139cache = {}
140
141
142def cached(func):
143 def wrapper(*args, **kwargs):
144 global cache
145 key = str((func, args, kwargs))
146 try:
147 return cache[key]
148 except KeyError:
149 res = func(*args, **kwargs)
150 cache[key] = res
151 return res
152 return wrapper
153
154
155@cached
156def relation_ids(relation):
157 cmd = [
158 'relation-ids',
159 relation
160 ]
161 result = str(subprocess.check_output(cmd)).split()
162 if result == "":
163 return None
164 else:
165 return result
166
167
168@cached
169def relation_list(rid):
170 cmd = [
171 'relation-list',
172 '-r', rid,
173 ]
174 result = str(subprocess.check_output(cmd)).split()
175 if result == "":
176 return None
177 else:
178 return result
179
180
181@cached
182def relation_get(attribute, unit=None, rid=None):
183 cmd = [
184 'relation-get',
185 ]
186 if rid:
187 cmd.append('-r')
188 cmd.append(rid)
189 cmd.append(attribute)
190 if unit:
191 cmd.append(unit)
192 value = subprocess.check_output(cmd).strip() # IGNORE:E1103
193 if value == "":
194 return None
195 else:
196 return value
197
198
199@cached
200def relation_get_dict(relation_id=None, remote_unit=None):
201 """Obtain all relation data as dict by way of JSON"""
202 cmd = [
203 'relation-get', '--format=json'
204 ]
205 if relation_id:
206 cmd.append('-r')
207 cmd.append(relation_id)
208 if remote_unit:
209 remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
210 os.environ['JUJU_REMOTE_UNIT'] = remote_unit
211 j = subprocess.check_output(cmd)
212 if remote_unit and remote_unit_orig:
213 os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
214 d = json.loads(j)
215 settings = {}
216 # convert unicode to strings
217 for k, v in d.iteritems():
218 settings[str(k)] = str(v)
219 return settings
220
221
222def relation_set(**kwargs):
223 cmd = [
224 'relation-set'
225 ]
226 args = []
227 for k, v in kwargs.items():
228 if k == 'rid':
229 if v:
230 cmd.append('-r')
231 cmd.append(v)
232 else:
233 args.append('{}={}'.format(k, v))
234 cmd += args
235 subprocess.check_call(cmd)
236
237
238@cached
239def unit_get(attribute):
240 cmd = [
241 'unit-get',
242 attribute
243 ]
244 value = subprocess.check_output(cmd).strip() # IGNORE:E1103
245 if value == "":
246 return None
247 else:
248 return value
249
250
251@cached
252def config_get(attribute):
253 cmd = [
254 'config-get',
255 '--format',
256 'json',
257 ]
258 out = subprocess.check_output(cmd).strip() # IGNORE:E1103
259 cfg = json.loads(out)
260
261 try:
262 return cfg[attribute]
263 except KeyError:
264 return None
265
266
267@cached
268def get_unit_hostname():
269 return socket.gethostname()
270
271
272@cached
273def get_host_ip(hostname=unit_get('private-address')):
274 try:
275 # Test to see if already an IPv4 address
276 socket.inet_aton(hostname)
277 return hostname
278 except socket.error:
279 answers = dns.resolver.query(hostname, 'A')
280 if answers:
281 return answers[0].address
282 return None
283
284
285def _svc_control(service, action):
286 subprocess.check_call(['service', service, action])
287
288
289def restart(*services):
290 for service in services:
291 _svc_control(service, 'restart')
292
293
294def stop(*services):
295 for service in services:
296 _svc_control(service, 'stop')
297
298
299def start(*services):
300 for service in services:
301 _svc_control(service, 'start')
302
303
304def reload(*services):
305 for service in services:
306 try:
307 _svc_control(service, 'reload')
308 except subprocess.CalledProcessError:
309 # Reload failed - either service does not support reload
310 # or it was not running - restart will fixup most things
311 _svc_control(service, 'restart')
312
313
314def running(service):
315 try:
316 output = subprocess.check_output(['service', service, 'status'])
317 except subprocess.CalledProcessError:
318 return False
319 else:
320 if ("start/running" in output or
321 "is running" in output):
322 return True
323 else:
324 return False
325
326
327def is_relation_made(relation, key='private-address'):
328 for r_id in (relation_ids(relation) or []):
329 for unit in (relation_list(r_id) or []):
330 if relation_get(key, rid=r_id, unit=unit):
331 return True
332 return False
3330
=== removed directory 'hooks/charmhelpers/contrib/jujugui'
=== removed file 'hooks/charmhelpers/contrib/jujugui/IMPORT'
--- hooks/charmhelpers/contrib/jujugui/IMPORT 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/jujugui/IMPORT 1970-01-01 00:00:00 +0000
@@ -1,4 +0,0 @@
1Source: lp:charms/juju-gui
2
3juju-gui/hooks/utils.py -> charm-helpers/charmhelpers/contrib/jujugui/utils.py
4juju-gui/tests/test_utils.py -> charm-helpers/tests/contrib/jujugui/test_utils.py
50
=== removed file 'hooks/charmhelpers/contrib/jujugui/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/jujugui/utils.py'
--- hooks/charmhelpers/contrib/jujugui/utils.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/jujugui/utils.py 1970-01-01 00:00:00 +0000
@@ -1,602 +0,0 @@
1"""Juju GUI charm utilities."""
2
3__all__ = [
4 'AGENT',
5 'APACHE',
6 'API_PORT',
7 'CURRENT_DIR',
8 'HAPROXY',
9 'IMPROV',
10 'JUJU_DIR',
11 'JUJU_GUI_DIR',
12 'JUJU_GUI_SITE',
13 'JUJU_PEM',
14 'WEB_PORT',
15 'bzr_checkout',
16 'chain',
17 'cmd_log',
18 'fetch_api',
19 'fetch_gui',
20 'find_missing_packages',
21 'first_path_in_dir',
22 'get_api_address',
23 'get_npm_cache_archive_url',
24 'get_release_file_url',
25 'get_staging_dependencies',
26 'get_zookeeper_address',
27 'legacy_juju',
28 'log_hook',
29 'merge',
30 'parse_source',
31 'prime_npm_cache',
32 'render_to_file',
33 'save_or_create_certificates',
34 'setup_apache',
35 'setup_gui',
36 'start_agent',
37 'start_gui',
38 'start_improv',
39 'write_apache_config',
40]
41
42from contextlib import contextmanager
43import errno
44import json
45import os
46import logging
47import shutil
48from subprocess import CalledProcessError
49import tempfile
50from urlparse import urlparse
51
52import apt
53import tempita
54
55from launchpadlib.launchpad import Launchpad
56from shelltoolbox import (
57 Serializer,
58 apt_get_install,
59 command,
60 environ,
61 install_extra_repositories,
62 run,
63 script_name,
64 search_file,
65 su,
66)
67from charmhelpers.core.host import (
68 service_start,
69)
70from charmhelpers.core.hookenv import (
71 log,
72 config,
73 unit_get,
74)
75
76
77AGENT = 'juju-api-agent'
78APACHE = 'apache2'
79IMPROV = 'juju-api-improv'
80HAPROXY = 'haproxy'
81
82API_PORT = 8080
83WEB_PORT = 8000
84
85CURRENT_DIR = os.getcwd()
86JUJU_DIR = os.path.join(CURRENT_DIR, 'juju')
87JUJU_GUI_DIR = os.path.join(CURRENT_DIR, 'juju-gui')
88JUJU_GUI_SITE = '/etc/apache2/sites-available/juju-gui'
89JUJU_GUI_PORTS = '/etc/apache2/ports.conf'
90JUJU_PEM = 'juju.includes-private-key.pem'
91BUILD_REPOSITORIES = ('ppa:chris-lea/node.js-legacy',)
92DEB_BUILD_DEPENDENCIES = (
93 'bzr', 'imagemagick', 'make', 'nodejs', 'npm',
94)
95DEB_STAGE_DEPENDENCIES = (
96 'zookeeper',
97)
98
99
100# Store the configuration from on invocation to the next.
101config_json = Serializer('/tmp/config.json')
102# Bazaar checkout command.
103bzr_checkout = command('bzr', 'co', '--lightweight')
104# Whether or not the charm is deployed using juju-core.
105# If juju-core has been used to deploy the charm, an agent.conf file must
106# be present in the charm parent directory.
107legacy_juju = lambda: not os.path.exists(
108 os.path.join(CURRENT_DIR, '..', 'agent.conf'))
109
110
111def _get_build_dependencies():
112 """Install deb dependencies for building."""
113 log('Installing build dependencies.')
114 cmd_log(install_extra_repositories(*BUILD_REPOSITORIES))
115 cmd_log(apt_get_install(*DEB_BUILD_DEPENDENCIES))
116
117
118def get_api_address(unit_dir):
119 """Return the Juju API address stored in the uniter agent.conf file."""
120 import yaml # python-yaml is only installed if juju-core is used.
121 # XXX 2013-03-27 frankban bug=1161443:
122 # currently the uniter agent.conf file does not include the API
123 # address. For now retrieve it from the machine agent file.
124 base_dir = os.path.abspath(os.path.join(unit_dir, '..'))
125 for dirname in os.listdir(base_dir):
126 if dirname.startswith('machine-'):
127 agent_conf = os.path.join(base_dir, dirname, 'agent.conf')
128 break
129 else:
130 raise IOError('Juju agent configuration file not found.')
131 contents = yaml.load(open(agent_conf))
132 return contents['apiinfo']['addrs'][0]
133
134
135def get_staging_dependencies():
136 """Install deb dependencies for the stage (improv) environment."""
137 log('Installing stage dependencies.')
138 cmd_log(apt_get_install(*DEB_STAGE_DEPENDENCIES))
139
140
141def first_path_in_dir(directory):
142 """Return the full path of the first file/dir in *directory*."""
143 return os.path.join(directory, os.listdir(directory)[0])
144
145
146def _get_by_attr(collection, attr, value):
147 """Return the first item in collection having attr == value.
148
149 Return None if the item is not found.
150 """
151 for item in collection:
152 if getattr(item, attr) == value:
153 return item
154
155
156def get_release_file_url(project, series_name, release_version):
157 """Return the URL of the release file hosted in Launchpad.
158
159 The returned URL points to a release file for the given project, series
160 name and release version.
161 The argument *project* is a project object as returned by launchpadlib.
162 The arguments *series_name* and *release_version* are strings. If
163 *release_version* is None, the URL of the latest release will be returned.
164 """
165 series = _get_by_attr(project.series, 'name', series_name)
166 if series is None:
167 raise ValueError('%r: series not found' % series_name)
168 # Releases are returned by Launchpad in reverse date order.
169 releases = list(series.releases)
170 if not releases:
171 raise ValueError('%r: series does not contain releases' % series_name)
172 if release_version is not None:
173 release = _get_by_attr(releases, 'version', release_version)
174 if release is None:
175 raise ValueError('%r: release not found' % release_version)
176 releases = [release]
177 for release in releases:
178 for file_ in release.files:
179 if str(file_).endswith('.tgz'):
180 return file_.file_link
181 raise ValueError('%r: file not found' % release_version)
182
183
184def get_zookeeper_address(agent_file_path):
185 """Retrieve the Zookeeper address contained in the given *agent_file_path*.
186
187 The *agent_file_path* is a path to a file containing a line similar to the
188 following::
189
190 env JUJU_ZOOKEEPER="address"
191 """
192 line = search_file('JUJU_ZOOKEEPER', agent_file_path).strip()
193 return line.split('=')[1].strip('"')
194
195
196@contextmanager
197def log_hook():
198 """Log when a hook starts and stops its execution.
199
200 Also log to stdout possible CalledProcessError exceptions raised executing
201 the hook.
202 """
203 script = script_name()
204 log(">>> Entering {}".format(script))
205 try:
206 yield
207 except CalledProcessError as err:
208 log('Exception caught:')
209 log(err.output)
210 raise
211 finally:
212 log("<<< Exiting {}".format(script))
213
214
215def parse_source(source):
216 """Parse the ``juju-gui-source`` option.
217
218 Return a tuple of two elements representing info on how to deploy Juju GUI.
219 Examples:
220 - ('stable', None): latest stable release;
221 - ('stable', '0.1.0'): stable release v0.1.0;
222 - ('trunk', None): latest trunk release;
223 - ('trunk', '0.1.0+build.1'): trunk release v0.1.0 bzr revision 1;
224 - ('branch', 'lp:juju-gui'): release is made from a branch;
225 - ('url', 'http://example.com/gui'): release from a downloaded file.
226 """
227 if source.startswith('url:'):
228 source = source[4:]
229 # Support file paths, including relative paths.
230 if urlparse(source).scheme == '':
231 if not source.startswith('/'):
232 source = os.path.join(os.path.abspath(CURRENT_DIR), source)
233 source = "file://%s" % source
234 return 'url', source
235 if source in ('stable', 'trunk'):
236 return source, None
237 if source.startswith('lp:') or source.startswith('http://'):
238 return 'branch', source
239 if 'build' in source:
240 return 'trunk', source
241 return 'stable', source
242
243
244def render_to_file(template_name, context, destination):
245 """Render the given *template_name* into *destination* using *context*.
246
247 The tempita template language is used to render contents
248 (see http://pythonpaste.org/tempita/).
249 The argument *template_name* is the name or path of the template file:
250 it may be either a path relative to ``../config`` or an absolute path.
251 The argument *destination* is a file path.
252 The argument *context* is a dict-like object.
253 """
254 template_path = os.path.abspath(template_name)
255 template = tempita.Template.from_filename(template_path)
256 with open(destination, 'w') as stream:
257 stream.write(template.substitute(context))
258
259
260results_log = None
261
262
263def _setupLogging():
264 global results_log
265 if results_log is not None:
266 return
267 cfg = config()
268 logging.basicConfig(
269 filename=cfg['command-log-file'],
270 level=logging.INFO,
271 format="%(asctime)s: %(name)s@%(levelname)s %(message)s")
272 results_log = logging.getLogger('juju-gui')
273
274
275def cmd_log(results):
276 global results_log
277 if not results:
278 return
279 if results_log is None:
280 _setupLogging()
281 # Since 'results' may be multi-line output, start it on a separate line
282 # from the logger timestamp, etc.
283 results_log.info('\n' + results)
284
285
286def start_improv(staging_env, ssl_cert_path,
287 config_path='/etc/init/juju-api-improv.conf'):
288 """Start a simulated juju environment using ``improv.py``."""
289 log('Setting up staging start up script.')
290 context = {
291 'juju_dir': JUJU_DIR,
292 'keys': ssl_cert_path,
293 'port': API_PORT,
294 'staging_env': staging_env,
295 }
296 render_to_file('config/juju-api-improv.conf.template', context, config_path)
297 log('Starting the staging backend.')
298 with su('root'):
299 service_start(IMPROV)
300
301
302def start_agent(
303 ssl_cert_path, config_path='/etc/init/juju-api-agent.conf',
304 read_only=False):
305 """Start the Juju agent and connect to the current environment."""
306 # Retrieve the Zookeeper address from the start up script.
307 unit_dir = os.path.realpath(os.path.join(CURRENT_DIR, '..'))
308 agent_file = '/etc/init/juju-{0}.conf'.format(os.path.basename(unit_dir))
309 zookeeper = get_zookeeper_address(agent_file)
310 log('Setting up API agent start up script.')
311 context = {
312 'juju_dir': JUJU_DIR,
313 'keys': ssl_cert_path,
314 'port': API_PORT,
315 'zookeeper': zookeeper,
316 'read_only': read_only
317 }
318 render_to_file('config/juju-api-agent.conf.template', context, config_path)
319 log('Starting API agent.')
320 with su('root'):
321 service_start(AGENT)
322
323
324def start_gui(
325 console_enabled, login_help, readonly, in_staging, ssl_cert_path,
326 charmworld_url, serve_tests, haproxy_path='/etc/haproxy/haproxy.cfg',
327 config_js_path=None, secure=True, sandbox=False):
328 """Set up and start the Juju GUI server."""
329 with su('root'):
330 run('chown', '-R', 'ubuntu:', JUJU_GUI_DIR)
331 # XXX 2013-02-05 frankban bug=1116320:
332 # External insecure resources are still loaded when testing in the
333 # debug environment. For now, switch to the production environment if
334 # the charm is configured to serve tests.
335 if in_staging and not serve_tests:
336 build_dirname = 'build-debug'
337 else:
338 build_dirname = 'build-prod'
339 build_dir = os.path.join(JUJU_GUI_DIR, build_dirname)
340 log('Generating the Juju GUI configuration file.')
341 is_legacy_juju = legacy_juju()
342 user, password = None, None
343 if (is_legacy_juju and in_staging) or sandbox:
344 user, password = 'admin', 'admin'
345 else:
346 user, password = None, None
347
348 api_backend = 'python' if is_legacy_juju else 'go'
349 if secure:
350 protocol = 'wss'
351 else:
352 log('Running in insecure mode! Port 80 will serve unencrypted.')
353 protocol = 'ws'
354
355 context = {
356 'raw_protocol': protocol,
357 'address': unit_get('public-address'),
358 'console_enabled': json.dumps(console_enabled),
359 'login_help': json.dumps(login_help),
360 'password': json.dumps(password),
361 'api_backend': json.dumps(api_backend),
362 'readonly': json.dumps(readonly),
363 'user': json.dumps(user),
364 'protocol': json.dumps(protocol),
365 'sandbox': json.dumps(sandbox),
366 'charmworld_url': json.dumps(charmworld_url),
367 }
368 if config_js_path is None:
369 config_js_path = os.path.join(
370 build_dir, 'juju-ui', 'assets', 'config.js')
371 render_to_file('config/config.js.template', context, config_js_path)
372
373 write_apache_config(build_dir, serve_tests)
374
375 log('Generating haproxy configuration file.')
376 if is_legacy_juju:
377 # The PyJuju API agent is listening on localhost.
378 api_address = '127.0.0.1:{0}'.format(API_PORT)
379 else:
380 # Retrieve the juju-core API server address.
381 api_address = get_api_address(os.path.join(CURRENT_DIR, '..'))
382 context = {
383 'api_address': api_address,
384 'api_pem': JUJU_PEM,
385 'legacy_juju': is_legacy_juju,
386 'ssl_cert_path': ssl_cert_path,
387 # In PyJuju environments, use the same certificate for both HTTPS and
388 # WebSocket connections. In juju-core the system already has the proper
389 # certificate installed.
390 'web_pem': JUJU_PEM,
391 'web_port': WEB_PORT,
392 'secure': secure
393 }
394 render_to_file('config/haproxy.cfg.template', context, haproxy_path)
395 log('Starting Juju GUI.')
396
397
398def write_apache_config(build_dir, serve_tests=False):
399 log('Generating the apache site configuration file.')
400 context = {
401 'port': WEB_PORT,
402 'serve_tests': serve_tests,
403 'server_root': build_dir,
404 'tests_root': os.path.join(JUJU_GUI_DIR, 'test', ''),
405 }
406 render_to_file('config/apache-ports.template', context, JUJU_GUI_PORTS)
407 render_to_file('config/apache-site.template', context, JUJU_GUI_SITE)
408
409
410def get_npm_cache_archive_url(Launchpad=Launchpad):
411 """Figure out the URL of the most recent NPM cache archive on Launchpad."""
412 launchpad = Launchpad.login_anonymously('Juju GUI charm', 'production')
413 project = launchpad.projects['juju-gui']
414 # Find the URL of the most recently created NPM cache archive.
415 npm_cache_url = get_release_file_url(project, 'npm-cache', None)
416 return npm_cache_url
417
418
419def prime_npm_cache(npm_cache_url):
420 """Download NPM cache archive and prime the NPM cache with it."""
421 # Download the cache archive and then uncompress it into the NPM cache.
422 npm_cache_archive = os.path.join(CURRENT_DIR, 'npm-cache.tgz')
423 cmd_log(run('curl', '-L', '-o', npm_cache_archive, npm_cache_url))
424 npm_cache_dir = os.path.expanduser('~/.npm')
425 # The NPM cache directory probably does not exist, so make it if not.
426 try:
427 os.mkdir(npm_cache_dir)
428 except OSError, e:
429 # If the directory already exists then ignore the error.
430 if e.errno != errno.EEXIST: # File exists.
431 raise
432 uncompress = command('tar', '-x', '-z', '-C', npm_cache_dir, '-f')
433 cmd_log(uncompress(npm_cache_archive))
434
435
436def fetch_gui(juju_gui_source, logpath):
437 """Retrieve the Juju GUI release/branch."""
438 # Retrieve a Juju GUI release.
439 origin, version_or_branch = parse_source(juju_gui_source)
440 if origin == 'branch':
441 # Make sure we have the dependencies necessary for us to actually make
442 # a build.
443 _get_build_dependencies()
444 # Create a release starting from a branch.
445 juju_gui_source_dir = os.path.join(CURRENT_DIR, 'juju-gui-source')
446 log('Retrieving Juju GUI source checkout from %s.' % version_or_branch)
447 cmd_log(run('rm', '-rf', juju_gui_source_dir))
448 cmd_log(bzr_checkout(version_or_branch, juju_gui_source_dir))
449 log('Preparing a Juju GUI release.')
450 logdir = os.path.dirname(logpath)
451 fd, name = tempfile.mkstemp(prefix='make-distfile-', dir=logdir)
452 log('Output from "make distfile" sent to %s' % name)
453 with environ(NO_BZR='1'):
454 run('make', '-C', juju_gui_source_dir, 'distfile',
455 stdout=fd, stderr=fd)
456 release_tarball = first_path_in_dir(
457 os.path.join(juju_gui_source_dir, 'releases'))
458 else:
459 log('Retrieving Juju GUI release.')
460 if origin == 'url':
461 file_url = version_or_branch
462 else:
463 # Retrieve a release from Launchpad.
464 launchpad = Launchpad.login_anonymously(
465 'Juju GUI charm', 'production')
466 project = launchpad.projects['juju-gui']
467 file_url = get_release_file_url(project, origin, version_or_branch)
468 log('Downloading release file from %s.' % file_url)
469 release_tarball = os.path.join(CURRENT_DIR, 'release.tgz')
470 cmd_log(run('curl', '-L', '-o', release_tarball, file_url))
471 return release_tarball
472
473
474def fetch_api(juju_api_branch):
475 """Retrieve the Juju branch."""
476 # Retrieve Juju API source checkout.
477 log('Retrieving Juju API source checkout.')
478 cmd_log(run('rm', '-rf', JUJU_DIR))
479 cmd_log(bzr_checkout(juju_api_branch, JUJU_DIR))
480
481
482def setup_gui(release_tarball):
483 """Set up Juju GUI."""
484 # Uncompress the release tarball.
485 log('Installing Juju GUI.')
486 release_dir = os.path.join(CURRENT_DIR, 'release')
487 cmd_log(run('rm', '-rf', release_dir))
488 os.mkdir(release_dir)
489 uncompress = command('tar', '-x', '-z', '-C', release_dir, '-f')
490 cmd_log(uncompress(release_tarball))
491 # Link the Juju GUI dir to the contents of the release tarball.
492 cmd_log(run('ln', '-sf', first_path_in_dir(release_dir), JUJU_GUI_DIR))
493
494
495def setup_apache():
496 """Set up apache."""
497 log('Setting up apache.')
498 if not os.path.exists(JUJU_GUI_SITE):
499 cmd_log(run('touch', JUJU_GUI_SITE))
500 cmd_log(run('chown', 'ubuntu:', JUJU_GUI_SITE))
501 cmd_log(
502 run('ln', '-s', JUJU_GUI_SITE,
503 '/etc/apache2/sites-enabled/juju-gui'))
504
505 if not os.path.exists(JUJU_GUI_PORTS):
506 cmd_log(run('touch', JUJU_GUI_PORTS))
507 cmd_log(run('chown', 'ubuntu:', JUJU_GUI_PORTS))
508
509 with su('root'):
510 run('a2dissite', 'default')
511 run('a2ensite', 'juju-gui')
512
513
514def save_or_create_certificates(
515 ssl_cert_path, ssl_cert_contents, ssl_key_contents):
516 """Generate the SSL certificates.
517
518 If both *ssl_cert_contents* and *ssl_key_contents* are provided, use them
519 as certificates; otherwise, generate them.
520
521 Also create a pem file, suitable for use in the haproxy configuration,
522 concatenating the key and the certificate files.
523 """
524 crt_path = os.path.join(ssl_cert_path, 'juju.crt')
525 key_path = os.path.join(ssl_cert_path, 'juju.key')
526 if not os.path.exists(ssl_cert_path):
527 os.makedirs(ssl_cert_path)
528 if ssl_cert_contents and ssl_key_contents:
529 # Save the provided certificates.
530 with open(crt_path, 'w') as cert_file:
531 cert_file.write(ssl_cert_contents)
532 with open(key_path, 'w') as key_file:
533 key_file.write(ssl_key_contents)
534 else:
535 # Generate certificates.
536 # See http://superuser.com/questions/226192/openssl-without-prompt
537 cmd_log(run(
538 'openssl', 'req', '-new', '-newkey', 'rsa:4096',
539 '-days', '365', '-nodes', '-x509', '-subj',
540 # These are arbitrary test values for the certificate.
541 '/C=GB/ST=Juju/L=GUI/O=Ubuntu/CN=juju.ubuntu.com',
542 '-keyout', key_path, '-out', crt_path))
543 # Generate the pem file.
544 pem_path = os.path.join(ssl_cert_path, JUJU_PEM)
545 if os.path.exists(pem_path):
546 os.remove(pem_path)
547 with open(pem_path, 'w') as pem_file:
548 shutil.copyfileobj(open(key_path), pem_file)
549 shutil.copyfileobj(open(crt_path), pem_file)
550
551
552def find_missing_packages(*packages):
553 """Given a list of packages, return the packages which are not installed.
554 """
555 cache = apt.Cache()
556 missing = set()
557 for pkg_name in packages:
558 try:
559 pkg = cache[pkg_name]
560 except KeyError:
561 missing.add(pkg_name)
562 continue
563 if pkg.is_installed:
564 continue
565 missing.add(pkg_name)
566 return missing
567
568
569## Backend support decorators
570
571def chain(name):
572 """Helper method to compose a set of mixin objects into a callable.
573
574 Each method is called in the context of its mixin instance, and its
575 argument is the Backend instance.
576 """
577 # Chain method calls through all implementing mixins.
578 def method(self):
579 for mixin in self.mixins:
580 a_callable = getattr(type(mixin), name, None)
581 if a_callable:
582 a_callable(mixin, self)
583
584 method.__name__ = name
585 return method
586
587
588def merge(name):
589 """Helper to merge a property from a set of strategy objects
590 into a unified set.
591 """
592 # Return merged property from every providing mixin as a set.
593 @property
594 def method(self):
595 result = set()
596 for mixin in self.mixins:
597 segment = getattr(type(mixin), name, None)
598 if segment and isinstance(segment, (list, tuple, set)):
599 result |= set(segment)
600
601 return result
602 return method
6030
=== removed directory 'hooks/charmhelpers/contrib/openstack'
=== removed file 'hooks/charmhelpers/contrib/openstack/IMPORT'
--- hooks/charmhelpers/contrib/openstack/IMPORT 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/openstack/IMPORT 1970-01-01 00:00:00 +0000
@@ -1,9 +0,0 @@
1Source: lp:~openstack-charmers/openstack-charm-helpers/ha-helpers
2
3ha-helpers/lib/openstack-common -> charm-helpers/charmhelpers/contrib/openstackhelpers/openstack-common
4ha-helpers/lib/openstack_common.py -> charm-helpers/charmhelpers/contrib/openstackhelpers/openstack_common.py
5ha-helpers/lib/nova -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova
6ha-helpers/lib/nova/nova-common -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/nova-common
7ha-helpers/lib/nova/grizzly -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/grizzly
8ha-helpers/lib/nova/essex -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/essex
9ha-helpers/lib/nova/folsom -> charm-helpers/charmhelpers/contrib/openstackhelpers/nova/folsom
100
=== removed file 'hooks/charmhelpers/contrib/openstack/__init__.py'
=== removed directory 'hooks/charmhelpers/contrib/openstack/nova'
=== removed file 'hooks/charmhelpers/contrib/openstack/nova/essex'
--- hooks/charmhelpers/contrib/openstack/nova/essex 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/openstack/nova/essex 1970-01-01 00:00:00 +0000
@@ -1,43 +0,0 @@
1#!/bin/bash -e
2
3# Essex-specific functions
4
5nova_set_or_update() {
6 # Set a config option in nova.conf or api-paste.ini, depending
7 # Defaults to updating nova.conf
8 local key=$1
9 local value=$2
10 local conf_file=$3
11 local pattern=""
12
13 local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
14 local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
15 local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
16 [[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1
17 [[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1
18 [[ -z "$conf_file" ]] && conf_file=$nova_conf
19
20 case "$conf_file" in
21 "$nova_conf") match="\-\-$key="
22 pattern="--$key="
23 out=$pattern
24 ;;
25 "$api_conf"|"$libvirtd_conf") match="^$key = "
26 pattern="$match"
27 out="$key = "
28 ;;
29 *) error_out "ERROR: set_or_update: Invalid conf_file ($conf_file)"
30 esac
31
32 cat $conf_file | grep "$match$value" >/dev/null &&
33 juju-log "$CHARM: $key=$value already in set in $conf_file" \
34 && return 0
35 if cat $conf_file | grep "$match" >/dev/null ; then
36 juju-log "$CHARM: Updating $conf_file, $key=$value"
37 sed -i "s|\($pattern\).*|\1$value|" $conf_file
38 else
39 juju-log "$CHARM: Setting new option $key=$value in $conf_file"
40 echo "$out$value" >>$conf_file
41 fi
42 CONFIG_CHANGED=True
43}
440
=== removed file 'hooks/charmhelpers/contrib/openstack/nova/folsom'
--- hooks/charmhelpers/contrib/openstack/nova/folsom 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/openstack/nova/folsom 1970-01-01 00:00:00 +0000
@@ -1,81 +0,0 @@
1#!/bin/bash -e
2
3# Folsom-specific functions
4
5nova_set_or_update() {
6 # TODO: This needs to be shared among folsom, grizzly and beyond.
7 # Set a config option in nova.conf or api-paste.ini, depending
8 # Defaults to updating nova.conf
9 local key="$1"
10 local value="$2"
11 local conf_file="$3"
12 local section="${4:-DEFAULT}"
13
14 local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
15 local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
16 local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf}
17 local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini}
18 local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini}
19 local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
20
21 [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1
22 [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1
23
24 [[ -z "$conf_file" ]] && conf_file=$nova_conf
25
26 local pattern=""
27 case "$conf_file" in
28 "$nova_conf") match="^$key="
29 pattern="$key="
30 out=$pattern
31 ;;
32 "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \
33 "$libvirtd_conf")
34 match="^$key = "
35 pattern="$match"
36 out="$key = "
37 ;;
38 *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)"
39 esac
40
41 cat $conf_file | grep "$match$value" >/dev/null &&
42 juju-log "$CHARM: $key=$value already in set in $conf_file" \
43 && return 0
44
45 case $conf_file in
46 "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf")
47 python -c "
48import ConfigParser
49config = ConfigParser.RawConfigParser()
50config.read('$conf_file')
51config.set('$section','$key','$value')
52with open('$conf_file', 'wb') as configfile:
53 config.write(configfile)
54"
55 ;;
56 *)
57 if cat $conf_file | grep "$match" >/dev/null ; then
58 juju-log "$CHARM: Updating $conf_file, $key=$value"
59 sed -i "s|\($pattern\).*|\1$value|" $conf_file
60 else
61 juju-log "$CHARM: Setting new option $key=$value in $conf_file"
62 echo "$out$value" >>$conf_file
63 fi
64 ;;
65 esac
66 CONFIG_CHANGED="True"
67}
68
69# Upgrade Helpers
70nova_pre_upgrade() {
71 # Pre-upgrade helper. Caller should pass the version of OpenStack we are
72 # upgrading from.
73 return 0 # Nothing to do here, yet.
74}
75
76nova_post_upgrade() {
77 # Post-upgrade helper. Caller should pass the version of OpenStack we are
78 # upgrading from.
79 juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom."
80 # nothing to do here yet.
81}
820
=== removed symlink 'hooks/charmhelpers/contrib/openstack/nova/grizzly'
=== target was u'folsom'
=== removed file 'hooks/charmhelpers/contrib/openstack/nova/nova-common'
--- hooks/charmhelpers/contrib/openstack/nova/nova-common 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/openstack/nova/nova-common 1970-01-01 00:00:00 +0000
@@ -1,147 +0,0 @@
1#!/bin/bash -e
2
3# Common utility functions used across all nova charms.
4
5CONFIG_CHANGED=False
6
7# Load the common OpenStack helper library.
8if [[ -e $CHARM_DIR/lib/openstack-common ]] ; then
9 . $CHARM_DIR/lib/openstack-common
10else
11 juju-log "Couldn't load $CHARM_DIR/lib/opentack-common." && exit 1
12fi
13
14set_or_update() {
15 # Update config flags in nova.conf or api-paste.ini.
16 # Config layout changed in Folsom, so this is now OpenStack release specific.
17 local rel=$(get_os_codename_package "nova-common")
18 . $CHARM_DIR/lib/nova/$rel
19 nova_set_or_update $@
20}
21
22function set_config_flags() {
23 # Set user-defined nova.conf flags from deployment config
24 juju-log "$CHARM: Processing config-flags."
25 flags=$(config-get config-flags)
26 if [[ "$flags" != "None" && -n "$flags" ]] ; then
27 for f in $(echo $flags | sed -e 's/,/ /g') ; do
28 k=$(echo $f | cut -d= -f1)
29 v=$(echo $f | cut -d= -f2)
30 set_or_update "$k" "$v"
31 done
32 fi
33}
34
35configure_volume_service() {
36 local svc="$1"
37 local cur_vers="$(get_os_codename_package "nova-common")"
38 case "$svc" in
39 "cinder")
40 set_or_update "volume_api_class" "nova.volume.cinder.API" ;;
41 "nova-volume")
42 # nova-volume only supported before grizzly.
43 [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] &&
44 set_or_update "volume_api_class" "nova.volume.api.API"
45 ;;
46 *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc"
47 return 1 ;;
48 esac
49}
50
51function configure_network_manager {
52 local manager="$1"
53 echo "$CHARM: configuring $manager network manager"
54 case $1 in
55 "FlatManager")
56 set_or_update "network_manager" "nova.network.manager.FlatManager"
57 ;;
58 "FlatDHCPManager")
59 set_or_update "network_manager" "nova.network.manager.FlatDHCPManager"
60
61 if [[ "$CHARM" == "nova-compute" ]] ; then
62 local flat_interface=$(config-get flat-interface)
63 local ec2_host=$(relation-get ec2_host)
64 set_or_update flat_inteface "$flat_interface"
65 set_or_update ec2_dmz_host "$ec2_host"
66
67 # Ensure flat_interface has link.
68 if ip link show $flat_interface >/dev/null 2>&1 ; then
69 ip link set $flat_interface up
70 fi
71
72 # work around (LP: #1035172)
73 if [[ -e /dev/vhost-net ]] ; then
74 iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \
75 --checksum-fill
76 fi
77 fi
78
79 ;;
80 "Quantum")
81 local local_ip=$(get_ip `unit-get private-address`)
82 [[ -n $local_ip ]] || {
83 juju-log "Unable to resolve local IP address"
84 exit 1
85 }
86 set_or_update "network_api_class" "nova.network.quantumv2.api.API"
87 set_or_update "quantum_auth_strategy" "keystone"
88 set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF"
89 set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF"
90 if [ "$QUANTUM_PLUGIN" == "ovs" ]; then
91 set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS"
92 set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS"
93 set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS"
94 set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS"
95 fi
96 ;;
97 *) juju-log "ERROR: Invalid network manager $1" && exit 1 ;;
98 esac
99}
100
101function trigger_remote_service_restarts() {
102 # Trigger a service restart on all other nova nodes that have a relation
103 # via the cloud-controller interface.
104
105 # possible relations to other nova services.
106 local relations="cloud-compute nova-volume-service"
107
108 for rel in $relations; do
109 local r_ids=$(relation-ids $rel)
110 for r_id in $r_ids ; do
111 juju-log "$CHARM: Triggering a service restart on relation $r_id."
112 relation-set -r $r_id restart-trigger=$(uuid)
113 done
114 done
115}
116
117do_openstack_upgrade() {
118 # update openstack components to those provided by a new installation source
119 # it is assumed the calling hook has confirmed that the upgrade is sane.
120 local rel="$1"
121 shift
122 local packages=$@
123
124 orig_os_rel=$(get_os_codename_package "nova-common")
125 new_rel=$(get_os_codename_install_source "$rel")
126
127 # Backup the config directory.
128 local stamp=$(date +"%Y%m%d%M%S")
129 tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR
130
131 # load the release helper library for pre/post upgrade hooks specific to the
132 # release we are upgrading to.
133 . $CHARM_DIR/lib/nova/$new_rel
134
135 # new release specific pre-upgrade hook
136 nova_pre_upgrade "$orig_os_rel"
137
138 # Setup apt repository access and kick off the actual package upgrade.
139 configure_install_source "$rel"
140 apt-get update
141 DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \
142 install --no-install-recommends $packages
143
144 # new release sepcific post-upgrade hook
145 nova_post_upgrade "$orig_os_rel"
146
147}
1480
=== removed file 'hooks/charmhelpers/contrib/openstack/openstack-common'
--- hooks/charmhelpers/contrib/openstack/openstack-common 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/openstack/openstack-common 1970-01-01 00:00:00 +0000
@@ -1,781 +0,0 @@
1#!/bin/bash -e
2
3# Common utility functions used across all OpenStack charms.
4
5error_out() {
6 juju-log "$CHARM ERROR: $@"
7 exit 1
8}
9
10function service_ctl_status {
11 # Return 0 if a service is running, 1 otherwise.
12 local svc="$1"
13 local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }')
14 case $status in
15 "start") return 0 ;;
16 "stop") return 1 ;;
17 *) error_out "Unexpected status of service $svc: $status" ;;
18 esac
19}
20
21function service_ctl {
22 # control a specific service, or all (as defined by $SERVICES)
23 # service restarts will only occur depending on global $CONFIG_CHANGED,
24 # which should be updated in charm's set_or_update().
25 local config_changed=${CONFIG_CHANGED:-True}
26 if [[ $1 == "all" ]] ; then
27 ctl="$SERVICES"
28 else
29 ctl="$1"
30 fi
31 action="$2"
32 if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then
33 error_out "ERROR service_ctl: Not enough arguments"
34 fi
35
36 for i in $ctl ; do
37 case $action in
38 "start")
39 service_ctl_status $i || service $i start ;;
40 "stop")
41 service_ctl_status $i && service $i stop || return 0 ;;
42 "restart")
43 if [[ "$config_changed" == "True" ]] ; then
44 service_ctl_status $i && service $i restart || service $i start
45 fi
46 ;;
47 esac
48 if [[ $? != 0 ]] ; then
49 juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action"
50 fi
51 done
52 # all configs should have been reloaded on restart of all services, reset
53 # flag if its being used.
54 if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] &&
55 [[ "$ctl" == "all" ]]; then
56 CONFIG_CHANGED="False"
57 fi
58}
59
60function configure_install_source {
61 # Setup and configure installation source based on a config flag.
62 local src="$1"
63
64 # Default to installing from the main Ubuntu archive.
65 [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0
66
67 . /etc/lsb-release
68
69 # standard 'ppa:someppa/name' format.
70 if [[ "${src:0:4}" == "ppa:" ]] ; then
71 juju-log "$CHARM: Configuring installation from custom src ($src)"
72 add-apt-repository -y "$src" || error_out "Could not configure PPA access."
73 return 0
74 fi
75
76 # standard 'deb http://url/ubuntu main' entries. gpg key ids must
77 # be appended to the end of url after a |, ie:
78 # 'deb http://url/ubuntu main|$GPGKEYID'
79 if [[ "${src:0:3}" == "deb" ]] ; then
80 juju-log "$CHARM: Configuring installation from custom src URL ($src)"
81 if echo "$src" | grep -q "|" ; then
82 # gpg key id tagged to end of url folloed by a |
83 url=$(echo $src | cut -d'|' -f1)
84 key=$(echo $src | cut -d'|' -f2)
85 juju-log "$CHARM: Importing repository key: $key"
86 apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \
87 juju-log "$CHARM WARN: Could not import key from keyserver: $key"
88 else
89 juju-log "$CHARM No repository key specified."
90 url="$src"
91 fi
92 echo "$url" > /etc/apt/sources.list.d/juju_deb.list
93 return 0
94 fi
95
96 # Cloud Archive
97 if [[ "${src:0:6}" == "cloud:" ]] ; then
98
99 # current os releases supported by the UCA.
100 local cloud_archive_versions="folsom grizzly"
101
102 local ca_rel=$(echo $src | cut -d: -f2)
103 local u_rel=$(echo $ca_rel | cut -d- -f1)
104 local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1)
105
106 [[ "$u_rel" != "$DISTRIB_CODENAME" ]] &&
107 error_out "Cannot install from Cloud Archive pocket $src " \
108 "on this Ubuntu version ($DISTRIB_CODENAME)!"
109
110 valid_release=""
111 for rel in $cloud_archive_versions ; do
112 if [[ "$os_rel" == "$rel" ]] ; then
113 valid_release=1
114 juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive."
115 fi
116 done
117 if [[ -z "$valid_release" ]] ; then
118 error_out "OpenStack release ($os_rel) not supported by "\
119 "the Ubuntu Cloud Archive."
120 fi
121
122 # CA staging repos are standard PPAs.
123 if echo $ca_rel | grep -q "staging" ; then
124 add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging
125 return 0
126 fi
127
128 # the others are LP-external deb repos.
129 case "$ca_rel" in
130 "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
131 "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
132 "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
133 "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
134 *) error_out "Invalid Cloud Archive repo specified: $src"
135 esac
136
137 apt-get -y install ubuntu-cloud-keyring
138 entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main"
139 echo "$entry" \
140 >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list
141 return 0
142 fi
143
144 error_out "Invalid installation source specified in config: $src"
145
146}
147
148get_os_codename_install_source() {
149 # derive the openstack release provided by a supported installation source.
150 local rel="$1"
151 local codename="unknown"
152 . /etc/lsb-release
153
154 # map ubuntu releases to the openstack version shipped with it.
155 if [[ "$rel" == "distro" ]] ; then
156 case "$DISTRIB_CODENAME" in
157 "oneiric") codename="diablo" ;;
158 "precise") codename="essex" ;;
159 "quantal") codename="folsom" ;;
160 "raring") codename="grizzly" ;;
161 esac
162 fi
163
164 # derive version from cloud archive strings.
165 if [[ "${rel:0:6}" == "cloud:" ]] ; then
166 rel=$(echo $rel | cut -d: -f2)
167 local u_rel=$(echo $rel | cut -d- -f1)
168 local ca_rel=$(echo $rel | cut -d- -f2)
169 if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then
170 case "$ca_rel" in
171 "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging")
172 codename="folsom" ;;
173 "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging")
174 codename="grizzly" ;;
175 esac
176 fi
177 fi
178
179 # have a guess based on the deb string provided
180 if [[ "${rel:0:3}" == "deb" ]] || \
181 [[ "${rel:0:3}" == "ppa" ]] ; then
182 CODENAMES="diablo essex folsom grizzly havana"
183 for cname in $CODENAMES; do
184 if echo $rel | grep -q $cname; then
185 codename=$cname
186 fi
187 done
188 fi
189 echo $codename
190}
191
192get_os_codename_package() {
193 local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none"
194 pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs
195 case "${pkg_vers:0:6}" in
196 "2011.2") echo "diablo" ;;
197 "2012.1") echo "essex" ;;
198 "2012.2") echo "folsom" ;;
199 "2013.1") echo "grizzly" ;;
200 "2013.2") echo "havana" ;;
201 esac
202}
203
204get_os_version_codename() {
205 case "$1" in
206 "diablo") echo "2011.2" ;;
207 "essex") echo "2012.1" ;;
208 "folsom") echo "2012.2" ;;
209 "grizzly") echo "2013.1" ;;
210 "havana") echo "2013.2" ;;
211 esac
212}
213
214get_ip() {
215 dpkg -l | grep -q python-dnspython || {
216 apt-get -y install python-dnspython 2>&1 > /dev/null
217 }
218 hostname=$1
219 python -c "
220import dns.resolver
221import socket
222try:
223 # Test to see if already an IPv4 address
224 socket.inet_aton('$hostname')
225 print '$hostname'
226except socket.error:
227 try:
228 answers = dns.resolver.query('$hostname', 'A')
229 if answers:
230 print answers[0].address
231 except dns.resolver.NXDOMAIN:
232 pass
233"
234}
235
236# Common storage routines used by cinder, nova-volume and swift-storage.
237clean_storage() {
238 # if configured to overwrite existing storage, we unmount the block-dev
239 # if mounted and clear any previous pv signatures
240 local block_dev="$1"
241 juju-log "Cleaining storage '$block_dev'"
242 if grep -q "^$block_dev" /proc/mounts ; then
243 mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }')
244 juju-log "Unmounting $block_dev from $mp"
245 umount "$mp" || error_out "ERROR: Could not unmount storage from $mp"
246 fi
247 if pvdisplay "$block_dev" >/dev/null 2>&1 ; then
248 juju-log "Removing existing LVM PV signatures from $block_dev"
249
250 # deactivate any volgroups that may be built on this dev
251 vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }')
252 if [[ -n "$vg" ]] ; then
253 juju-log "Deactivating existing volume group: $vg"
254 vgchange -an "$vg" ||
255 error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
256 fi
257 echo "yes" | pvremove -ff "$block_dev" ||
258 error_out "Could not pvremove $block_dev"
259 else
260 juju-log "Zapping disk of all GPT and MBR structures"
261 sgdisk --zap-all $block_dev ||
262 error_out "Unable to zap $block_dev"
263 fi
264}
265
266function get_block_device() {
267 # given a string, return full path to the block device for that
268 # if input is not a block device, find a loopback device
269 local input="$1"
270
271 case "$input" in
272 /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist."
273 echo "$input"; return 0;;
274 /*) :;;
275 *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist."
276 echo "/dev/$input"; return 0;;
277 esac
278
279 # this represents a file
280 # support "/path/to/file|5G"
281 local fpath size oifs="$IFS"
282 if [ "${input#*|}" != "${input}" ]; then
283 size=${input##*|}
284 fpath=${input%|*}
285 else
286 fpath=${input}
287 size=5G
288 fi
289
290 ## loop devices are not namespaced. This is bad for containers.
291 ## it means that the output of 'losetup' may have the given $fpath
292 ## in it, but that may not represent this containers $fpath, but
293 ## another containers. To address that, we really need to
294 ## allow some uniq container-id to be expanded within path.
295 ## TODO: find a unique container-id that will be consistent for
296 ## this container throughout its lifetime and expand it
297 ## in the fpath.
298 # fpath=${fpath//%{id}/$THAT_ID}
299
300 local found=""
301 # parse through 'losetup -a' output, looking for this file
302 # output is expected to look like:
303 # /dev/loop0: [0807]:961814 (/tmp/my.img)
304 found=$(losetup -a |
305 awk 'BEGIN { found=0; }
306 $3 == f { sub(/:$/,"",$1); print $1; found=found+1; }
307 END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \
308 f="($fpath)")
309
310 if [ $? -ne 0 ]; then
311 echo "multiple devices found for $fpath: $found" 1>&2
312 return 1;
313 fi
314
315 [ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; }
316
317 if [ -n "$found" ]; then
318 echo "confused, $found is not a block device for $fpath";
319 return 1;
320 fi
321
322 # no existing device was found, create one
323 mkdir -p "${fpath%/*}"
324 truncate --size "$size" "$fpath" ||
325 { echo "failed to create $fpath of size $size"; return 1; }
326
327 found=$(losetup --find --show "$fpath") ||
328 { echo "failed to setup loop device for $fpath" 1>&2; return 1; }
329
330 echo "$found"
331 return 0
332}
333
334HAPROXY_CFG=/etc/haproxy/haproxy.cfg
335HAPROXY_DEFAULT=/etc/default/haproxy
336##########################################################################
337# Description: Configures HAProxy services for Openstack API's
338# Parameters:
339# Space delimited list of service:port:mode combinations for which
340# haproxy service configuration should be generated for. The function
341# assumes the name of the peer relation is 'cluster' and that every
342# service unit in the peer relation is running the same services.
343#
344# Services that do not specify :mode in parameter will default to http.
345#
346# Example
347# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http
348##########################################################################
349configure_haproxy() {
350 local address=`unit-get private-address`
351 local name=${JUJU_UNIT_NAME////-}
352 cat > $HAPROXY_CFG << EOF
353global
354 log 127.0.0.1 local0
355 log 127.0.0.1 local1 notice
356 maxconn 20000
357 user haproxy
358 group haproxy
359 spread-checks 0
360
361defaults
362 log global
363 mode http
364 option httplog
365 option dontlognull
366 retries 3
367 timeout queue 1000
368 timeout connect 1000
369 timeout client 30000
370 timeout server 30000
371
372listen stats :8888
373 mode http
374 stats enable
375 stats hide-version
376 stats realm Haproxy\ Statistics
377 stats uri /
378 stats auth admin:password
379
380EOF
381 for service in $@; do
382 local service_name=$(echo $service | cut -d : -f 1)
383 local haproxy_listen_port=$(echo $service | cut -d : -f 2)
384 local api_listen_port=$(echo $service | cut -d : -f 3)
385 local mode=$(echo $service | cut -d : -f 4)
386 [[ -z "$mode" ]] && mode="http"
387 juju-log "Adding haproxy configuration entry for $service "\
388 "($haproxy_listen_port -> $api_listen_port)"
389 cat >> $HAPROXY_CFG << EOF
390listen $service_name 0.0.0.0:$haproxy_listen_port
391 balance roundrobin
392 mode $mode
393 option ${mode}log
394 server $name $address:$api_listen_port check
395EOF
396 local r_id=""
397 local unit=""
398 for r_id in `relation-ids cluster`; do
399 for unit in `relation-list -r $r_id`; do
400 local unit_name=${unit////-}
401 local unit_address=`relation-get -r $r_id private-address $unit`
402 if [ -n "$unit_address" ]; then
403 echo " server $unit_name $unit_address:$api_listen_port check" \
404 >> $HAPROXY_CFG
405 fi
406 done
407 done
408 done
409 echo "ENABLED=1" > $HAPROXY_DEFAULT
410 service haproxy restart
411}
412
##########################################################################
# Description: Query HA interface to determine if cluster is configured
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
  local r_id=""
  local unit=""
  for r_id in $(relation-ids ha); do
    # skip empty ids defensively
    [ -z "$r_id" ] && continue
    for unit in $(relation-list -r $r_id); do
      clustered=$(relation-get -r $r_id clustered $unit)
      if [ -n "$clustered" ]; then
        juju-log "Unit is haclustered"
        return 0
      fi
    done
  done
  juju-log "Unit is not haclustered"
  return 1
}
434
##########################################################################
# Description: Return a list of all peers in cluster relations
##########################################################################
peer_units() {
  local all_peers=""
  local rid=""
  for rid in $(relation-ids cluster); do
    all_peers="$all_peers $(relation-list -r $rid)"
  done
  # unquoted on purpose: collapses whitespace into one space-separated list
  echo $all_peers
}
446
##########################################################################
# Description: Determines whether the current unit is the oldest of all
#              its peers - supports partial leader election
# Returns: 0 if oldest, 1 if not
##########################################################################
oldest_peer() {
  peers=$1
  # unit sequence number is the part after the '/' in e.g. "service/3"
  local local_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2)
  for peer in $peers; do
    echo "Comparing $JUJU_UNIT_NAME with peers: $peers"
    local remote_no=$(echo $peer | cut -d / -f 2)
    # any peer with a lower sequence number is older than us
    if ((remote_no < local_no)); then
      juju-log "Not oldest peer; deferring"
      return 1
    fi
  done
  juju-log "Oldest peer; might take charge?"
  return 0
}
466
##########################################################################
# Description: Determines whether the current service units is the
#              leader within a) a cluster of its peers or b) across a
#              set of unclustered peers.
# Parameters: CRM resource to check ownership of if clustered
# Returns: 0 if leader, 1 if not
##########################################################################
eligible_leader() {
  if is_clustered; then
    # clustered: the CRM resource owner is the leader
    is_leader $1 && return 0
    juju-log 'Deferring action to CRM leader'
    return 1
  fi
  # unclustered: fall back to oldest-peer election
  peers=$(peer_units)
  if [ -n "$peers" ] && ! oldest_peer "$peers"; then
    juju-log 'Deferring action to oldest service unit.'
    return 1
  fi
  return 0
}
489
##########################################################################
# Description: Query Cluster peer interface to see if peered
# Returns: 0 if peered, 1 if not peered
##########################################################################
is_peered() {
  local r_id=$(relation-ids cluster)
  if [ -n "$r_id" ] && [ -n "$(relation-list -r $r_id)" ]; then
    juju-log "Unit peered"
    return 0
  fi
  juju-log "Unit not peered"
  return 1
}
505
##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters: Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
  hostname=`hostname`
  if [ -x /usr/sbin/crm ]; then
    # fixed: match the hostname as a whole word; a plain substring match
    # wrongly reported e.g. "node-1" as leader when "node-10" owned the
    # resource.
    if crm resource show $1 | grep -qw "$hostname"; then
      juju-log "$hostname is cluster leader."
      return 0
    fi
  fi
  juju-log "$hostname is not cluster leader."
  return 1
}
522
##########################################################################
# Description: Determines whether enough data has been provided in
# configuration or relation data to configure HTTPS.
# Parameters: None
# Returns: 0 if HTTPS can be configured, 1 if not.
##########################################################################
https() {
  local r_id=""
  # manually configured cert+key in charm config wins outright
  if [[ -n "$(config-get ssl_cert)" && -n "$(config-get ssl_key)" ]] ; then
    return 0
  fi
  # otherwise look for a keystone unit advertising full HTTPS material
  for r_id in $(relation-ids identity-service) ; do
    for unit in $(relation-list -r $r_id) ; do
      if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] &&
         [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] &&
         [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] &&
         [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then
        return 0
      fi
    done
  done
  return 1
}
547
##########################################################################
# Description: For a given number of port mappings, configures apache2
# HTTPs local reverse proxying using certficates and keys provided in
# either configuration data (preferred) or relation data. Assumes ports
# are not in use (calling charm should ensure that).
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if reverse proxy(s) have been configured, 1 if not.
##########################################################################
enable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Enabling HTTPS for port mappings: $port_maps."

  # allow overriding of keystone provided certs with those set manually
  # in config.
  local cert=$(config-get ssl_cert)
  local key=$(config-get ssl_key)
  local ca_cert=""
  if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then
    juju-log "Inspecting identity-service relations for SSL certificate."
    local r_id=""
    cert=""
    key=""
    ca_cert=""
    for r_id in $(relation-ids identity-service) ; do
      for unit in $(relation-list -r $r_id) ; do
        # first unit to provide each value wins
        [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)"
        [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)"
        [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)"
      done
    done
    # relation data is base64-encoded; decode to PEM text
    [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di)
    [[ -n "$key" ]] && key=$(echo $key | base64 -di)
    [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di)
  else
    juju-log "Using SSL certificate provided in service config."
  fi

  # bail out when no usable cert/key was found anywhere
  [[ -z "$cert" ]] || [[ -z "$key" ]] &&
    juju-log "Expected but could not find SSL certificate data, not "\
    "configuring HTTPS!" && return 1

  apt-get -y install apache2
  # a2enmod prints nothing interesting when already enabled; any other
  # output means module state changed and apache needs a restart
  a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" &&
    http_restart=1

  mkdir -p /etc/apache2/ssl/$CHARM
  echo "$cert" >/etc/apache2/ssl/$CHARM/cert
  echo "$key" >/etc/apache2/ssl/$CHARM/key
  if [[ -n "$ca_cert" ]] ; then
    juju-log "Installing Keystone supplied CA cert."
    echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
    update-ca-certificates --fresh

    # XXX TODO: Find a better way of exporting this?
    if [[ "$CHARM" == "nova-cloud-controller" ]] ; then
      [[ -e /var/www/keystone_juju_ca_cert.crt ]] &&
        rm -rf /var/www/keystone_juju_ca_cert.crt
      ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \
        /var/www/keystone_juju_ca_cert.crt
    fi

  fi
  # one SSL vhost per mapping, terminating TLS and proxying to localhost
  for port_map in $port_maps ; do
    local ext_port=$(echo $port_map | cut -d: -f1)
    local int_port=$(echo $port_map | cut -d: -f2)
    juju-log "Creating apache2 reverse proxy vhost for $port_map."
    cat >/etc/apache2/sites-available/${CHARM}_${ext_port} <<END
Listen $ext_port
NameVirtualHost *:$ext_port
<VirtualHost *:$ext_port>
    ServerName $(unit-get private-address)
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/$CHARM/cert
    SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key
    ProxyPass / http://localhost:$int_port/
    ProxyPassReverse / http://localhost:$int_port/
    ProxyPreserveHost on
</VirtualHost>
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
END
    a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
      http_restart=1
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
644
##########################################################################
# Description: Ensure HTTPS reverse proxying is disabled for given port
# mappings.
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error.
##########################################################################
disable_https() {
  local port_maps="$@"
  local http_restart=""
  juju-log "Ensuring HTTPS disabled for $port_maps."
  # nothing to do when apache or our ssl dir was never set up
  if [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ; then
    return 0
  fi
  local ext_port int_port
  for port_map in $port_maps ; do
    ext_port=$(echo $port_map | cut -d: -f1)
    int_port=$(echo $port_map | cut -d: -f2)
    [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] || continue
    juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map."
    a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" &&
      http_restart=1
  done
  if [[ -n "$http_restart" ]] ; then
    service apache2 restart
  fi
}
670
671
##########################################################################
# Description: Ensures HTTPS is either enabled or disabled for given port
# mapping.
# Parameters: Variable number of proxy port mappings as
# $internal:$external.
# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not.
##########################################################################
setup_https() {
  # configure https via apache reverse proxying either
  # using certs provided by config or keystone.
  if [[ -z "$CHARM" ]] ; then
    error_out "setup_https(): CHARM not set."
  fi
  if https ; then
    enable_https $@
  else
    disable_https $@
  fi
}
690
##########################################################################
# Description: Determine correct API server listening port based on
# existence of HTTPS reverse proxy and/or haproxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for API service.
##########################################################################
determine_api_port() {
  local public_port="$1"
  local offset=0
  # haproxy in front (peers present or HA cluster) shifts the port by 10
  ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && offset=$((offset + 1))
  # apache HTTPS reverse proxy shifts it by another 10
  https >/dev/null 2>&1 && offset=$((offset + 1))
  echo $((public_port - offset * 10))
}
704
##########################################################################
# Description: Determine correct proxy listening port based on public IP +
# existence of HTTPS reverse proxy.
# Parameters: The standard public port for given service.
# Returns: The correct listening port for haproxy service public address.
##########################################################################
determine_haproxy_port() {
  local public_port="$1"
  local offset=0
  # an HTTPS reverse proxy in front shifts the port down by 10
  https >/dev/null 2>&1 && offset=$((offset + 1))
  echo $((public_port - offset * 10))
}
717
##########################################################################
# Description: Print the value for a given config option in an OpenStack
# .ini style configuration file.
# Parameters: File path, option to retrieve, optional
# section name (default=DEFAULT)
# Returns: Prints value if set, prints nothing otherwise.
##########################################################################
local_config_get() {
  # return config values set in openstack .ini config files.
  # default placeholders starting (eg, %AUTH_HOST%) treated as
  # unset values.
  local file="$1"
  local option="$2"
  local section="$3"
  [[ -z "$section" ]] && section="DEFAULT"
  # NOTE: the embedded interpreter is Python 2 (ConfigParser module,
  # print statement). file/section/option are shell-interpolated into
  # the script, so values containing quotes would break it - inputs
  # here are charm-controlled paths and option names.
  python -c "
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('$file')
try:
    value = config.get('$section', '$option')
except:
    print ''
    exit(0)
if value.startswith('%'): exit(0)
print value
"
}
746
##########################################################################
# Description: Creates an rc file exporting environment variables to a
# script_path local to the charm's installed directory.
# Any charm scripts run outside the juju hook environment can source this
# scriptrc to obtain updated config information necessary to perform health
# checks or service changes
#
# Parameters:
# An array of '=' delimited ENV_VAR:value combinations to export.
# If optional script_path key is not provided in the array, script_path
# defaults to scripts/scriptrc
##########################################################################
function save_script_rc {
  # fixed: use -z instead of the double-negative '! -n'
  if [ -z "$JUJU_UNIT_NAME" ]; then
    echo "Error: Missing JUJU_UNIT_NAME environment variable"
    exit 1
  fi
  # our default unit_path
  unit_path="/var/lib/juju/units/${JUJU_UNIT_NAME/\//-}/charm/scripts/scriptrc"
  echo $unit_path
  tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc"

  # build in /tmp and mv into place so readers never see a partial file
  echo "#!/bin/bash" > $tmp_rc
  for env_var in "${@}"
  do
    # fixed: the original wrapped this pipeline in backticks, executing
    # grep's (empty) output as a command and relying by accident on the
    # substitution's exit status; run the pipeline directly.
    if echo "$env_var" | grep -q script_path; then
      # well then we need to reset the new unit-local script path
      unit_path="/var/lib/juju/units/${JUJU_UNIT_NAME/\//-}/charm/${env_var/script_path=/}"
    else
      echo "export $env_var" >> $tmp_rc
    fi
  done
  chmod 755 $tmp_rc
  mv $tmp_rc $unit_path
}
7820
=== removed file 'hooks/charmhelpers/contrib/openstack/openstack_utils.py'
--- hooks/charmhelpers/contrib/openstack/openstack_utils.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/contrib/openstack/openstack_utils.py 1970-01-01 00:00:00 +0000
@@ -1,228 +0,0 @@
#!/usr/bin/python

# Common python helper functions used for OpenStack charms.

import apt_pkg as apt
import subprocess
import os
import sys

# Ubuntu Cloud Archive mirror and the key id its packages are signed with.
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

# Ubuntu series -> OpenStack release shipped in that series' archive.
ubuntu_openstack_release = {
    'oneiric': 'diablo',
    'precise': 'essex',
    'quantal': 'folsom',
    'raring': 'grizzly',
}


# Coordinated OpenStack version number -> release codename.
openstack_codenames = {
    '2011.2': 'diablo',
    '2012.1': 'essex',
    '2012.2': 'folsom',
    '2013.1': 'grizzly',
    '2013.2': 'havana',
}

# The ugly duckling
# (swift is versioned independently of the coordinated releases)
swift_codenames = {
    '1.4.3': 'diablo',
    '1.4.8': 'essex',
    '1.7.4': 'folsom',
    '1.7.6': 'grizzly',
    '1.7.7': 'grizzly',
    '1.8.0': 'grizzly',
}
38
39
def juju_log(msg):
    """Emit msg to the unit's juju log via the juju-log hook tool."""
    cmd = ['juju-log', msg]
    subprocess.check_call(cmd)
42
43
def error_out(msg):
    """Log a fatal error to juju and abort the hook with exit status 1."""
    juju_log("FATAL ERROR: %s" % msg)
    sys.exit(1)
47
48
def lsb_release():
    '''Return /etc/lsb-release in a dict.

    Keys and values are whitespace-stripped, e.g.
    {'DISTRIB_CODENAME': 'precise', ...}.
    '''
    d = {}
    # fixed: 'with' closes the file (the original leaked the handle) and
    # partition tolerates blank lines and '=' characters inside values,
    # which the original two-way unpack of split('=') crashed on.
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            if not line.strip():
                continue
            k, _, v = line.partition('=')
            d[k.strip()] = v.strip()
    return d
57
58
def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.'''
    # src is an "openstack-origin"-style setting: 'distro',
    # 'cloud:<series>-<release>[/pocket]', a 'deb ...' line, or 'ppa:...'.
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']

    rel = ''
    if src == 'distro':
        # stock archive: codename is determined by the Ubuntu series
        try:
            rel = ubuntu_openstack_release[ubuntu_rel]
        except KeyError:
            e = 'Could not derive openstack release for '\
                'this Ubuntu release: %s' % ubuntu_rel
            error_out(e)
        return rel

    if src.startswith('cloud:'):
        # e.g. 'cloud:precise-folsom/updates' -> 'folsom'
        ca_rel = src.split(':')[1]
        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided
    if src.startswith('deb') or src.startswith('ppa'):
        for k, v in openstack_codenames.iteritems():
            if v in src:
                return v
    # NOTE(review): implicitly returns None for unrecognised sources --
    # confirm callers handle that.
83
84
def get_os_codename_version(vers):
    '''Determine OpenStack codename from version number.'''
    # unknown version numbers are a fatal configuration error
    if vers not in openstack_codenames:
        error_out('Could not determine OpenStack codename for version %s'
                  % vers)
    return openstack_codenames[vers]
92
93
def get_os_version_codename(codename):
    '''Determine OpenStack version number from codename.'''
    # reverse lookup over the version -> codename map
    for version, name in openstack_codenames.iteritems():
        if name == codename:
            return version
    error_out('Could not derive OpenStack version for '
              'codename: %s' % codename)
102
103
def get_os_codename_package(pkg):
    '''Derive OpenStack release codename from an installed package.'''
    apt.init()
    cache = apt.Cache()

    try:
        # rebinds pkg from name to the apt package record
        pkg = cache[pkg]
    except:
        e = 'Could not determine version of installed package: %s' % pkg
        error_out(e)

    # upstream version string of the currently installed package
    vers = apt.UpstreamVersion(pkg.current_ver.ver_str)

    try:
        if 'swift' in pkg.name:
            # swift versions as x.y.z; first 5 chars (e.g. '1.4.3') key
            # into swift_codenames
            vers = vers[:5]
            return swift_codenames[vers]
        else:
            # coordinated releases version as YYYY.N; first 6 chars
            # (e.g. '2012.1') key into openstack_codenames
            vers = vers[:6]
            return openstack_codenames[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
127
128
def get_os_version_package(pkg):
    '''Derive OpenStack version number from an installed package.'''
    codename = get_os_codename_package(pkg)

    # swift uses its own version map; everything else follows the
    # coordinated YYYY.N releases
    if 'swift' in pkg:
        vers_map = swift_codenames
    else:
        vers_map = openstack_codenames

    for version, cname in vers_map.iteritems():
        if cname == codename:
            return version
    # NOTE: implicitly returns None for an unknown codename; the fatal
    # error below was deliberately disabled upstream.
    #e = "Could not determine OpenStack version for package: %s" % pkg
    #error_out(e)
143
def import_key(keyid):
    """Import an apt signing key from keyserver.ubuntu.com.

    Exits the hook via error_out() when the key cannot be fetched.
    """
    cmd = ['apt-key', 'adv', '--keyserver', 'keyserver.ubuntu.com',
           '--recv-keys', keyid]
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        error_out("Error importing repo key %s" % keyid)
151
def configure_installation_source(rel):
    '''Configure apt installation source.'''
    # rel forms: 'distro', 'ppa:...', 'deb ...[|keyid]',
    # or 'cloud:<series>-<release>[/pocket]'
    if rel == 'distro':
        # stock archive; nothing to configure
        return
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])
    elif rel[:3] == "deb":
        l = len(rel.split('|'))
        if l == 2:
            # 'deb http://... series main|KEYID'
            src, key = rel.split('|')
            juju_log("Importing PPA key from keyserver for %s" % src)
            import_key(key)
        elif l == 1:
            src = rel
        # NOTE(review): a 'deb' source containing more than one '|'
        # leaves src unset and raises NameError below -- confirm inputs.
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(src)
    elif rel[:6] == 'cloud:':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        # e.g. 'cloud:precise-folsom/updates' -> u_rel='precise',
        # ca_rel='folsom/updates'
        rel = rel.split(':')[1]
        u_rel = rel.split('-')[0]
        ca_rel = rel.split('-')[1]

        if u_rel != ubuntu_rel:
            e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
                'version (%s)' % (ca_rel, ubuntu_rel)
            error_out(e)

        if 'staging' in ca_rel:
            # staging is just a regular PPA.
            os_rel = ca_rel.split('/')[0]
            ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
            cmd = 'add-apt-repository -y %s' % ppa
            subprocess.check_call(cmd.split(' '))
            return

        # map charm config options to actual archive pockets.
        pockets = {
            'folsom': 'precise-updates/folsom',
            'folsom/updates': 'precise-updates/folsom',
            'folsom/proposed': 'precise-proposed/folsom',
            'grizzly': 'precise-updates/grizzly',
            'grizzly/updates': 'precise-updates/grizzly',
            'grizzly/proposed': 'precise-proposed/grizzly'
        }

        try:
            pocket = pockets[ca_rel]
        except KeyError:
            e = 'Invalid Cloud Archive release specified: %s' % rel
            error_out(e)

        src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
        # TODO: Replace key import with cloud archive keyring pkg.
        import_key(CLOUD_ARCHIVE_KEY_ID)

        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
            f.write(src)
    else:
        error_out("Invalid openstack-release specified: %s" % rel)
212
213
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts
    run outside the juju hook environment can source this scriptrc to
    obtain updated config information necessary to perform health checks
    or service changes.
    """
    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path)
    with open(juju_rc_path, 'wb') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # 'script_path' only selects the destination; never export it
        for var, value in env_vars.iteritems():
            if var != "script_path":
                rc_script.write('export %s=%s\n' % (var, value))
2290
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/core/hookenv.py 2013-11-05 18:43:49 +0000
@@ -9,6 +9,7 @@
9import yaml9import yaml
10import subprocess10import subprocess
11import UserDict11import UserDict
12from subprocess import CalledProcessError
1213
13CRITICAL = "CRITICAL"14CRITICAL = "CRITICAL"
14ERROR = "ERROR"15ERROR = "ERROR"
@@ -17,9 +18,47 @@
17DEBUG = "DEBUG"18DEBUG = "DEBUG"
18MARKER = object()19MARKER = object()
1920
21cache = {}
22
23
24def cached(func):
25 """Cache return values for multiple executions of func + args
26
27 For example:
28
29 @cached
30 def unit_get(attribute):
31 pass
32
33 unit_get('test')
34
35 will cache the result of unit_get + 'test' for future calls.
36 """
37 def wrapper(*args, **kwargs):
38 global cache
39 key = str((func, args, kwargs))
40 try:
41 return cache[key]
42 except KeyError:
43 res = func(*args, **kwargs)
44 cache[key] = res
45 return res
46 return wrapper
47
48
49def flush(key):
50 """Flushes any entries from function cache where the
51 key is found in the function+args """
52 flush_list = []
53 for item in cache:
54 if key in item:
55 flush_list.append(item)
56 for item in flush_list:
57 del cache[item]
58
2059
21def log(message, level=None):60def log(message, level=None):
22 "Write a message to the juju log"61 """Write a message to the juju log"""
23 command = ['juju-log']62 command = ['juju-log']
24 if level:63 if level:
25 command += ['-l', level]64 command += ['-l', level]
@@ -28,7 +67,7 @@
2867
2968
30class Serializable(UserDict.IterableUserDict):69class Serializable(UserDict.IterableUserDict):
31 "Wrapper, an object that can be serialized to yaml or json"70 """Wrapper, an object that can be serialized to yaml or json"""
3271
33 def __init__(self, obj):72 def __init__(self, obj):
34 # wrap the object73 # wrap the object
@@ -49,12 +88,20 @@
49 except KeyError:88 except KeyError:
50 raise AttributeError(attr)89 raise AttributeError(attr)
5190
91 def __getstate__(self):
92 # Pickle as a standard dictionary.
93 return self.data
94
95 def __setstate__(self, state):
96 # Unpickle into our wrapper.
97 self.data = state
98
52 def json(self):99 def json(self):
53 "Serialize the object to json"100 """Serialize the object to json"""
54 return json.dumps(self.data)101 return json.dumps(self.data)
55102
56 def yaml(self):103 def yaml(self):
57 "Serialize the object to yaml"104 """Serialize the object to yaml"""
58 return yaml.dump(self.data)105 return yaml.dump(self.data)
59106
60107
@@ -62,55 +109,62 @@
62 """A convenient bundling of the current execution context"""109 """A convenient bundling of the current execution context"""
63 context = {}110 context = {}
64 context['conf'] = config()111 context['conf'] = config()
65 context['reltype'] = relation_type()112 if relation_id():
66 context['relid'] = relation_id()113 context['reltype'] = relation_type()
114 context['relid'] = relation_id()
115 context['rel'] = relation_get()
67 context['unit'] = local_unit()116 context['unit'] = local_unit()
68 context['rels'] = relations()117 context['rels'] = relations()
69 context['rel'] = relation_get()
70 context['env'] = os.environ118 context['env'] = os.environ
71 return context119 return context
72120
73121
74def in_relation_hook():122def in_relation_hook():
75 "Determine whether we're running in a relation hook"123 """Determine whether we're running in a relation hook"""
76 return 'JUJU_RELATION' in os.environ124 return 'JUJU_RELATION' in os.environ
77125
78126
79def relation_type():127def relation_type():
80 "The scope for the current relation hook"128 """The scope for the current relation hook"""
81 return os.environ.get('JUJU_RELATION', None)129 return os.environ.get('JUJU_RELATION', None)
82130
83131
84def relation_id():132def relation_id():
85 "The relation ID for the current relation hook"133 """The relation ID for the current relation hook"""
86 return os.environ.get('JUJU_RELATION_ID', None)134 return os.environ.get('JUJU_RELATION_ID', None)
87135
88136
89def local_unit():137def local_unit():
90 "Local unit ID"138 """Local unit ID"""
91 return os.environ['JUJU_UNIT_NAME']139 return os.environ['JUJU_UNIT_NAME']
92140
93141
94def remote_unit():142def remote_unit():
95 "The remote unit for the current relation hook"143 """The remote unit for the current relation hook"""
96 return os.environ['JUJU_REMOTE_UNIT']144 return os.environ['JUJU_REMOTE_UNIT']
97145
98146
147def service_name():
148 """The name service group this unit belongs to"""
149 return local_unit().split('/')[0]
150
151
152@cached
99def config(scope=None):153def config(scope=None):
100 "Juju charm configuration"154 """Juju charm configuration"""
101 config_cmd_line = ['config-get']155 config_cmd_line = ['config-get']
102 if scope is not None:156 if scope is not None:
103 config_cmd_line.append(scope)157 config_cmd_line.append(scope)
104 config_cmd_line.append('--format=json')158 config_cmd_line.append('--format=json')
105 try:159 try:
106 config_data = json.loads(subprocess.check_output(config_cmd_line))160 return json.loads(subprocess.check_output(config_cmd_line))
107 except (ValueError, OSError, subprocess.CalledProcessError) as err:161 except ValueError:
108 log(str(err), level=ERROR)162 return None
109 raise163
110 return Serializable(config_data)164
111165@cached
112
113def relation_get(attribute=None, unit=None, rid=None):166def relation_get(attribute=None, unit=None, rid=None):
167 """Get relation information"""
114 _args = ['relation-get', '--format=json']168 _args = ['relation-get', '--format=json']
115 if rid:169 if rid:
116 _args.append('-r')170 _args.append('-r')
@@ -122,51 +176,63 @@
122 return json.loads(subprocess.check_output(_args))176 return json.loads(subprocess.check_output(_args))
123 except ValueError:177 except ValueError:
124 return None178 return None
179 except CalledProcessError, e:
180 if e.returncode == 2:
181 return None
182 raise
125183
126184
127def relation_set(relation_id=None, relation_settings={}, **kwargs):185def relation_set(relation_id=None, relation_settings={}, **kwargs):
186 """Set relation information for the current unit"""
128 relation_cmd_line = ['relation-set']187 relation_cmd_line = ['relation-set']
129 if relation_id is not None:188 if relation_id is not None:
130 relation_cmd_line.extend(('-r', relation_id))189 relation_cmd_line.extend(('-r', relation_id))
131 for k, v in relation_settings.items():190 for k, v in (relation_settings.items() + kwargs.items()):
132 relation_cmd_line.append('{}={}'.format(k, v))191 if v is None:
133 for k, v in kwargs.items():192 relation_cmd_line.append('{}='.format(k))
134 relation_cmd_line.append('{}={}'.format(k, v))193 else:
194 relation_cmd_line.append('{}={}'.format(k, v))
135 subprocess.check_call(relation_cmd_line)195 subprocess.check_call(relation_cmd_line)
136196 # Flush cache of any relation-gets for local unit
137197 flush(local_unit())
198
199
200@cached
138def relation_ids(reltype=None):201def relation_ids(reltype=None):
139 "A list of relation_ids"202 """A list of relation_ids"""
140 reltype = reltype or relation_type()203 reltype = reltype or relation_type()
141 relid_cmd_line = ['relation-ids', '--format=json']204 relid_cmd_line = ['relation-ids', '--format=json']
142 if reltype is not None:205 if reltype is not None:
143 relid_cmd_line.append(reltype)206 relid_cmd_line.append(reltype)
144 return json.loads(subprocess.check_output(relid_cmd_line))207 return json.loads(subprocess.check_output(relid_cmd_line)) or []
145 return []208 return []
146209
147210
211@cached
148def related_units(relid=None):212def related_units(relid=None):
149 "A list of related units"213 """A list of related units"""
150 relid = relid or relation_id()214 relid = relid or relation_id()
151 units_cmd_line = ['relation-list', '--format=json']215 units_cmd_line = ['relation-list', '--format=json']
152 if relid is not None:216 if relid is not None:
153 units_cmd_line.extend(('-r', relid))217 units_cmd_line.extend(('-r', relid))
154 return json.loads(subprocess.check_output(units_cmd_line))218 return json.loads(subprocess.check_output(units_cmd_line)) or []
155219
156220
221@cached
157def relation_for_unit(unit=None, rid=None):222def relation_for_unit(unit=None, rid=None):
158 "Get the json represenation of a unit's relation"223 """Get the json represenation of a unit's relation"""
159 unit = unit or remote_unit()224 unit = unit or remote_unit()
160 relation = relation_get(unit=unit, rid=rid)225 relation = relation_get(unit=unit, rid=rid)
161 for key in relation:226 for key in relation:
162 if key.endswith('-list'):227 if key.endswith('-list'):
163 relation[key] = relation[key].split()228 relation[key] = relation[key].split()
164 relation['__unit__'] = unit229 relation['__unit__'] = unit
165 return Serializable(relation)230 return relation
166231
167232
233@cached
168def relations_for_id(relid=None):234def relations_for_id(relid=None):
169 "Get relations of a specific relation ID"235 """Get relations of a specific relation ID"""
170 relation_data = []236 relation_data = []
171 relid = relid or relation_ids()237 relid = relid or relation_ids()
172 for unit in related_units(relid):238 for unit in related_units(relid):
@@ -176,8 +242,9 @@
176 return relation_data242 return relation_data
177243
178244
245@cached
179def relations_of_type(reltype=None):246def relations_of_type(reltype=None):
180 "Get relations of a specific type"247 """Get relations of a specific type"""
181 relation_data = []248 relation_data = []
182 reltype = reltype or relation_type()249 reltype = reltype or relation_type()
183 for relid in relation_ids(reltype):250 for relid in relation_ids(reltype):
@@ -187,13 +254,14 @@
187 return relation_data254 return relation_data
188255
189256
257@cached
190def relation_types():258def relation_types():
191 "Get a list of relation types supported by this charm"259 """Get a list of relation types supported by this charm"""
192 charmdir = os.environ.get('CHARM_DIR', '')260 charmdir = os.environ.get('CHARM_DIR', '')
193 mdf = open(os.path.join(charmdir, 'metadata.yaml'))261 mdf = open(os.path.join(charmdir, 'metadata.yaml'))
194 md = yaml.safe_load(mdf)262 md = yaml.safe_load(mdf)
195 rel_types = []263 rel_types = []
196 for key in ('provides','requires','peers'):264 for key in ('provides', 'requires', 'peers'):
197 section = md.get(key)265 section = md.get(key)
198 if section:266 if section:
199 rel_types.extend(section.keys())267 rel_types.extend(section.keys())
@@ -201,12 +269,14 @@
201 return rel_types269 return rel_types
202270
203271
272@cached
204def relations():273def relations():
274 """Get a nested dictionary of relation data for all related units"""
205 rels = {}275 rels = {}
206 for reltype in relation_types():276 for reltype in relation_types():
207 relids = {}277 relids = {}
208 for relid in relation_ids(reltype):278 for relid in relation_ids(reltype):
209 units = {}279 units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
210 for unit in related_units(relid):280 for unit in related_units(relid):
211 reldata = relation_get(unit=unit, rid=relid)281 reldata = relation_get(unit=unit, rid=relid)
212 units[unit] = reldata282 units[unit] = reldata
@@ -216,41 +286,70 @@
216286
217287
def open_port(port, protocol="TCP"):
    """Open a service network port via the Juju open-port tool."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
223293
224294
def close_port(port, protocol="TCP"):
    """Close a service network port via the Juju close-port tool."""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])
230300
231301
@cached
def unit_get(attribute):
    """Look up an attribute of this unit via unit-get (JSON decoded).

    Returns None when the tool's output cannot be parsed as JSON.
    """
    raw = subprocess.check_output(['unit-get', '--format=json', attribute])
    try:
        return json.loads(raw)
    except ValueError:
        return None
235310
236311
def unit_private_ip():
    """Get this unit's private IP address (the unit-get 'private-address' value)."""
    return unit_get('private-address')
239315
240316
class UnregisteredHookError(Exception):
    """Raised by Hooks.execute when the named hook has no registered handler."""
    pass
243320
244321
class Hooks(object):
    """A convenient handler for hook functions.

    Example:
        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            ...

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            ...

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self):
        super(Hooks, self).__init__()
        self._hooks = {}

    def register(self, name, function):
        """Register a hook handler under the given name."""
        self._hooks[name] = function

    def execute(self, args):
        """Execute a registered hook based on args[0] (the program name)."""
        hook_name = os.path.basename(args[0])
        try:
            handler = self._hooks[hook_name]
        except KeyError:
            raise UnregisteredHookError(hook_name)
        handler()

    def hook(self, *hook_names):
        """Decorator, registering the decorated function as a hook."""
        def wrapper(decorated):
            for name in hook_names:
                self.register(name, decorated)
            # NOTE(review): the original used for/else here; since the loop
            # never breaks, the else clause always ran, so the function's own
            # name is ALWAYS registered too -- kept, but made explicit.
            self.register(decorated.__name__, decorated)
            if '_' in decorated.__name__:
                self.register(
                    decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
371
372
def charm_dir():
    """Return the root directory of the current charm ($CHARM_DIR).

    Returns None when CHARM_DIR is not set in the environment.
    """
    return os.environ.get('CHARM_DIR')
268376
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/core/host.py 2013-11-05 18:43:49 +0000
@@ -8,46 +8,75 @@
8import os8import os
9import pwd9import pwd
10import grp10import grp
11import random
12import string
11import subprocess13import subprocess
1214import hashlib
13from hookenv import log, execution_environment15
16from collections import OrderedDict
17
18from hookenv import log
1419
1520
def service_start(service_name):
    """Start a system service; True when the service command exits 0."""
    return service('start', service_name)
1824
1925
def service_stop(service_name):
    """Stop a system service; True when the service command exits 0."""
    return service('stop', service_name)
29
30
def service_restart(service_name):
    """Restart a system service; True when the service command exits 0."""
    return service('restart', service_name)
34
35
def service_reload(service_name, restart_on_failure=False):
    """Reload a system service; optionally restart when the reload fails.

    Returns True when the reload (or fallback restart) succeeded.
    """
    reloaded = service('reload', service_name)
    if reloaded or not restart_on_failure:
        return reloaded
    return service('restart', service_name)
2242
2343
def service(action, service_name):
    """Control a system service via the `service` wrapper; True on exit 0."""
    return subprocess.call(['service', service_name, action]) == 0
28 elif os.path.exists(os.path.join('/etc/init.d', service_name)):48
29 cmd = [os.path.join('/etc/init.d', service_name), action]49
def service_running(service):
    """Determine whether a system service is running.

    NOTE(review): the parameter shadows the module-level service() helper;
    kept for interface compatibility.
    """
    status_cmd = ['service', service, 'status']
    try:
        output = subprocess.check_output(status_cmd)
    except subprocess.CalledProcessError:
        return False
    # Upstart reports "start/running"; SysV scripts typically "is running".
    return "start/running" in output or "is running" in output
61
62
def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Ensure a system user exists, creating it with useradd if needed.

    With no password (or system_user=True) the account is created as a
    --system user without a home directory. Returns the pwd entry.
    """
    try:
        existing = pwd.getpwnam(username)
    except KeyError:
        pass
    else:
        log('user {0} already exists!'.format(username))
        return existing
    log('creating user {0}'.format(username))
    cmd = ['useradd']
    if system_user or password is None:
        cmd.append('--system')
    else:
        cmd.extend([
            '--create-home',
            '--shell', shell,
            '--password', password,
        ])
    cmd.append(username)
    subprocess.check_call(cmd)
    return pwd.getpwnam(username)
@@ -66,36 +95,33 @@
6695
def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate the contents of a path with rsync; returns its stdout."""
    options = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags] + list(options) + [from_path, to_path]
    log(" ".join(cmd))
    return subprocess.check_output(cmd).strip()
77105
78106
def symlink(source, destination):
    """Create (or force-replace, via -f) a symbolic link."""
    log("Symlinking {} as {}".format(source, destination))
    subprocess.check_call(['ln', '-sf', source, destination])
90117
91118
92def mkdir(path, owner='root', group='root', perms=0555, force=False):119def mkdir(path, owner='root', group='root', perms=0555, force=False):
93 """Create a directory"""120 """Create a directory"""
94 context = execution_environment()
95 log("Making dir {} {}:{} {:o}".format(path, owner, group,121 log("Making dir {} {}:{} {:o}".format(path, owner, group,
96 perms))122 perms))
97 uid = pwd.getpwnam(owner.format(**context)).pw_uid123 uid = pwd.getpwnam(owner).pw_uid
98 gid = grp.getgrnam(group.format(**context)).gr_gid124 gid = grp.getgrnam(group).gr_gid
99 realpath = os.path.abspath(path)125 realpath = os.path.abspath(path)
100 if os.path.exists(realpath):126 if os.path.exists(realpath):
101 if force and not os.path.isdir(realpath):127 if force and not os.path.isdir(realpath):
@@ -106,50 +132,19 @@
106 os.chown(realpath, uid, gid)132 os.chown(realpath, uid, gid)
107133
108134
109def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs):135def write_file(path, content, owner='root', group='root', perms=0444):
110 """Create or overwrite a file with the contents of a string"""136 """Create or overwrite a file with the contents of a string"""
111 context = execution_environment()137 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
112 context.update(kwargs)138 uid = pwd.getpwnam(owner).pw_uid
113 log("Writing file {} {}:{} {:o}".format(path, owner, group,139 gid = grp.getgrnam(group).gr_gid
114 perms))140 with open(path, 'w') as target:
115 uid = pwd.getpwnam(owner.format(**context)).pw_uid
116 gid = grp.getgrnam(group.format(**context)).gr_gid
117 with open(path.format(**context), 'w') as target:
118 os.fchown(target.fileno(), uid, gid)141 os.fchown(target.fileno(), uid, gid)
119 os.fchmod(target.fileno(), perms)142 os.fchmod(target.fileno(), perms)
120 target.write(fmtstr.format(**context))143 target.write(content)
121
122
123def render_template_file(source, destination, **kwargs):
124 """Create or overwrite a file using a template"""
125 log("Rendering template {} for {}".format(source,
126 destination))
127 context = execution_environment()
128 with open(source.format(**context), 'r') as template:
129 write_file(destination.format(**context), template.read(),
130 **kwargs)
131
132
133def apt_install(packages, options=None, fatal=False):
134 """Install one or more packages"""
135 options = options or []
136 cmd = ['apt-get', '-y']
137 cmd.extend(options)
138 cmd.append('install')
139 if isinstance(packages, basestring):
140 cmd.append(packages)
141 else:
142 cmd.extend(packages)
143 log("Installing {} with options: {}".format(packages,
144 options))
145 if fatal:
146 subprocess.check_call(cmd)
147 else:
148 subprocess.call(cmd)
149144
150145
151def mount(device, mountpoint, options=None, persist=False):146def mount(device, mountpoint, options=None, persist=False):
152 '''Mount a filesystem'''147 """Mount a filesystem at a particular mountpoint"""
153 cmd_args = ['mount']148 cmd_args = ['mount']
154 if options is not None:149 if options is not None:
155 cmd_args.extend(['-o', options])150 cmd_args.extend(['-o', options])
@@ -166,7 +161,7 @@
166161
167162
168def umount(mountpoint, persist=False):163def umount(mountpoint, persist=False):
169 '''Unmount a filesystem'''164 """Unmount a filesystem"""
170 cmd_args = ['umount', mountpoint]165 cmd_args = ['umount', mountpoint]
171 try:166 try:
172 subprocess.check_output(cmd_args)167 subprocess.check_output(cmd_args)
@@ -180,9 +175,73 @@
180175
181176
def mounts():
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    result = []
    with open('/proc/mounts') as f:
        for line in f:
            fields = line.strip().split()
            # /proc/mounts lists device first; callers want mountpoint first.
            result.append([fields[1], fields[0]])
    return result
184
185
def file_hash(path):
    """Generate an md5 hex digest of the file at 'path', or None if missing.

    Fix: open in binary mode so the digest reflects the file's bytes
    exactly (text-mode reads are platform-dependent, and hashlib requires
    bytes under Python 3). On Linux/Python 2 the digest is unchanged.
    """
    if not os.path.exists(path):
        return None
    h = hashlib.md5()
    with open(path, 'rb') as source:
        h.update(source.read())
    return h.hexdigest()
195
196
def restart_on_change(restart_map):
    """Restart services based on configuration files changing

    This function is used as a decorator, for example

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
        })
        def ceph_client_changed():
            ...

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function.
    """
    def wrap(f):
        def wrapped_f(*args):
            # Hash every watched file before and after the wrapped call.
            before = dict((path, file_hash(path)) for path in restart_map)
            f(*args)
            restarts = []
            for path in restart_map:
                if before[path] != file_hash(path):
                    restarts += restart_map[path]
            # De-duplicate while preserving the declared order.
            for service_name in list(OrderedDict.fromkeys(restarts)):
                service('restart', service_name)
        return wrapped_f
    return wrap
226
227
def lsb_release(path='/etc/lsb-release'):
    """Return the contents of an lsb-release file as a dict.

    `path` defaults to /etc/lsb-release (new optional parameter, default
    preserves the original behaviour). Fix: split on the first '=' only,
    so values that themselves contain '=' are preserved instead of
    raising "too many values to unpack".
    """
    d = {}
    with open(path, 'r') as lsb:
        for line in lsb:
            k, v = line.split('=', 1)
            d[k.strip()] = v.strip()
    return d
236
237
def pwgen(length=None):
    """Generate a random password.

    When no length is given one between 35 and 44 is chosen at random.
    Easily-confused characters and vowels are excluded from the alphabet.
    NOTE(review): uses the `random` module, which is not cryptographically
    secure -- acceptable here only for generated service passwords.
    """
    if length is None:
        length = random.choice(range(35, 45))
    alphabet = [c for c in (string.letters + string.digits)
                if c not in 'l0QD1vAEIOUaeiou']
    return ''.join(random.choice(alphabet) for _ in range(length))
189248
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2013-11-05 18:43:49 +0000
@@ -1,15 +1,116 @@
1import importlib
1from yaml import safe_load2from yaml import safe_load
2from core.hookenv import config_get3from charmhelpers.core.host import (
3from subprocess import check_call4 lsb_release
5)
6from urlparse import (
7 urlparse,
8 urlunparse,
9)
10import subprocess
11from charmhelpers.core.hookenv import (
12 config,
13 log,
14)
15import apt_pkg
16
17CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
18deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
19"""
20PROPOSED_POCKET = """# Proposed
21deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
22"""
23
24
def filter_installed_packages(packages):
    """Returns the subset of packages that still require installation.

    Unknown package names are included in the result (with a warning)
    rather than dropped.
    """
    apt_pkg.init()
    cache = apt_pkg.Cache()
    missing = []
    for package in packages:
        try:
            entry = cache[package]
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            missing.append(package)
        else:
            # No current_ver means the package is known but not installed.
            if not entry.current_ver:
                missing.append(package)
    return missing
39
40
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages with apt-get.

    `packages` may be a single name or an iterable of names; when `fatal`
    is True a non-zero apt-get exit raises CalledProcessError.
    """
    options = options or []
    cmd = ['apt-get', '-y']
    cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    run = subprocess.check_call if fatal else subprocess.call
    run(cmd)
57
58
def apt_update(fatal=False):
    """Update the local apt package cache; raise on failure when fatal."""
    run = subprocess.check_call if fatal else subprocess.call
    run(['apt-get', 'update'])
66
67
def apt_purge(packages, fatal=False):
    """Purge one or more packages with apt-get purge."""
    cmd = ['apt-get', '-y', 'purge']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    run = subprocess.check_call if fatal else subprocess.call
    run(cmd)
80
81
def apt_hold(packages, fatal=False):
    """Mark one or more packages as held (apt-mark hold)."""
    cmd = ['apt-mark', 'hold']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Holding {}".format(packages))
    run = subprocess.check_call if fatal else subprocess.call
    run(cmd)
494
595
def add_source(source, key=None):
    """Add an apt package source.

    `source` may be a ppa:/http:/"deb "/cloud-archive: spec (handed to
    add-apt-repository), a cloud: pocket (written to the cloud-archive
    sources list after installing the keyring), or the literal
    'proposed'. An optional `key` is passed to apt-key import.
    """
    if source.startswith(('ppa:', 'http:', 'deb ', 'cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    if key:
        subprocess.check_call(['apt-key', 'import', key])
13114
14115
15class SourceConfigError(Exception):116class SourceConfigError(Exception):
@@ -32,15 +133,96 @@
32133
33 Note that 'null' (a.k.a. None) should not be quoted.134 Note that 'null' (a.k.a. None) should not be quoted.
34 """135 """
35 sources = safe_load(config_get(sources_var))136 sources = safe_load(config(sources_var))
36 keys = safe_load(config_get(keys_var))137 keys = config(keys_var)
37 if isinstance(sources, basestring) and isinstance(keys, basestring):138 if keys is not None:
139 keys = safe_load(keys)
140 if isinstance(sources, basestring) and (
141 keys is None or isinstance(keys, basestring)):
38 add_source(sources, keys)142 add_source(sources, keys)
39 else:143 else:
40 if not len(sources) == len(keys):144 if not len(sources) == len(keys):
41 msg = 'Install sources and keys lists are different lengths'145 msg = 'Install sources and keys lists are different lengths'
42 raise SourceConfigError(msg)146 raise SourceConfigError(msg)
43 for src_num in range(len(sources)):147 for src_num in range(len(sources)):
44 add_source(sources[src_num], sources[src_num])148 add_source(sources[src_num], keys[src_num])
45 if update:149 if update:
46 check_call(('apt-get', 'update'))150 apt_update(fatal=True)
151
152# The order of this list is very important. Handlers should be listed in from
153# least- to most-specific URL matching.
154FETCH_HANDLERS = (
155 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
156 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
157)
158
159
class UnhandledSource(Exception):
    """Raised when no registered fetch handler can process a source URL."""
    pass
162
163
def install_remote(source):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules
    Options supported are submodule-specific"""
    # can_handle() may return an explanatory string, which is truthy but
    # still means "cannot handle" -- only the literal True qualifies.
    eligible = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in eligible:
        try:
            installed_to = handler.install(source)
        except UnhandledSource:
            pass
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
185
186
def install_from_config(config_var_name):
    """Fetch and install the remote source named by a charm config option.

    Looks up `config_var_name` in the charm config and hands its value to
    install_remote(); returns the path the source was installed to.
    """
    charm_config = config()
    source = charm_config[config_var_name]
    return install_remote(source)
191
192
class BaseFetchHandler(object):
    """Base class for FetchHandler implementations in fetch plugins"""

    def can_handle(self, source):
        """Returns True if the source can be handled. Otherwise returns
        a string explaining why it cannot"""
        return "Wrong source type"

    def install(self, source):
        """Try to download and unpack the source. Return the path to the
        unpacked files or raise UnhandledSource."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        """Split a URL into its components."""
        return urlparse(url)

    def base_url(self, url):
        """Return url without querystring or fragment"""
        components = list(self.parse_url(url))
        # Blank out params/query/fragment (positions 3..5 keep only the
        # first three plus path-params slot untouched up to index 4).
        components[4:] = [''] * len(components[4:])
        return urlunparse(components)
213
214
def plugins(fetch_handlers=None):
    """Instantiate the fetch handler classes named in fetch_handlers.

    Defaults to FETCH_HANDLERS; dotted names that fail to import are
    skipped with a log message so optional handlers can be omitted.
    """
    handler_names = fetch_handlers or FETCH_HANDLERS
    plugin_list = []
    for handler_name in handler_names:
        package, classname = handler_name.rsplit('.', 1)
        try:
            module = importlib.import_module(package)
            plugin_list.append(getattr(module, classname)())
        except (ImportError, AttributeError):
            # Skip missing plugins so that they can be ommitted from
            # installation if desired
            log("FetchHandler {} not found, skipping plugin".format(handler_name))
    return plugin_list
47229
=== added file 'hooks/charmhelpers/fetch/archiveurl.py'
--- hooks/charmhelpers/fetch/archiveurl.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/fetch/archiveurl.py 2013-11-05 18:43:49 +0000
@@ -0,0 +1,48 @@
1import os
2import urllib2
3from charmhelpers.fetch import (
4 BaseFetchHandler,
5 UnhandledSource
6)
7from charmhelpers.payload.archive import (
8 get_archive_handler,
9 extract,
10)
11from charmhelpers.core.host import mkdir
12
13
14class ArchiveUrlFetchHandler(BaseFetchHandler):
15 """Handler for archives via generic URLs"""
16 def can_handle(self, source):
17 url_parts = self.parse_url(source)
18 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
19 return "Wrong source type"
20 if get_archive_handler(self.base_url(source)):
21 return True
22 return False
23
24 def download(self, source, dest):
25 # propogate all exceptions
26 # URLError, OSError, etc
27 response = urllib2.urlopen(source)
28 try:
29 with open(dest, 'w') as dest_file:
30 dest_file.write(response.read())
31 except Exception as e:
32 if os.path.isfile(dest):
33 os.unlink(dest)
34 raise e
35
36 def install(self, source):
37 url_parts = self.parse_url(source)
38 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
39 if not os.path.exists(dest_dir):
40 mkdir(dest_dir, perms=0755)
41 dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
42 try:
43 self.download(source, dld_file)
44 except urllib2.URLError as e:
45 raise UnhandledSource(e.reason)
46 except OSError as e:
47 raise UnhandledSource(e.strerror)
48 return extract(dld_file)
049
=== added file 'hooks/charmhelpers/fetch/bzrurl.py'
--- hooks/charmhelpers/fetch/bzrurl.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/fetch/bzrurl.py 2013-11-05 18:43:49 +0000
@@ -0,0 +1,49 @@
1import os
2from charmhelpers.fetch import (
3 BaseFetchHandler,
4 UnhandledSource
5)
6from charmhelpers.core.host import mkdir
7
8try:
9 from bzrlib.branch import Branch
10except ImportError:
11 from charmhelpers.fetch import apt_install
12 apt_install("python-bzrlib")
13 from bzrlib.branch import Branch
14
15class BzrUrlFetchHandler(BaseFetchHandler):
16 """Handler for bazaar branches via generic and lp URLs"""
17 def can_handle(self, source):
18 url_parts = self.parse_url(source)
19 if url_parts.scheme not in ('bzr+ssh', 'lp'):
20 return False
21 else:
22 return True
23
24 def branch(self, source, dest):
25 url_parts = self.parse_url(source)
26 # If we use lp:branchname scheme we need to load plugins
27 if not self.can_handle(source):
28 raise UnhandledSource("Cannot handle {}".format(source))
29 if url_parts.scheme == "lp":
30 from bzrlib.plugin import load_plugins
31 load_plugins()
32 try:
33 remote_branch = Branch.open(source)
34 remote_branch.bzrdir.sprout(dest).open_branch()
35 except Exception as e:
36 raise e
37
38 def install(self, source):
39 url_parts = self.parse_url(source)
40 branch_name = url_parts.path.strip("/").split("/")[-1]
41 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
42 if not os.path.exists(dest_dir):
43 mkdir(dest_dir, perms=0755)
44 try:
45 self.branch(source, dest_dir)
46 except OSError as e:
47 raise UnhandledSource(e.strerror)
48 return dest_dir
49
050
=== removed directory 'hooks/charmhelpers/payload'
=== removed file 'hooks/charmhelpers/payload/__init__.py'
--- hooks/charmhelpers/payload/__init__.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/payload/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,1 +0,0 @@
1"Tools for working with files injected into a charm just before deployment."
20
=== removed file 'hooks/charmhelpers/payload/execd.py'
--- hooks/charmhelpers/payload/execd.py 2013-06-07 09:39:50 +0000
+++ hooks/charmhelpers/payload/execd.py 1970-01-01 00:00:00 +0000
@@ -1,40 +0,0 @@
1#!/usr/bin/env python
2
3import os
4import sys
5import subprocess
6from charmhelpers.core import hookenv
7
8
def default_execd_dir():
    """Return the default exec.d directory inside the current charm."""
    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
11
12
def execd_module_paths(execd_dir=None):
    """Yield each directory directly under execd_dir (default: exec.d)."""
    if not execd_dir:
        execd_dir = default_execd_dir()
    for entry in os.listdir(execd_dir):
        candidate = os.path.join(execd_dir, entry)
        if os.path.isdir(candidate):
            yield candidate
20
21
def execd_submodule_paths(submodule, execd_dir=None):
    """Yield each executable file named `submodule` inside exec.d modules."""
    for module_path in execd_module_paths(execd_dir):
        candidate = os.path.join(module_path, submodule)
        if os.access(candidate, os.X_OK) and os.path.isfile(candidate):
            yield candidate
27
28
def execd_run(submodule, execd_dir=None, die_on_error=False):
    """Run every executable named `submodule` under the exec.d modules.

    Failures are logged; when die_on_error is True the process exits with
    the failing script's return code.
    """
    for script in execd_submodule_paths(submodule, execd_dir):
        try:
            subprocess.check_call(script, shell=True)
        except subprocess.CalledProcessError as exc:
            hookenv.log(exc.output)
            if die_on_error:
                sys.exit(exc.returncode)
37
38
def execd_preinstall(execd_dir=None):
    """Run charm-pre-install scripts from every exec.d module.

    Fix: the original called execd_run(execd_dir, 'charm-pre-install'),
    passing the directory as the submodule name and the submodule name as
    the directory (execd_run's signature is (submodule, execd_dir=None,
    die_on_error=False)), so pre-install scripts were never found.
    """
    execd_run('charm-pre-install', execd_dir=execd_dir)
410
=== modified file 'hooks/hooks.py'
--- hooks/hooks.py 2013-07-03 05:54:19 +0000
+++ hooks/hooks.py 2013-11-05 18:43:49 +0000
@@ -10,12 +10,15 @@
10 service_start,10 service_start,
11 service_stop,11 service_stop,
12 adduser,12 adduser,
13 apt_install,
14 log,13 log,
15 mkdir,14 mkdir,
16 symlink,15 symlink,
17)16)
1817
18from charmhelpers.fetch import (
19 apt_install,
20)
21
19from charmhelpers.core.hookenv import (22from charmhelpers.core.hookenv import (
20 Hooks,23 Hooks,
21 relation_get,24 relation_get,
@@ -69,7 +72,7 @@
6972
70def add_extra_repos():73def add_extra_repos():
71 extra_repos = config('extra_archives')74 extra_repos = config('extra_archives')
72 if extra_repos.data: #serialize cannot be cast as boolean75 if extra_repos != None:
73 repos_added = False76 repos_added = False
74 extra_repos_added = set()77 extra_repos_added = set()
75 for repo in extra_repos.split():78 for repo in extra_repos.split():

Subscribers

People subscribed via source and target branches

to all changes: