Merge lp:~thumper/charms/trusty/python-django/clean-contrib into lp:charms/python-django

Proposed by Tim Penhey
Status: Merged
Merged at revision: 39
Proposed branch: lp:~thumper/charms/trusty/python-django/clean-contrib
Merge into: lp:charms/python-django
Diff against target: 5075 lines (+0/-4900)
29 files modified
charm-helpers.yaml (+0/-1)
hooks/charmhelpers/contrib/ansible/__init__.py (+0/-165)
hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-184)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-216)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-156)
hooks/charmhelpers/contrib/hahelpers/apache.py (+0/-59)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+0/-183)
hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602)
hooks/charmhelpers/contrib/network/ip.py (+0/-69)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+0/-75)
hooks/charmhelpers/contrib/openstack/alternatives.py (+0/-17)
hooks/charmhelpers/contrib/openstack/context.py (+0/-700)
hooks/charmhelpers/contrib/openstack/neutron.py (+0/-171)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+0/-2)
hooks/charmhelpers/contrib/openstack/templating.py (+0/-280)
hooks/charmhelpers/contrib/openstack/utils.py (+0/-450)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+0/-83)
hooks/charmhelpers/contrib/python/packages.py (+0/-76)
hooks/charmhelpers/contrib/python/version.py (+0/-18)
hooks/charmhelpers/contrib/saltstack/__init__.py (+0/-102)
hooks/charmhelpers/contrib/ssl/__init__.py (+0/-78)
hooks/charmhelpers/contrib/ssl/service.py (+0/-267)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+0/-387)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+0/-62)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+0/-88)
hooks/charmhelpers/contrib/storage/linux/utils.py (+0/-35)
hooks/charmhelpers/contrib/templating/contexts.py (+0/-104)
hooks/charmhelpers/contrib/templating/pyformat.py (+0/-13)
hooks/charmhelpers/contrib/unison/__init__.py (+0/-257)
To merge this branch: bzr merge lp:~thumper/charms/trusty/python-django/clean-contrib
Reviewer Review Type Date Requested Status
Tim Van Steenburgh (community) Approve
Review via email: mp+260388@code.launchpad.net

Description of the change

This branch just removes the charmhelpers/contrib package.

It isn't used anywhere.

To post a comment you must log in.
Revision history for this message
Tim Van Steenburgh (tvansteenburgh) wrote :

+1 LGTM.

review: Approve

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'charm-helpers.yaml'
--- charm-helpers.yaml 2013-11-26 17:12:54 +0000
+++ charm-helpers.yaml 2015-05-27 22:03:53 +0000
@@ -3,4 +3,3 @@
3include:3include:
4 - core4 - core
5 - fetch5 - fetch
6 - contrib
76
=== removed directory 'hooks/charmhelpers/contrib'
=== removed file 'hooks/charmhelpers/contrib/__init__.py'
=== removed directory 'hooks/charmhelpers/contrib/ansible'
=== removed file 'hooks/charmhelpers/contrib/ansible/__init__.py'
--- hooks/charmhelpers/contrib/ansible/__init__.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/ansible/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,165 +0,0 @@
1# Copyright 2013 Canonical Ltd.
2#
3# Authors:
4# Charm Helpers Developers <juju@lists.ubuntu.com>
5"""Charm Helpers ansible - declare the state of your machines.
6
7This helper enables you to declare your machine state, rather than
8program it procedurally (and have to test each change to your procedures).
9Your install hook can be as simple as:
10
11{{{
12import charmhelpers.contrib.ansible
13
14
15def install():
16 charmhelpers.contrib.ansible.install_ansible_support()
17 charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
18}}}
19
20and won't need to change (nor will its tests) when you change the machine
21state.
22
23All of your juju config and relation-data are available as template
24variables within your playbooks and templates. An install playbook looks
25something like:
26
27{{{
28---
29- hosts: localhost
30 user: root
31
32 tasks:
33 - name: Add private repositories.
34 template:
35 src: ../templates/private-repositories.list.jinja2
36 dest: /etc/apt/sources.list.d/private.list
37
38 - name: Update the cache.
39 apt: update_cache=yes
40
41 - name: Install dependencies.
42 apt: pkg={{ item }}
43 with_items:
44 - python-mimeparse
45 - python-webob
46 - sunburnt
47
48 - name: Setup groups.
49 group: name={{ item.name }} gid={{ item.gid }}
50 with_items:
51 - { name: 'deploy_user', gid: 1800 }
52 - { name: 'service_user', gid: 1500 }
53
54 ...
55}}}
56
57Read more online about playbooks[1] and standard ansible modules[2].
58
59[1] http://www.ansibleworks.com/docs/playbooks.html
60[2] http://www.ansibleworks.com/docs/modules.html
61"""
62import os
63import subprocess
64
65import charmhelpers.contrib.templating.contexts
66import charmhelpers.core.host
67import charmhelpers.core.hookenv
68import charmhelpers.fetch
69
70
71charm_dir = os.environ.get('CHARM_DIR', '')
72ansible_hosts_path = '/etc/ansible/hosts'
73# Ansible will automatically include any vars in the following
74# file in its inventory when run locally.
75ansible_vars_path = '/etc/ansible/host_vars/localhost'
76
77
78def install_ansible_support(from_ppa=True):
79 """Installs the ansible package.
80
81 By default it is installed from the PPA [1] linked from
82 the ansible website [2].
83
84 [1] https://launchpad.net/~rquillo/+archive/ansible
85 [2] http://www.ansibleworks.com/docs/gettingstarted.html#ubuntu-and-debian
86
87 If from_ppa is false, you must ensure that the package is available
88 from a configured repository.
89 """
90 if from_ppa:
91 charmhelpers.fetch.add_source('ppa:rquillo/ansible')
92 charmhelpers.fetch.apt_update(fatal=True)
93 charmhelpers.fetch.apt_install('ansible')
94 with open(ansible_hosts_path, 'w+') as hosts_file:
95 hosts_file.write('localhost ansible_connection=local')
96
97
98def apply_playbook(playbook, tags=None):
99 tags = tags or []
100 tags = ",".join(tags)
101 charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
102 ansible_vars_path, namespace_separator='__',
103 allow_hyphens_in_keys=False)
104 call = [
105 'ansible-playbook',
106 '-c',
107 'local',
108 playbook,
109 ]
110 if tags:
111 call.extend(['--tags', '{}'.format(tags)])
112 subprocess.check_call(call)
113
114
115class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
116 """Run a playbook with the hook-name as the tag.
117
118 This helper builds on the standard hookenv.Hooks helper,
119 but additionally runs the playbook with the hook-name specified
120 using --tags (ie. running all the tasks tagged with the hook-name).
121
122 Example:
123 hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')
124
125 # All the tasks within my_machine_state.yaml tagged with 'install'
126 # will be run automatically after do_custom_work()
127 @hooks.hook()
128 def install():
129 do_custom_work()
130
131 # For most of your hooks, you won't need to do anything other
132 # than run the tagged tasks for the hook:
133 @hooks.hook('config-changed', 'start', 'stop')
134 def just_use_playbook():
135 pass
136
137 # As a convenience, you can avoid the above noop function by specifying
138 # the hooks which are handled by ansible-only and they'll be registered
139 # for you:
140 # hooks = AnsibleHooks(
141 # 'playbooks/my_machine_state.yaml',
142 # default_hooks=['config-changed', 'start', 'stop'])
143
144 if __name__ == "__main__":
145 # execute a hook based on the name the program is called by
146 hooks.execute(sys.argv)
147 """
148
149 def __init__(self, playbook_path, default_hooks=None):
150 """Register any hooks handled by ansible."""
151 super(AnsibleHooks, self).__init__()
152
153 self.playbook_path = playbook_path
154
155 default_hooks = default_hooks or []
156 noop = lambda *args, **kwargs: None
157 for hook in default_hooks:
158 self.register(hook, noop)
159
160 def execute(self, args):
161 """Execute the hook followed by the playbook using the hook as tag."""
162 super(AnsibleHooks, self).execute(args)
163 hook_name = os.path.basename(args[0])
164 charmhelpers.contrib.ansible.apply_playbook(
165 self.playbook_path, tags=[hook_name])
1660
=== removed directory 'hooks/charmhelpers/contrib/charmhelpers'
=== removed file 'hooks/charmhelpers/contrib/charmhelpers/__init__.py'
--- hooks/charmhelpers/contrib/charmhelpers/__init__.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,184 +0,0 @@
1# Copyright 2012 Canonical Ltd. This software is licensed under the
2# GNU Affero General Public License version 3 (see the file LICENSE).
3
4import warnings
5warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning)
6
7"""Helper functions for writing Juju charms in Python."""
8
9__metaclass__ = type
10__all__ = [
11 #'get_config', # core.hookenv.config()
12 #'log', # core.hookenv.log()
13 #'log_entry', # core.hookenv.log()
14 #'log_exit', # core.hookenv.log()
15 #'relation_get', # core.hookenv.relation_get()
16 #'relation_set', # core.hookenv.relation_set()
17 #'relation_ids', # core.hookenv.relation_ids()
18 #'relation_list', # core.hookenv.relation_units()
19 #'config_get', # core.hookenv.config()
20 #'unit_get', # core.hookenv.unit_get()
21 #'open_port', # core.hookenv.open_port()
22 #'close_port', # core.hookenv.close_port()
23 #'service_control', # core.host.service()
24 'unit_info', # client-side, NOT IMPLEMENTED
25 'wait_for_machine', # client-side, NOT IMPLEMENTED
26 'wait_for_page_contents', # client-side, NOT IMPLEMENTED
27 'wait_for_relation', # client-side, NOT IMPLEMENTED
28 'wait_for_unit', # client-side, NOT IMPLEMENTED
29]
30
31import operator
32from shelltoolbox import (
33 command,
34)
35import tempfile
36import time
37import urllib2
38import yaml
39
40SLEEP_AMOUNT = 0.1
41# We create a juju_status Command here because it makes testing much,
42# much easier.
43juju_status = lambda: command('juju')('status')
44
45# re-implemented as charmhelpers.fetch.configure_sources()
46#def configure_source(update=False):
47# source = config_get('source')
48# if ((source.startswith('ppa:') or
49# source.startswith('cloud:') or
50# source.startswith('http:'))):
51# run('add-apt-repository', source)
52# if source.startswith("http:"):
53# run('apt-key', 'import', config_get('key'))
54# if update:
55# run('apt-get', 'update')
56
57
58# DEPRECATED: client-side only
59def make_charm_config_file(charm_config):
60 charm_config_file = tempfile.NamedTemporaryFile()
61 charm_config_file.write(yaml.dump(charm_config))
62 charm_config_file.flush()
63 # The NamedTemporaryFile instance is returned instead of just the name
64 # because we want to take advantage of garbage collection-triggered
65 # deletion of the temp file when it goes out of scope in the caller.
66 return charm_config_file
67
68
69# DEPRECATED: client-side only
70def unit_info(service_name, item_name, data=None, unit=None):
71 if data is None:
72 data = yaml.safe_load(juju_status())
73 service = data['services'].get(service_name)
74 if service is None:
75 # XXX 2012-02-08 gmb:
76 # This allows us to cope with the race condition that we
77 # have between deploying a service and having it come up in
78 # `juju status`. We could probably do with cleaning it up so
79 # that it fails a bit more noisily after a while.
80 return ''
81 units = service['units']
82 if unit is not None:
83 item = units[unit][item_name]
84 else:
85 # It might seem odd to sort the units here, but we do it to
86 # ensure that when no unit is specified, the first unit for the
87 # service (or at least the one with the lowest number) is the
88 # one whose data gets returned.
89 sorted_unit_names = sorted(units.keys())
90 item = units[sorted_unit_names[0]][item_name]
91 return item
92
93
94# DEPRECATED: client-side only
95def get_machine_data():
96 return yaml.safe_load(juju_status())['machines']
97
98
99# DEPRECATED: client-side only
100def wait_for_machine(num_machines=1, timeout=300):
101 """Wait `timeout` seconds for `num_machines` machines to come up.
102
103 This wait_for... function can be called by other wait_for functions
104 whose timeouts might be too short in situations where only a bare
105 Juju setup has been bootstrapped.
106
107 :return: A tuple of (num_machines, time_taken). This is used for
108 testing.
109 """
110 # You may think this is a hack, and you'd be right. The easiest way
111 # to tell what environment we're working in (LXC vs EC2) is to check
112 # the dns-name of the first machine. If it's localhost we're in LXC
113 # and we can just return here.
114 if get_machine_data()[0]['dns-name'] == 'localhost':
115 return 1, 0
116 start_time = time.time()
117 while True:
118 # Drop the first machine, since it's the Zookeeper and that's
119 # not a machine that we need to wait for. This will only work
120 # for EC2 environments, which is why we return early above if
121 # we're in LXC.
122 machine_data = get_machine_data()
123 non_zookeeper_machines = [
124 machine_data[key] for key in machine_data.keys()[1:]]
125 if len(non_zookeeper_machines) >= num_machines:
126 all_machines_running = True
127 for machine in non_zookeeper_machines:
128 if machine.get('instance-state') != 'running':
129 all_machines_running = False
130 break
131 if all_machines_running:
132 break
133 if time.time() - start_time >= timeout:
134 raise RuntimeError('timeout waiting for service to start')
135 time.sleep(SLEEP_AMOUNT)
136 return num_machines, time.time() - start_time
137
138
139# DEPRECATED: client-side only
140def wait_for_unit(service_name, timeout=480):
141 """Wait `timeout` seconds for a given service name to come up."""
142 wait_for_machine(num_machines=1)
143 start_time = time.time()
144 while True:
145 state = unit_info(service_name, 'agent-state')
146 if 'error' in state or state == 'started':
147 break
148 if time.time() - start_time >= timeout:
149 raise RuntimeError('timeout waiting for service to start')
150 time.sleep(SLEEP_AMOUNT)
151 if state != 'started':
152 raise RuntimeError('unit did not start, agent-state: ' + state)
153
154
155# DEPRECATED: client-side only
156def wait_for_relation(service_name, relation_name, timeout=120):
157 """Wait `timeout` seconds for a given relation to come up."""
158 start_time = time.time()
159 while True:
160 relation = unit_info(service_name, 'relations').get(relation_name)
161 if relation is not None and relation['state'] == 'up':
162 break
163 if time.time() - start_time >= timeout:
164 raise RuntimeError('timeout waiting for relation to be up')
165 time.sleep(SLEEP_AMOUNT)
166
167
168# DEPRECATED: client-side only
169def wait_for_page_contents(url, contents, timeout=120, validate=None):
170 if validate is None:
171 validate = operator.contains
172 start_time = time.time()
173 while True:
174 try:
175 stream = urllib2.urlopen(url)
176 except (urllib2.HTTPError, urllib2.URLError):
177 pass
178 else:
179 page = stream.read()
180 if validate(page, contents):
181 return page
182 if time.time() - start_time >= timeout:
183 raise RuntimeError('timeout waiting for contents of ' + url)
184 time.sleep(SLEEP_AMOUNT)
1850
=== removed directory 'hooks/charmhelpers/contrib/charmsupport'
=== removed file 'hooks/charmhelpers/contrib/charmsupport/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py'
--- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
@@ -1,216 +0,0 @@
1"""Compatibility with the nrpe-external-master charm"""
2# Copyright 2012 Canonical Ltd.
3#
4# Authors:
5# Matthew Wedgwood <matthew.wedgwood@canonical.com>
6
7import subprocess
8import pwd
9import grp
10import os
11import re
12import shlex
13import yaml
14
15from charmhelpers.core.hookenv import (
16 config,
17 local_unit,
18 log,
19 relation_ids,
20 relation_set,
21)
22
23from charmhelpers.core.host import service
24
25# This module adds compatibility with the nrpe-external-master and plain nrpe
26# subordinate charms. To use it in your charm:
27#
28# 1. Update metadata.yaml
29#
30# provides:
31# (...)
32# nrpe-external-master:
33# interface: nrpe-external-master
34# scope: container
35#
36# and/or
37#
38# provides:
39# (...)
40# local-monitors:
41# interface: local-monitors
42# scope: container
43
44#
45# 2. Add the following to config.yaml
46#
47# nagios_context:
48# default: "juju"
49# type: string
50# description: |
51# Used by the nrpe subordinate charms.
52# A string that will be prepended to instance name to set the host name
53# in nagios. So for instance the hostname would be something like:
54# juju-myservice-0
55# If you're running multiple environments with the same services in them
56# this allows you to differentiate between them.
57#
58# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
59#
60# 4. Update your hooks.py with something like this:
61#
62# from charmsupport.nrpe import NRPE
63# (...)
64# def update_nrpe_config():
65# nrpe_compat = NRPE()
66# nrpe_compat.add_check(
67# shortname = "myservice",
68# description = "Check MyService",
69# check_cmd = "check_http -w 2 -c 10 http://localhost"
70# )
71# nrpe_compat.add_check(
72# "myservice_other",
73# "Check for widget failures",
74# check_cmd = "/srv/myapp/scripts/widget_check"
75# )
76# nrpe_compat.write()
77#
78# def config_changed():
79# (...)
80# update_nrpe_config()
81#
82# def nrpe_external_master_relation_changed():
83# update_nrpe_config()
84#
85# def local_monitors_relation_changed():
86# update_nrpe_config()
87#
88# 5. ln -s hooks.py nrpe-external-master-relation-changed
89# ln -s hooks.py local-monitors-relation-changed
90
91
92class CheckException(Exception):
93 pass
94
95
96class Check(object):
97 shortname_re = '[A-Za-z0-9-_]+$'
98 service_template = ("""
99#---------------------------------------------------
100# This file is Juju managed
101#---------------------------------------------------
102define service {{
103 use active-service
104 host_name {nagios_hostname}
105 service_description {nagios_hostname}[{shortname}] """
106 """{description}
107 check_command check_nrpe!{command}
108 servicegroups {nagios_servicegroup}
109}}
110""")
111
112 def __init__(self, shortname, description, check_cmd):
113 super(Check, self).__init__()
114 # XXX: could be better to calculate this from the service name
115 if not re.match(self.shortname_re, shortname):
116 raise CheckException("shortname must match {}".format(
117 Check.shortname_re))
118 self.shortname = shortname
119 self.command = "check_{}".format(shortname)
120 # Note: a set of invalid characters is defined by the
121 # Nagios server config
122 # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
123 self.description = description
124 self.check_cmd = self._locate_cmd(check_cmd)
125
126 def _locate_cmd(self, check_cmd):
127 search_path = (
128 '/usr/lib/nagios/plugins',
129 '/usr/local/lib/nagios/plugins',
130 )
131 parts = shlex.split(check_cmd)
132 for path in search_path:
133 if os.path.exists(os.path.join(path, parts[0])):
134 command = os.path.join(path, parts[0])
135 if len(parts) > 1:
136 command += " " + " ".join(parts[1:])
137 return command
138 log('Check command not found: {}'.format(parts[0]))
139 return ''
140
141 def write(self, nagios_context, hostname):
142 nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
143 self.command)
144 with open(nrpe_check_file, 'w') as nrpe_check_config:
145 nrpe_check_config.write("# check {}\n".format(self.shortname))
146 nrpe_check_config.write("command[{}]={}\n".format(
147 self.command, self.check_cmd))
148
149 if not os.path.exists(NRPE.nagios_exportdir):
150 log('Not writing service config as {} is not accessible'.format(
151 NRPE.nagios_exportdir))
152 else:
153 self.write_service_config(nagios_context, hostname)
154
155 def write_service_config(self, nagios_context, hostname):
156 for f in os.listdir(NRPE.nagios_exportdir):
157 if re.search('.*{}.cfg'.format(self.command), f):
158 os.remove(os.path.join(NRPE.nagios_exportdir, f))
159
160 templ_vars = {
161 'nagios_hostname': hostname,
162 'nagios_servicegroup': nagios_context,
163 'description': self.description,
164 'shortname': self.shortname,
165 'command': self.command,
166 }
167 nrpe_service_text = Check.service_template.format(**templ_vars)
168 nrpe_service_file = '{}/service__{}_{}.cfg'.format(
169 NRPE.nagios_exportdir, hostname, self.command)
170 with open(nrpe_service_file, 'w') as nrpe_service_config:
171 nrpe_service_config.write(str(nrpe_service_text))
172
173 def run(self):
174 subprocess.call(self.check_cmd)
175
176
177class NRPE(object):
178 nagios_logdir = '/var/log/nagios'
179 nagios_exportdir = '/var/lib/nagios/export'
180 nrpe_confdir = '/etc/nagios/nrpe.d'
181
182 def __init__(self):
183 super(NRPE, self).__init__()
184 self.config = config()
185 self.nagios_context = self.config['nagios_context']
186 self.unit_name = local_unit().replace('/', '-')
187 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
188 self.checks = []
189
190 def add_check(self, *args, **kwargs):
191 self.checks.append(Check(*args, **kwargs))
192
193 def write(self):
194 try:
195 nagios_uid = pwd.getpwnam('nagios').pw_uid
196 nagios_gid = grp.getgrnam('nagios').gr_gid
197 except:
198 log("Nagios user not set up, nrpe checks not updated")
199 return
200
201 if not os.path.exists(NRPE.nagios_logdir):
202 os.mkdir(NRPE.nagios_logdir)
203 os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
204
205 nrpe_monitors = {}
206 monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
207 for nrpecheck in self.checks:
208 nrpecheck.write(self.nagios_context, self.hostname)
209 nrpe_monitors[nrpecheck.shortname] = {
210 "command": nrpecheck.command,
211 }
212
213 service('restart', 'nagios-nrpe-server')
214
215 for rid in relation_ids("local-monitors"):
216 relation_set(relation_id=rid, monitors=yaml.dump(monitors))
2170
=== removed file 'hooks/charmhelpers/contrib/charmsupport/volumes.py'
--- hooks/charmhelpers/contrib/charmsupport/volumes.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
@@ -1,156 +0,0 @@
1'''
2Functions for managing volumes in juju units. One volume is supported per unit.
3Subordinates may have their own storage, provided it is on its own partition.
4
5Configuration stanzas:
6 volume-ephemeral:
7 type: boolean
8 default: true
9 description: >
10 If false, a volume is mounted as sepecified in "volume-map"
11 If true, ephemeral storage will be used, meaning that log data
12 will only exist as long as the machine. YOU HAVE BEEN WARNED.
13 volume-map:
14 type: string
15 default: {}
16 description: >
17 YAML map of units to device names, e.g:
18 "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
19 Service units will raise a configure-error if volume-ephemeral
20 is 'true' and no volume-map value is set. Use 'juju set' to set a
21 value and 'juju resolved' to complete configuration.
22
23Usage:
24 from charmsupport.volumes import configure_volume, VolumeConfigurationError
25 from charmsupport.hookenv import log, ERROR
26 def post_mount_hook():
27 stop_service('myservice')
28 def post_mount_hook():
29 start_service('myservice')
30
31 if __name__ == '__main__':
32 try:
33 configure_volume(before_change=pre_mount_hook,
34 after_change=post_mount_hook)
35 except VolumeConfigurationError:
36 log('Storage could not be configured', ERROR)
37'''
38
39# XXX: Known limitations
40# - fstab is neither consulted nor updated
41
42import os
43from charmhelpers.core import hookenv
44from charmhelpers.core import host
45import yaml
46
47
48MOUNT_BASE = '/srv/juju/volumes'
49
50
51class VolumeConfigurationError(Exception):
52 '''Volume configuration data is missing or invalid'''
53 pass
54
55
56def get_config():
57 '''Gather and sanity-check volume configuration data'''
58 volume_config = {}
59 config = hookenv.config()
60
61 errors = False
62
63 if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
64 volume_config['ephemeral'] = True
65 else:
66 volume_config['ephemeral'] = False
67
68 try:
69 volume_map = yaml.safe_load(config.get('volume-map', '{}'))
70 except yaml.YAMLError as e:
71 hookenv.log("Error parsing YAML volume-map: {}".format(e),
72 hookenv.ERROR)
73 errors = True
74 if volume_map is None:
75 # probably an empty string
76 volume_map = {}
77 elif not isinstance(volume_map, dict):
78 hookenv.log("Volume-map should be a dictionary, not {}".format(
79 type(volume_map)))
80 errors = True
81
82 volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
83 if volume_config['device'] and volume_config['ephemeral']:
84 # asked for ephemeral storage but also defined a volume ID
85 hookenv.log('A volume is defined for this unit, but ephemeral '
86 'storage was requested', hookenv.ERROR)
87 errors = True
88 elif not volume_config['device'] and not volume_config['ephemeral']:
89 # asked for permanent storage but did not define volume ID
90 hookenv.log('Ephemeral storage was requested, but there is no volume '
91 'defined for this unit.', hookenv.ERROR)
92 errors = True
93
94 unit_mount_name = hookenv.local_unit().replace('/', '-')
95 volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
96
97 if errors:
98 return None
99 return volume_config
100
101
102def mount_volume(config):
103 if os.path.exists(config['mountpoint']):
104 if not os.path.isdir(config['mountpoint']):
105 hookenv.log('Not a directory: {}'.format(config['mountpoint']))
106 raise VolumeConfigurationError()
107 else:
108 host.mkdir(config['mountpoint'])
109 if os.path.ismount(config['mountpoint']):
110 unmount_volume(config)
111 if not host.mount(config['device'], config['mountpoint'], persist=True):
112 raise VolumeConfigurationError()
113
114
115def unmount_volume(config):
116 if os.path.ismount(config['mountpoint']):
117 if not host.umount(config['mountpoint'], persist=True):
118 raise VolumeConfigurationError()
119
120
121def managed_mounts():
122 '''List of all mounted managed volumes'''
123 return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
124
125
126def configure_volume(before_change=lambda: None, after_change=lambda: None):
127 '''Set up storage (or don't) according to the charm's volume configuration.
128 Returns the mount point or "ephemeral". before_change and after_change
129 are optional functions to be called if the volume configuration changes.
130 '''
131
132 config = get_config()
133 if not config:
134 hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
135 raise VolumeConfigurationError()
136
137 if config['ephemeral']:
138 if os.path.ismount(config['mountpoint']):
139 before_change()
140 unmount_volume(config)
141 after_change()
142 return 'ephemeral'
143 else:
144 # persistent storage
145 if os.path.ismount(config['mountpoint']):
146 mounts = dict(managed_mounts())
147 if mounts.get(config['mountpoint']) != config['device']:
148 before_change()
149 unmount_volume(config)
150 mount_volume(config)
151 after_change()
152 else:
153 before_change()
154 mount_volume(config)
155 after_change()
156 return config['mountpoint']
1570
=== removed directory 'hooks/charmhelpers/contrib/hahelpers'
=== removed file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
--- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
@@ -1,59 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import subprocess
12
13from charmhelpers.core.hookenv import (
14 config as config_get,
15 relation_get,
16 relation_ids,
17 related_units as relation_list,
18 log,
19 INFO,
20)
21
22
23def get_cert():
24 cert = config_get('ssl_cert')
25 key = config_get('ssl_key')
26 if not (cert and key):
27 log("Inspecting identity-service relations for SSL certificate.",
28 level=INFO)
29 cert = key = None
30 for r_id in relation_ids('identity-service'):
31 for unit in relation_list(r_id):
32 if not cert:
33 cert = relation_get('ssl_cert',
34 rid=r_id, unit=unit)
35 if not key:
36 key = relation_get('ssl_key',
37 rid=r_id, unit=unit)
38 return (cert, key)
39
40
41def get_ca_cert():
42 ca_cert = config_get('ssl_ca')
43 if ca_cert is None:
44 log("Inspecting identity-service relations for CA SSL certificate.",
45 level=INFO)
46 for r_id in relation_ids('identity-service'):
47 for unit in relation_list(r_id):
48 if ca_cert is None:
49 ca_cert = relation_get('ca_cert',
50 rid=r_id, unit=unit)
51 return ca_cert
52
53
54def install_ca_cert(ca_cert):
55 if ca_cert:
56 with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
57 'w') as crt:
58 crt.write(ca_cert)
59 subprocess.check_call(['update-ca-certificates', '--fresh'])
600
=== removed file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
@@ -1,183 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# Authors:
5# James Page <james.page@ubuntu.com>
6# Adam Gandelman <adamg@ubuntu.com>
7#
8
9import subprocess
10import os
11
12from socket import gethostname as get_unit_hostname
13
14from charmhelpers.core.hookenv import (
15 log,
16 relation_ids,
17 related_units as relation_list,
18 relation_get,
19 config as config_get,
20 INFO,
21 ERROR,
22 unit_get,
23)
24
25
26class HAIncompleteConfig(Exception):
27 pass
28
29
30def is_clustered():
31 for r_id in (relation_ids('ha') or []):
32 for unit in (relation_list(r_id) or []):
33 clustered = relation_get('clustered',
34 rid=r_id,
35 unit=unit)
36 if clustered:
37 return True
38 return False
39
40
41def is_leader(resource):
42 cmd = [
43 "crm", "resource",
44 "show", resource
45 ]
46 try:
47 status = subprocess.check_output(cmd)
48 except subprocess.CalledProcessError:
49 return False
50 else:
51 if get_unit_hostname() in status:
52 return True
53 else:
54 return False
55
56
57def peer_units():
58 peers = []
59 for r_id in (relation_ids('cluster') or []):
60 for unit in (relation_list(r_id) or []):
61 peers.append(unit)
62 return peers
63
64
65def oldest_peer(peers):
66 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
67 for peer in peers:
68 remote_unit_no = int(peer.split('/')[1])
69 if remote_unit_no < local_unit_no:
70 return False
71 return True
72
73
74def eligible_leader(resource):
75 if is_clustered():
76 if not is_leader(resource):
77 log('Deferring action to CRM leader.', level=INFO)
78 return False
79 else:
80 peers = peer_units()
81 if peers and not oldest_peer(peers):
82 log('Deferring action to oldest service unit.', level=INFO)
83 return False
84 return True
85
86
87def https():
88 '''
89 Determines whether enough data has been provided in configuration
90 or relation data to configure HTTPS
91 .
92 returns: boolean
93 '''
94 if config_get('use-https') == "yes":
95 return True
96 if config_get('ssl_cert') and config_get('ssl_key'):
97 return True
98 for r_id in relation_ids('identity-service'):
99 for unit in relation_list(r_id):
100 rel_state = [
101 relation_get('https_keystone', rid=r_id, unit=unit),
102 relation_get('ssl_cert', rid=r_id, unit=unit),
103 relation_get('ssl_key', rid=r_id, unit=unit),
104 relation_get('ca_cert', rid=r_id, unit=unit),
105 ]
106 # NOTE: works around (LP: #1203241)
107 if (None not in rel_state) and ('' not in rel_state):
108 return True
109 return False
110
111
112def determine_api_port(public_port):
113 '''
114 Determine correct API server listening port based on
115 existence of HTTPS reverse proxy and/or haproxy.
116
117 public_port: int: standard public port for given service
118
119 returns: int: the correct listening port for the API service
120 '''
121 i = 0
122 if len(peer_units()) > 0 or is_clustered():
123 i += 1
124 if https():
125 i += 1
126 return public_port - (i * 10)
127
128
129def determine_apache_port(public_port):
130 '''
131 Description: Determine correct apache listening port based on public IP +
132 state of the cluster.
133
134 public_port: int: standard public port for given service
135
136 returns: int: the correct listening port for the HAProxy service
137 '''
138 i = 0
139 if len(peer_units()) > 0 or is_clustered():
140 i += 1
141 return public_port - (i * 10)
142
143
144def get_hacluster_config():
145 '''
146 Obtains all relevant configuration from charm configuration required
147 for initiating a relation to hacluster:
148
149 ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
150
151 returns: dict: A dict containing settings keyed by setting name.
152 raises: HAIncompleteConfig if settings are missing.
153 '''
154 settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
155 conf = {}
156 for setting in settings:
157 conf[setting] = config_get(setting)
158 missing = []
159 [missing.append(s) for s, v in conf.iteritems() if v is None]
160 if missing:
161 log('Insufficient config data to configure hacluster.', level=ERROR)
162 raise HAIncompleteConfig
163 return conf
164
165
166def canonical_url(configs, vip_setting='vip'):
167 '''
168 Returns the correct HTTP URL to this host given the state of HTTPS
169 configuration and hacluster.
170
171 :configs : OSTemplateRenderer: A config tempating object to inspect for
172 a complete https context.
173 :vip_setting: str: Setting in charm config that specifies
174 VIP address.
175 '''
176 scheme = 'http'
177 if 'https' in configs.complete_contexts():
178 scheme = 'https'
179 if is_clustered():
180 addr = config_get(vip_setting)
181 else:
182 addr = unit_get('private-address')
183 return '%s://%s' % (scheme, addr)
1840
=== removed directory 'hooks/charmhelpers/contrib/jujugui'
=== removed file 'hooks/charmhelpers/contrib/jujugui/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/jujugui/utils.py'
--- hooks/charmhelpers/contrib/jujugui/utils.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/jujugui/utils.py 1970-01-01 00:00:00 +0000
@@ -1,602 +0,0 @@
1"""Juju GUI charm utilities."""
2
3__all__ = [
4 'AGENT',
5 'APACHE',
6 'API_PORT',
7 'CURRENT_DIR',
8 'HAPROXY',
9 'IMPROV',
10 'JUJU_DIR',
11 'JUJU_GUI_DIR',
12 'JUJU_GUI_SITE',
13 'JUJU_PEM',
14 'WEB_PORT',
15 'bzr_checkout',
16 'chain',
17 'cmd_log',
18 'fetch_api',
19 'fetch_gui',
20 'find_missing_packages',
21 'first_path_in_dir',
22 'get_api_address',
23 'get_npm_cache_archive_url',
24 'get_release_file_url',
25 'get_staging_dependencies',
26 'get_zookeeper_address',
27 'legacy_juju',
28 'log_hook',
29 'merge',
30 'parse_source',
31 'prime_npm_cache',
32 'render_to_file',
33 'save_or_create_certificates',
34 'setup_apache',
35 'setup_gui',
36 'start_agent',
37 'start_gui',
38 'start_improv',
39 'write_apache_config',
40]
41
42from contextlib import contextmanager
43import errno
44import json
45import os
46import logging
47import shutil
48from subprocess import CalledProcessError
49import tempfile
50from urlparse import urlparse
51
52import apt
53import tempita
54
55from launchpadlib.launchpad import Launchpad
56from shelltoolbox import (
57 Serializer,
58 apt_get_install,
59 command,
60 environ,
61 install_extra_repositories,
62 run,
63 script_name,
64 search_file,
65 su,
66)
67from charmhelpers.core.host import (
68 service_start,
69)
70from charmhelpers.core.hookenv import (
71 log,
72 config,
73 unit_get,
74)
75
76
77AGENT = 'juju-api-agent'
78APACHE = 'apache2'
79IMPROV = 'juju-api-improv'
80HAPROXY = 'haproxy'
81
82API_PORT = 8080
83WEB_PORT = 8000
84
85CURRENT_DIR = os.getcwd()
86JUJU_DIR = os.path.join(CURRENT_DIR, 'juju')
87JUJU_GUI_DIR = os.path.join(CURRENT_DIR, 'juju-gui')
88JUJU_GUI_SITE = '/etc/apache2/sites-available/juju-gui'
89JUJU_GUI_PORTS = '/etc/apache2/ports.conf'
90JUJU_PEM = 'juju.includes-private-key.pem'
91BUILD_REPOSITORIES = ('ppa:chris-lea/node.js-legacy',)
92DEB_BUILD_DEPENDENCIES = (
93 'bzr', 'imagemagick', 'make', 'nodejs', 'npm',
94)
95DEB_STAGE_DEPENDENCIES = (
96 'zookeeper',
97)
98
99
100# Store the configuration from on invocation to the next.
101config_json = Serializer('/tmp/config.json')
102# Bazaar checkout command.
103bzr_checkout = command('bzr', 'co', '--lightweight')
104# Whether or not the charm is deployed using juju-core.
105# If juju-core has been used to deploy the charm, an agent.conf file must
106# be present in the charm parent directory.
107legacy_juju = lambda: not os.path.exists(
108 os.path.join(CURRENT_DIR, '..', 'agent.conf'))
109
110
111def _get_build_dependencies():
112 """Install deb dependencies for building."""
113 log('Installing build dependencies.')
114 cmd_log(install_extra_repositories(*BUILD_REPOSITORIES))
115 cmd_log(apt_get_install(*DEB_BUILD_DEPENDENCIES))
116
117
118def get_api_address(unit_dir):
119 """Return the Juju API address stored in the uniter agent.conf file."""
120 import yaml # python-yaml is only installed if juju-core is used.
121 # XXX 2013-03-27 frankban bug=1161443:
122 # currently the uniter agent.conf file does not include the API
123 # address. For now retrieve it from the machine agent file.
124 base_dir = os.path.abspath(os.path.join(unit_dir, '..'))
125 for dirname in os.listdir(base_dir):
126 if dirname.startswith('machine-'):
127 agent_conf = os.path.join(base_dir, dirname, 'agent.conf')
128 break
129 else:
130 raise IOError('Juju agent configuration file not found.')
131 contents = yaml.load(open(agent_conf))
132 return contents['apiinfo']['addrs'][0]
133
134
135def get_staging_dependencies():
136 """Install deb dependencies for the stage (improv) environment."""
137 log('Installing stage dependencies.')
138 cmd_log(apt_get_install(*DEB_STAGE_DEPENDENCIES))
139
140
141def first_path_in_dir(directory):
142 """Return the full path of the first file/dir in *directory*."""
143 return os.path.join(directory, os.listdir(directory)[0])
144
145
146def _get_by_attr(collection, attr, value):
147 """Return the first item in collection having attr == value.
148
149 Return None if the item is not found.
150 """
151 for item in collection:
152 if getattr(item, attr) == value:
153 return item
154
155
156def get_release_file_url(project, series_name, release_version):
157 """Return the URL of the release file hosted in Launchpad.
158
159 The returned URL points to a release file for the given project, series
160 name and release version.
161 The argument *project* is a project object as returned by launchpadlib.
162 The arguments *series_name* and *release_version* are strings. If
163 *release_version* is None, the URL of the latest release will be returned.
164 """
165 series = _get_by_attr(project.series, 'name', series_name)
166 if series is None:
167 raise ValueError('%r: series not found' % series_name)
168 # Releases are returned by Launchpad in reverse date order.
169 releases = list(series.releases)
170 if not releases:
171 raise ValueError('%r: series does not contain releases' % series_name)
172 if release_version is not None:
173 release = _get_by_attr(releases, 'version', release_version)
174 if release is None:
175 raise ValueError('%r: release not found' % release_version)
176 releases = [release]
177 for release in releases:
178 for file_ in release.files:
179 if str(file_).endswith('.tgz'):
180 return file_.file_link
181 raise ValueError('%r: file not found' % release_version)
182
183
184def get_zookeeper_address(agent_file_path):
185 """Retrieve the Zookeeper address contained in the given *agent_file_path*.
186
187 The *agent_file_path* is a path to a file containing a line similar to the
188 following::
189
190 env JUJU_ZOOKEEPER="address"
191 """
192 line = search_file('JUJU_ZOOKEEPER', agent_file_path).strip()
193 return line.split('=')[1].strip('"')
194
195
196@contextmanager
197def log_hook():
198 """Log when a hook starts and stops its execution.
199
200 Also log to stdout possible CalledProcessError exceptions raised executing
201 the hook.
202 """
203 script = script_name()
204 log(">>> Entering {}".format(script))
205 try:
206 yield
207 except CalledProcessError as err:
208 log('Exception caught:')
209 log(err.output)
210 raise
211 finally:
212 log("<<< Exiting {}".format(script))
213
214
215def parse_source(source):
216 """Parse the ``juju-gui-source`` option.
217
218 Return a tuple of two elements representing info on how to deploy Juju GUI.
219 Examples:
220 - ('stable', None): latest stable release;
221 - ('stable', '0.1.0'): stable release v0.1.0;
222 - ('trunk', None): latest trunk release;
223 - ('trunk', '0.1.0+build.1'): trunk release v0.1.0 bzr revision 1;
224 - ('branch', 'lp:juju-gui'): release is made from a branch;
225 - ('url', 'http://example.com/gui'): release from a downloaded file.
226 """
227 if source.startswith('url:'):
228 source = source[4:]
229 # Support file paths, including relative paths.
230 if urlparse(source).scheme == '':
231 if not source.startswith('/'):
232 source = os.path.join(os.path.abspath(CURRENT_DIR), source)
233 source = "file://%s" % source
234 return 'url', source
235 if source in ('stable', 'trunk'):
236 return source, None
237 if source.startswith('lp:') or source.startswith('http://'):
238 return 'branch', source
239 if 'build' in source:
240 return 'trunk', source
241 return 'stable', source
242
243
244def render_to_file(template_name, context, destination):
245 """Render the given *template_name* into *destination* using *context*.
246
247 The tempita template language is used to render contents
248 (see http://pythonpaste.org/tempita/).
249 The argument *template_name* is the name or path of the template file:
250 it may be either a path relative to ``../config`` or an absolute path.
251 The argument *destination* is a file path.
252 The argument *context* is a dict-like object.
253 """
254 template_path = os.path.abspath(template_name)
255 template = tempita.Template.from_filename(template_path)
256 with open(destination, 'w') as stream:
257 stream.write(template.substitute(context))
258
259
260results_log = None
261
262
263def _setupLogging():
264 global results_log
265 if results_log is not None:
266 return
267 cfg = config()
268 logging.basicConfig(
269 filename=cfg['command-log-file'],
270 level=logging.INFO,
271 format="%(asctime)s: %(name)s@%(levelname)s %(message)s")
272 results_log = logging.getLogger('juju-gui')
273
274
275def cmd_log(results):
276 global results_log
277 if not results:
278 return
279 if results_log is None:
280 _setupLogging()
281 # Since 'results' may be multi-line output, start it on a separate line
282 # from the logger timestamp, etc.
283 results_log.info('\n' + results)
284
285
286def start_improv(staging_env, ssl_cert_path,
287 config_path='/etc/init/juju-api-improv.conf'):
288 """Start a simulated juju environment using ``improv.py``."""
289 log('Setting up staging start up script.')
290 context = {
291 'juju_dir': JUJU_DIR,
292 'keys': ssl_cert_path,
293 'port': API_PORT,
294 'staging_env': staging_env,
295 }
296 render_to_file('config/juju-api-improv.conf.template', context, config_path)
297 log('Starting the staging backend.')
298 with su('root'):
299 service_start(IMPROV)
300
301
302def start_agent(
303 ssl_cert_path, config_path='/etc/init/juju-api-agent.conf',
304 read_only=False):
305 """Start the Juju agent and connect to the current environment."""
306 # Retrieve the Zookeeper address from the start up script.
307 unit_dir = os.path.realpath(os.path.join(CURRENT_DIR, '..'))
308 agent_file = '/etc/init/juju-{0}.conf'.format(os.path.basename(unit_dir))
309 zookeeper = get_zookeeper_address(agent_file)
310 log('Setting up API agent start up script.')
311 context = {
312 'juju_dir': JUJU_DIR,
313 'keys': ssl_cert_path,
314 'port': API_PORT,
315 'zookeeper': zookeeper,
316 'read_only': read_only
317 }
318 render_to_file('config/juju-api-agent.conf.template', context, config_path)
319 log('Starting API agent.')
320 with su('root'):
321 service_start(AGENT)
322
323
324def start_gui(
325 console_enabled, login_help, readonly, in_staging, ssl_cert_path,
326 charmworld_url, serve_tests, haproxy_path='/etc/haproxy/haproxy.cfg',
327 config_js_path=None, secure=True, sandbox=False):
328 """Set up and start the Juju GUI server."""
329 with su('root'):
330 run('chown', '-R', 'ubuntu:', JUJU_GUI_DIR)
331 # XXX 2013-02-05 frankban bug=1116320:
332 # External insecure resources are still loaded when testing in the
333 # debug environment. For now, switch to the production environment if
334 # the charm is configured to serve tests.
335 if in_staging and not serve_tests:
336 build_dirname = 'build-debug'
337 else:
338 build_dirname = 'build-prod'
339 build_dir = os.path.join(JUJU_GUI_DIR, build_dirname)
340 log('Generating the Juju GUI configuration file.')
341 is_legacy_juju = legacy_juju()
342 user, password = None, None
343 if (is_legacy_juju and in_staging) or sandbox:
344 user, password = 'admin', 'admin'
345 else:
346 user, password = None, None
347
348 api_backend = 'python' if is_legacy_juju else 'go'
349 if secure:
350 protocol = 'wss'
351 else:
352 log('Running in insecure mode! Port 80 will serve unencrypted.')
353 protocol = 'ws'
354
355 context = {
356 'raw_protocol': protocol,
357 'address': unit_get('public-address'),
358 'console_enabled': json.dumps(console_enabled),
359 'login_help': json.dumps(login_help),
360 'password': json.dumps(password),
361 'api_backend': json.dumps(api_backend),
362 'readonly': json.dumps(readonly),
363 'user': json.dumps(user),
364 'protocol': json.dumps(protocol),
365 'sandbox': json.dumps(sandbox),
366 'charmworld_url': json.dumps(charmworld_url),
367 }
368 if config_js_path is None:
369 config_js_path = os.path.join(
370 build_dir, 'juju-ui', 'assets', 'config.js')
371 render_to_file('config/config.js.template', context, config_js_path)
372
373 write_apache_config(build_dir, serve_tests)
374
375 log('Generating haproxy configuration file.')
376 if is_legacy_juju:
377 # The PyJuju API agent is listening on localhost.
378 api_address = '127.0.0.1:{0}'.format(API_PORT)
379 else:
380 # Retrieve the juju-core API server address.
381 api_address = get_api_address(os.path.join(CURRENT_DIR, '..'))
382 context = {
383 'api_address': api_address,
384 'api_pem': JUJU_PEM,
385 'legacy_juju': is_legacy_juju,
386 'ssl_cert_path': ssl_cert_path,
387 # In PyJuju environments, use the same certificate for both HTTPS and
388 # WebSocket connections. In juju-core the system already has the proper
389 # certificate installed.
390 'web_pem': JUJU_PEM,
391 'web_port': WEB_PORT,
392 'secure': secure
393 }
394 render_to_file('config/haproxy.cfg.template', context, haproxy_path)
395 log('Starting Juju GUI.')
396
397
398def write_apache_config(build_dir, serve_tests=False):
399 log('Generating the apache site configuration file.')
400 context = {
401 'port': WEB_PORT,
402 'serve_tests': serve_tests,
403 'server_root': build_dir,
404 'tests_root': os.path.join(JUJU_GUI_DIR, 'test', ''),
405 }
406 render_to_file('config/apache-ports.template', context, JUJU_GUI_PORTS)
407 render_to_file('config/apache-site.template', context, JUJU_GUI_SITE)
408
409
410def get_npm_cache_archive_url(Launchpad=Launchpad):
411 """Figure out the URL of the most recent NPM cache archive on Launchpad."""
412 launchpad = Launchpad.login_anonymously('Juju GUI charm', 'production')
413 project = launchpad.projects['juju-gui']
414 # Find the URL of the most recently created NPM cache archive.
415 npm_cache_url = get_release_file_url(project, 'npm-cache', None)
416 return npm_cache_url
417
418
419def prime_npm_cache(npm_cache_url):
420 """Download NPM cache archive and prime the NPM cache with it."""
421 # Download the cache archive and then uncompress it into the NPM cache.
422 npm_cache_archive = os.path.join(CURRENT_DIR, 'npm-cache.tgz')
423 cmd_log(run('curl', '-L', '-o', npm_cache_archive, npm_cache_url))
424 npm_cache_dir = os.path.expanduser('~/.npm')
425 # The NPM cache directory probably does not exist, so make it if not.
426 try:
427 os.mkdir(npm_cache_dir)
428 except OSError, e:
429 # If the directory already exists then ignore the error.
430 if e.errno != errno.EEXIST: # File exists.
431 raise
432 uncompress = command('tar', '-x', '-z', '-C', npm_cache_dir, '-f')
433 cmd_log(uncompress(npm_cache_archive))
434
435
436def fetch_gui(juju_gui_source, logpath):
437 """Retrieve the Juju GUI release/branch."""
438 # Retrieve a Juju GUI release.
439 origin, version_or_branch = parse_source(juju_gui_source)
440 if origin == 'branch':
441 # Make sure we have the dependencies necessary for us to actually make
442 # a build.
443 _get_build_dependencies()
444 # Create a release starting from a branch.
445 juju_gui_source_dir = os.path.join(CURRENT_DIR, 'juju-gui-source')
446 log('Retrieving Juju GUI source checkout from %s.' % version_or_branch)
447 cmd_log(run('rm', '-rf', juju_gui_source_dir))
448 cmd_log(bzr_checkout(version_or_branch, juju_gui_source_dir))
449 log('Preparing a Juju GUI release.')
450 logdir = os.path.dirname(logpath)
451 fd, name = tempfile.mkstemp(prefix='make-distfile-', dir=logdir)
452 log('Output from "make distfile" sent to %s' % name)
453 with environ(NO_BZR='1'):
454 run('make', '-C', juju_gui_source_dir, 'distfile',
455 stdout=fd, stderr=fd)
456 release_tarball = first_path_in_dir(
457 os.path.join(juju_gui_source_dir, 'releases'))
458 else:
459 log('Retrieving Juju GUI release.')
460 if origin == 'url':
461 file_url = version_or_branch
462 else:
463 # Retrieve a release from Launchpad.
464 launchpad = Launchpad.login_anonymously(
465 'Juju GUI charm', 'production')
466 project = launchpad.projects['juju-gui']
467 file_url = get_release_file_url(project, origin, version_or_branch)
468 log('Downloading release file from %s.' % file_url)
469 release_tarball = os.path.join(CURRENT_DIR, 'release.tgz')
470 cmd_log(run('curl', '-L', '-o', release_tarball, file_url))
471 return release_tarball
472
473
474def fetch_api(juju_api_branch):
475 """Retrieve the Juju branch."""
476 # Retrieve Juju API source checkout.
477 log('Retrieving Juju API source checkout.')
478 cmd_log(run('rm', '-rf', JUJU_DIR))
479 cmd_log(bzr_checkout(juju_api_branch, JUJU_DIR))
480
481
482def setup_gui(release_tarball):
483 """Set up Juju GUI."""
484 # Uncompress the release tarball.
485 log('Installing Juju GUI.')
486 release_dir = os.path.join(CURRENT_DIR, 'release')
487 cmd_log(run('rm', '-rf', release_dir))
488 os.mkdir(release_dir)
489 uncompress = command('tar', '-x', '-z', '-C', release_dir, '-f')
490 cmd_log(uncompress(release_tarball))
491 # Link the Juju GUI dir to the contents of the release tarball.
492 cmd_log(run('ln', '-sf', first_path_in_dir(release_dir), JUJU_GUI_DIR))
493
494
495def setup_apache():
496 """Set up apache."""
497 log('Setting up apache.')
498 if not os.path.exists(JUJU_GUI_SITE):
499 cmd_log(run('touch', JUJU_GUI_SITE))
500 cmd_log(run('chown', 'ubuntu:', JUJU_GUI_SITE))
501 cmd_log(
502 run('ln', '-s', JUJU_GUI_SITE,
503 '/etc/apache2/sites-enabled/juju-gui'))
504
505 if not os.path.exists(JUJU_GUI_PORTS):
506 cmd_log(run('touch', JUJU_GUI_PORTS))
507 cmd_log(run('chown', 'ubuntu:', JUJU_GUI_PORTS))
508
509 with su('root'):
510 run('a2dissite', 'default')
511 run('a2ensite', 'juju-gui')
512
513
514def save_or_create_certificates(
515 ssl_cert_path, ssl_cert_contents, ssl_key_contents):
516 """Generate the SSL certificates.
517
518 If both *ssl_cert_contents* and *ssl_key_contents* are provided, use them
519 as certificates; otherwise, generate them.
520
521 Also create a pem file, suitable for use in the haproxy configuration,
522 concatenating the key and the certificate files.
523 """
524 crt_path = os.path.join(ssl_cert_path, 'juju.crt')
525 key_path = os.path.join(ssl_cert_path, 'juju.key')
526 if not os.path.exists(ssl_cert_path):
527 os.makedirs(ssl_cert_path)
528 if ssl_cert_contents and ssl_key_contents:
529 # Save the provided certificates.
530 with open(crt_path, 'w') as cert_file:
531 cert_file.write(ssl_cert_contents)
532 with open(key_path, 'w') as key_file:
533 key_file.write(ssl_key_contents)
534 else:
535 # Generate certificates.
536 # See http://superuser.com/questions/226192/openssl-without-prompt
537 cmd_log(run(
538 'openssl', 'req', '-new', '-newkey', 'rsa:4096',
539 '-days', '365', '-nodes', '-x509', '-subj',
540 # These are arbitrary test values for the certificate.
541 '/C=GB/ST=Juju/L=GUI/O=Ubuntu/CN=juju.ubuntu.com',
542 '-keyout', key_path, '-out', crt_path))
543 # Generate the pem file.
544 pem_path = os.path.join(ssl_cert_path, JUJU_PEM)
545 if os.path.exists(pem_path):
546 os.remove(pem_path)
547 with open(pem_path, 'w') as pem_file:
548 shutil.copyfileobj(open(key_path), pem_file)
549 shutil.copyfileobj(open(crt_path), pem_file)
550
551
552def find_missing_packages(*packages):
553 """Given a list of packages, return the packages which are not installed.
554 """
555 cache = apt.Cache()
556 missing = set()
557 for pkg_name in packages:
558 try:
559 pkg = cache[pkg_name]
560 except KeyError:
561 missing.add(pkg_name)
562 continue
563 if pkg.is_installed:
564 continue
565 missing.add(pkg_name)
566 return missing
567
568
569## Backend support decorators
570
571def chain(name):
572 """Helper method to compose a set of mixin objects into a callable.
573
574 Each method is called in the context of its mixin instance, and its
575 argument is the Backend instance.
576 """
577 # Chain method calls through all implementing mixins.
578 def method(self):
579 for mixin in self.mixins:
580 a_callable = getattr(type(mixin), name, None)
581 if a_callable:
582 a_callable(mixin, self)
583
584 method.__name__ = name
585 return method
586
587
588def merge(name):
589 """Helper to merge a property from a set of strategy objects
590 into a unified set.
591 """
592 # Return merged property from every providing mixin as a set.
593 @property
594 def method(self):
595 result = set()
596 for mixin in self.mixins:
597 segment = getattr(type(mixin), name, None)
598 if segment and isinstance(segment, (list, tuple, set)):
599 result |= set(segment)
600
601 return result
602 return method
6030
=== removed directory 'hooks/charmhelpers/contrib/network'
=== removed file 'hooks/charmhelpers/contrib/network/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
@@ -1,69 +0,0 @@
1import sys
2
3from charmhelpers.fetch import apt_install
4from charmhelpers.core.hookenv import (
5 ERROR, log,
6)
7
8try:
9 import netifaces
10except ImportError:
11 apt_install('python-netifaces')
12 import netifaces
13
14try:
15 import netaddr
16except ImportError:
17 apt_install('python-netaddr')
18 import netaddr
19
20
21def _validate_cidr(network):
22 try:
23 netaddr.IPNetwork(network)
24 except (netaddr.core.AddrFormatError, ValueError):
25 raise ValueError("Network (%s) is not in CIDR presentation format" %
26 network)
27
28
29def get_address_in_network(network, fallback=None, fatal=False):
30 """
31 Get an IPv4 address within the network from the host.
32
33 Args:
34 network (str): CIDR presentation format. For example,
35 '192.168.1.0/24'.
36 fallback (str): If no address is found, return fallback.
37 fatal (boolean): If no address is found, fallback is not
38 set and fatal is True then exit(1).
39 """
40
41 def not_found_error_out():
42 log("No IP address found in network: %s" % network,
43 level=ERROR)
44 sys.exit(1)
45
46 if network is None:
47 if fallback is not None:
48 return fallback
49 else:
50 if fatal:
51 not_found_error_out()
52
53 _validate_cidr(network)
54 for iface in netifaces.interfaces():
55 addresses = netifaces.ifaddresses(iface)
56 if netifaces.AF_INET in addresses:
57 addr = addresses[netifaces.AF_INET][0]['addr']
58 netmask = addresses[netifaces.AF_INET][0]['netmask']
59 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
60 if cidr in netaddr.IPNetwork(network):
61 return str(cidr.ip)
62
63 if fallback is not None:
64 return fallback
65
66 if fatal:
67 not_found_error_out()
68
69 return None
700
=== removed directory 'hooks/charmhelpers/contrib/network/ovs'
=== removed file 'hooks/charmhelpers/contrib/network/ovs/__init__.py'
--- hooks/charmhelpers/contrib/network/ovs/__init__.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/network/ovs/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,75 +0,0 @@
1''' Helpers for interacting with OpenvSwitch '''
2import subprocess
3import os
4from charmhelpers.core.hookenv import (
5 log, WARNING
6)
7from charmhelpers.core.host import (
8 service
9)
10
11
12def add_bridge(name):
13 ''' Add the named bridge to openvswitch '''
14 log('Creating bridge {}'.format(name))
15 subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
16
17
18def del_bridge(name):
19 ''' Delete the named bridge from openvswitch '''
20 log('Deleting bridge {}'.format(name))
21 subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
22
23
24def add_bridge_port(name, port):
25 ''' Add a port to the named openvswitch bridge '''
26 log('Adding port {} to bridge {}'.format(port, name))
27 subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
28 name, port])
29 subprocess.check_call(["ip", "link", "set", port, "up"])
30
31
32def del_bridge_port(name, port):
33 ''' Delete a port from the named openvswitch bridge '''
34 log('Deleting port {} from bridge {}'.format(port, name))
35 subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
36 name, port])
37 subprocess.check_call(["ip", "link", "set", port, "down"])
38
39
40def set_manager(manager):
41 ''' Set the controller for the local openvswitch '''
42 log('Setting manager for local ovs to {}'.format(manager))
43 subprocess.check_call(['ovs-vsctl', 'set-manager',
44 'ssl:{}'.format(manager)])
45
46
47CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
48
49
50def get_certificate():
51 ''' Read openvswitch certificate from disk '''
52 if os.path.exists(CERT_PATH):
53 log('Reading ovs certificate from {}'.format(CERT_PATH))
54 with open(CERT_PATH, 'r') as cert:
55 full_cert = cert.read()
56 begin_marker = "-----BEGIN CERTIFICATE-----"
57 end_marker = "-----END CERTIFICATE-----"
58 begin_index = full_cert.find(begin_marker)
59 end_index = full_cert.rfind(end_marker)
60 if end_index == -1 or begin_index == -1:
61 raise RuntimeError("Certificate does not contain valid begin"
62 " and end markers.")
63 full_cert = full_cert[begin_index:(end_index + len(end_marker))]
64 return full_cert
65 else:
66 log('Certificate not found', level=WARNING)
67 return None
68
69
70def full_restart():
71 ''' Full restart and reload of openvswitch '''
72 if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
73 service('start', 'openvswitch-force-reload-kmod')
74 else:
75 service('force-reload-kmod', 'openvswitch-switch')
760
=== removed directory 'hooks/charmhelpers/contrib/openstack'
=== removed file 'hooks/charmhelpers/contrib/openstack/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/openstack/alternatives.py'
--- hooks/charmhelpers/contrib/openstack/alternatives.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000
@@ -1,17 +0,0 @@
1''' Helper for managing alternatives for file conflict resolution '''
2
3import subprocess
4import shutil
5import os
6
7
8def install_alternative(name, target, source, priority=50):
9 ''' Install alternative configuration '''
10 if (os.path.exists(target) and not os.path.islink(target)):
11 # Move existing file/directory away before installing
12 shutil.move(target, '{}.bak'.format(target))
13 cmd = [
14 'update-alternatives', '--force', '--install',
15 target, name, source, str(priority)
16 ]
17 subprocess.check_call(cmd)
180
=== removed file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
@@ -1,700 +0,0 @@
1import json
2import os
3import time
4
5from base64 import b64decode
6
7from subprocess import (
8 check_call
9)
10
11
12from charmhelpers.fetch import (
13 apt_install,
14 filter_installed_packages,
15)
16
17from charmhelpers.core.hookenv import (
18 config,
19 local_unit,
20 log,
21 relation_get,
22 relation_ids,
23 related_units,
24 unit_get,
25 unit_private_ip,
26 ERROR,
27)
28
29from charmhelpers.contrib.hahelpers.cluster import (
30 determine_apache_port,
31 determine_api_port,
32 https,
33 is_clustered
34)
35
36from charmhelpers.contrib.hahelpers.apache import (
37 get_cert,
38 get_ca_cert,
39)
40
41from charmhelpers.contrib.openstack.neutron import (
42 neutron_plugin_attribute,
43)
44
45CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
46
47
48class OSContextError(Exception):
49 pass
50
51
52def ensure_packages(packages):
53 '''Install but do not upgrade required plugin packages'''
54 required = filter_installed_packages(packages)
55 if required:
56 apt_install(required, fatal=True)
57
58
59def context_complete(ctxt):
60 _missing = []
61 for k, v in ctxt.iteritems():
62 if v is None or v == '':
63 _missing.append(k)
64 if _missing:
65 log('Missing required data: %s' % ' '.join(_missing), level='INFO')
66 return False
67 return True
68
69
70def config_flags_parser(config_flags):
71 if config_flags.find('==') >= 0:
72 log("config_flags is not in expected format (key=value)",
73 level=ERROR)
74 raise OSContextError
75 # strip the following from each value.
76 post_strippers = ' ,'
77 # we strip any leading/trailing '=' or ' ' from the string then
78 # split on '='.
79 split = config_flags.strip(' =').split('=')
80 limit = len(split)
81 flags = {}
82 for i in xrange(0, limit - 1):
83 current = split[i]
84 next = split[i + 1]
85 vindex = next.rfind(',')
86 if (i == limit - 2) or (vindex < 0):
87 value = next
88 else:
89 value = next[:vindex]
90
91 if i == 0:
92 key = current
93 else:
94 # if this not the first entry, expect an embedded key.
95 index = current.rfind(',')
96 if index < 0:
97 log("invalid config value(s) at index %s" % (i),
98 level=ERROR)
99 raise OSContextError
100 key = current[index + 1:]
101
102 # Add to collection.
103 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
104 return flags
105
106
107class OSContextGenerator(object):
108 interfaces = []
109
110 def __call__(self):
111 raise NotImplementedError
112
113
114class SharedDBContext(OSContextGenerator):
115 interfaces = ['shared-db']
116
117 def __init__(self,
118 database=None, user=None, relation_prefix=None, ssl_dir=None):
119 '''
120 Allows inspecting relation for settings prefixed with relation_prefix.
121 This is useful for parsing access for multiple databases returned via
122 the shared-db interface (eg, nova_password, quantum_password)
123 '''
124 self.relation_prefix = relation_prefix
125 self.database = database
126 self.user = user
127 self.ssl_dir = ssl_dir
128
129 def __call__(self):
130 self.database = self.database or config('database')
131 self.user = self.user or config('database-user')
132 if None in [self.database, self.user]:
133 log('Could not generate shared_db context. '
134 'Missing required charm config options. '
135 '(database name and user)')
136 raise OSContextError
137 ctxt = {}
138
139 password_setting = 'password'
140 if self.relation_prefix:
141 password_setting = self.relation_prefix + '_password'
142
143 for rid in relation_ids('shared-db'):
144 for unit in related_units(rid):
145 rdata = relation_get(rid=rid, unit=unit)
146 ctxt = {
147 'database_host': rdata.get('db_host'),
148 'database': self.database,
149 'database_user': self.user,
150 'database_password': rdata.get(password_setting),
151 'database_type': 'mysql'
152 }
153 if context_complete(ctxt):
154 db_ssl(rdata, ctxt, self.ssl_dir)
155 return ctxt
156 return {}
157
158
159class PostgresqlDBContext(OSContextGenerator):
160 interfaces = ['pgsql-db']
161
162 def __init__(self, database=None):
163 self.database = database
164
165 def __call__(self):
166 self.database = self.database or config('database')
167 if self.database is None:
168 log('Could not generate postgresql_db context. '
169 'Missing required charm config options. '
170 '(database name)')
171 raise OSContextError
172 ctxt = {}
173
174 for rid in relation_ids(self.interfaces[0]):
175 for unit in related_units(rid):
176 ctxt = {
177 'database_host': relation_get('host', rid=rid, unit=unit),
178 'database': self.database,
179 'database_user': relation_get('user', rid=rid, unit=unit),
180 'database_password': relation_get('password', rid=rid, unit=unit),
181 'database_type': 'postgresql',
182 }
183 if context_complete(ctxt):
184 return ctxt
185 return {}
186
187
188def db_ssl(rdata, ctxt, ssl_dir):
189 if 'ssl_ca' in rdata and ssl_dir:
190 ca_path = os.path.join(ssl_dir, 'db-client.ca')
191 with open(ca_path, 'w') as fh:
192 fh.write(b64decode(rdata['ssl_ca']))
193 ctxt['database_ssl_ca'] = ca_path
194 elif 'ssl_ca' in rdata:
195 log("Charm not setup for ssl support but ssl ca found")
196 return ctxt
197 if 'ssl_cert' in rdata:
198 cert_path = os.path.join(
199 ssl_dir, 'db-client.cert')
200 if not os.path.exists(cert_path):
201 log("Waiting 1m for ssl client cert validity")
202 time.sleep(60)
203 with open(cert_path, 'w') as fh:
204 fh.write(b64decode(rdata['ssl_cert']))
205 ctxt['database_ssl_cert'] = cert_path
206 key_path = os.path.join(ssl_dir, 'db-client.key')
207 with open(key_path, 'w') as fh:
208 fh.write(b64decode(rdata['ssl_key']))
209 ctxt['database_ssl_key'] = key_path
210 return ctxt
211
212
213class IdentityServiceContext(OSContextGenerator):
214 interfaces = ['identity-service']
215
216 def __call__(self):
217 log('Generating template context for identity-service')
218 ctxt = {}
219
220 for rid in relation_ids('identity-service'):
221 for unit in related_units(rid):
222 rdata = relation_get(rid=rid, unit=unit)
223 ctxt = {
224 'service_port': rdata.get('service_port'),
225 'service_host': rdata.get('service_host'),
226 'auth_host': rdata.get('auth_host'),
227 'auth_port': rdata.get('auth_port'),
228 'admin_tenant_name': rdata.get('service_tenant'),
229 'admin_user': rdata.get('service_username'),
230 'admin_password': rdata.get('service_password'),
231 'service_protocol':
232 rdata.get('service_protocol') or 'http',
233 'auth_protocol':
234 rdata.get('auth_protocol') or 'http',
235 }
236 if context_complete(ctxt):
237 # NOTE(jamespage) this is required for >= icehouse
238 # so a missing value just indicates keystone needs
239 # upgrading
240 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
241 return ctxt
242 return {}
243
244
245class AMQPContext(OSContextGenerator):
246 interfaces = ['amqp']
247
248 def __init__(self, ssl_dir=None):
249 self.ssl_dir = ssl_dir
250
251 def __call__(self):
252 log('Generating template context for amqp')
253 conf = config()
254 try:
255 username = conf['rabbit-user']
256 vhost = conf['rabbit-vhost']
257 except KeyError as e:
258 log('Could not generate shared_db context. '
259 'Missing required charm config options: %s.' % e)
260 raise OSContextError
261 ctxt = {}
262 for rid in relation_ids('amqp'):
263 ha_vip_only = False
264 for unit in related_units(rid):
265 if relation_get('clustered', rid=rid, unit=unit):
266 ctxt['clustered'] = True
267 ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
268 unit=unit)
269 else:
270 ctxt['rabbitmq_host'] = relation_get('private-address',
271 rid=rid, unit=unit)
272 ctxt.update({
273 'rabbitmq_user': username,
274 'rabbitmq_password': relation_get('password', rid=rid,
275 unit=unit),
276 'rabbitmq_virtual_host': vhost,
277 })
278
279 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
280 if ssl_port:
281 ctxt['rabbit_ssl_port'] = ssl_port
282 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
283 if ssl_ca:
284 ctxt['rabbit_ssl_ca'] = ssl_ca
285
286 if relation_get('ha_queues', rid=rid, unit=unit) is not None:
287 ctxt['rabbitmq_ha_queues'] = True
288
289 ha_vip_only = relation_get('ha-vip-only',
290 rid=rid, unit=unit) is not None
291
292 if context_complete(ctxt):
293 if 'rabbit_ssl_ca' in ctxt:
294 if not self.ssl_dir:
295 log(("Charm not setup for ssl support "
296 "but ssl ca found"))
297 break
298 ca_path = os.path.join(
299 self.ssl_dir, 'rabbit-client-ca.pem')
300 with open(ca_path, 'w') as fh:
301 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
302 ctxt['rabbit_ssl_ca'] = ca_path
303 # Sufficient information found = break out!
304 break
305 # Used for active/active rabbitmq >= grizzly
306 if ('clustered' not in ctxt or ha_vip_only) \
307 and len(related_units(rid)) > 1:
308 rabbitmq_hosts = []
309 for unit in related_units(rid):
310 rabbitmq_hosts.append(relation_get('private-address',
311 rid=rid, unit=unit))
312 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
313 if not context_complete(ctxt):
314 return {}
315 else:
316 return ctxt
317
318
319class CephContext(OSContextGenerator):
320 interfaces = ['ceph']
321
322 def __call__(self):
323 '''This generates context for /etc/ceph/ceph.conf templates'''
324 if not relation_ids('ceph'):
325 return {}
326
327 log('Generating template context for ceph')
328
329 mon_hosts = []
330 auth = None
331 key = None
332 use_syslog = str(config('use-syslog')).lower()
333 for rid in relation_ids('ceph'):
334 for unit in related_units(rid):
335 mon_hosts.append(relation_get('private-address', rid=rid,
336 unit=unit))
337 auth = relation_get('auth', rid=rid, unit=unit)
338 key = relation_get('key', rid=rid, unit=unit)
339
340 ctxt = {
341 'mon_hosts': ' '.join(mon_hosts),
342 'auth': auth,
343 'key': key,
344 'use_syslog': use_syslog
345 }
346
347 if not os.path.isdir('/etc/ceph'):
348 os.mkdir('/etc/ceph')
349
350 if not context_complete(ctxt):
351 return {}
352
353 ensure_packages(['ceph-common'])
354
355 return ctxt
356
357
358class HAProxyContext(OSContextGenerator):
359 interfaces = ['cluster']
360
361 def __call__(self):
362 '''
363 Builds half a context for the haproxy template, which describes
364 all peers to be included in the cluster. Each charm needs to include
365 its own context generator that describes the port mapping.
366 '''
367 if not relation_ids('cluster'):
368 return {}
369
370 cluster_hosts = {}
371 l_unit = local_unit().replace('/', '-')
372 cluster_hosts[l_unit] = unit_get('private-address')
373
374 for rid in relation_ids('cluster'):
375 for unit in related_units(rid):
376 _unit = unit.replace('/', '-')
377 addr = relation_get('private-address', rid=rid, unit=unit)
378 cluster_hosts[_unit] = addr
379
380 ctxt = {
381 'units': cluster_hosts,
382 }
383 if len(cluster_hosts.keys()) > 1:
384 # Enable haproxy when we have enough peers.
385 log('Ensuring haproxy enabled in /etc/default/haproxy.')
386 with open('/etc/default/haproxy', 'w') as out:
387 out.write('ENABLED=1\n')
388 return ctxt
389 log('HAProxy context is incomplete, this unit has no peers.')
390 return {}
391
392
393class ImageServiceContext(OSContextGenerator):
394 interfaces = ['image-service']
395
396 def __call__(self):
397 '''
398 Obtains the glance API server from the image-service relation. Useful
399 in nova and cinder (currently).
400 '''
401 log('Generating template context for image-service.')
402 rids = relation_ids('image-service')
403 if not rids:
404 return {}
405 for rid in rids:
406 for unit in related_units(rid):
407 api_server = relation_get('glance-api-server',
408 rid=rid, unit=unit)
409 if api_server:
410 return {'glance_api_servers': api_server}
411 log('ImageService context is incomplete. '
412 'Missing required relation data.')
413 return {}
414
415
416class ApacheSSLContext(OSContextGenerator):
417
418 """
419 Generates a context for an apache vhost configuration that configures
420 HTTPS reverse proxying for one or many endpoints. Generated context
421 looks something like:
422 {
423 'namespace': 'cinder',
424 'private_address': 'iscsi.mycinderhost.com',
425 'endpoints': [(8776, 8766), (8777, 8767)]
426 }
427
428 The endpoints list consists of a tuples mapping external ports
429 to internal ports.
430 """
431 interfaces = ['https']
432
433 # charms should inherit this context and set external ports
434 # and service namespace accordingly.
435 external_ports = []
436 service_namespace = None
437
438 def enable_modules(self):
439 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
440 check_call(cmd)
441
442 def configure_cert(self):
443 if not os.path.isdir('/etc/apache2/ssl'):
444 os.mkdir('/etc/apache2/ssl')
445 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
446 if not os.path.isdir(ssl_dir):
447 os.mkdir(ssl_dir)
448 cert, key = get_cert()
449 with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
450 cert_out.write(b64decode(cert))
451 with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
452 key_out.write(b64decode(key))
453 ca_cert = get_ca_cert()
454 if ca_cert:
455 with open(CA_CERT_PATH, 'w') as ca_out:
456 ca_out.write(b64decode(ca_cert))
457 check_call(['update-ca-certificates'])
458
459 def __call__(self):
460 if isinstance(self.external_ports, basestring):
461 self.external_ports = [self.external_ports]
462 if (not self.external_ports or not https()):
463 return {}
464
465 self.configure_cert()
466 self.enable_modules()
467
468 ctxt = {
469 'namespace': self.service_namespace,
470 'private_address': unit_get('private-address'),
471 'endpoints': []
472 }
473 if is_clustered():
474 ctxt['private_address'] = config('vip')
475 for api_port in self.external_ports:
476 ext_port = determine_apache_port(api_port)
477 int_port = determine_api_port(api_port)
478 portmap = (int(ext_port), int(int_port))
479 ctxt['endpoints'].append(portmap)
480 return ctxt
481
482
483class NeutronContext(OSContextGenerator):
484 interfaces = []
485
486 @property
487 def plugin(self):
488 return None
489
490 @property
491 def network_manager(self):
492 return None
493
494 @property
495 def packages(self):
496 return neutron_plugin_attribute(
497 self.plugin, 'packages', self.network_manager)
498
499 @property
500 def neutron_security_groups(self):
501 return None
502
503 def _ensure_packages(self):
504 [ensure_packages(pkgs) for pkgs in self.packages]
505
506 def _save_flag_file(self):
507 if self.network_manager == 'quantum':
508 _file = '/etc/nova/quantum_plugin.conf'
509 else:
510 _file = '/etc/nova/neutron_plugin.conf'
511 with open(_file, 'wb') as out:
512 out.write(self.plugin + '\n')
513
514 def ovs_ctxt(self):
515 driver = neutron_plugin_attribute(self.plugin, 'driver',
516 self.network_manager)
517 config = neutron_plugin_attribute(self.plugin, 'config',
518 self.network_manager)
519 ovs_ctxt = {
520 'core_plugin': driver,
521 'neutron_plugin': 'ovs',
522 'neutron_security_groups': self.neutron_security_groups,
523 'local_ip': unit_private_ip(),
524 'config': config
525 }
526
527 return ovs_ctxt
528
529 def nvp_ctxt(self):
530 driver = neutron_plugin_attribute(self.plugin, 'driver',
531 self.network_manager)
532 config = neutron_plugin_attribute(self.plugin, 'config',
533 self.network_manager)
534 nvp_ctxt = {
535 'core_plugin': driver,
536 'neutron_plugin': 'nvp',
537 'neutron_security_groups': self.neutron_security_groups,
538 'local_ip': unit_private_ip(),
539 'config': config
540 }
541
542 return nvp_ctxt
543
544 def neutron_ctxt(self):
545 if https():
546 proto = 'https'
547 else:
548 proto = 'http'
549 if is_clustered():
550 host = config('vip')
551 else:
552 host = unit_get('private-address')
553 url = '%s://%s:%s' % (proto, host, '9696')
554 ctxt = {
555 'network_manager': self.network_manager,
556 'neutron_url': url,
557 }
558 return ctxt
559
560 def __call__(self):
561 self._ensure_packages()
562
563 if self.network_manager not in ['quantum', 'neutron']:
564 return {}
565
566 if not self.plugin:
567 return {}
568
569 ctxt = self.neutron_ctxt()
570
571 if self.plugin == 'ovs':
572 ctxt.update(self.ovs_ctxt())
573 elif self.plugin == 'nvp':
574 ctxt.update(self.nvp_ctxt())
575
576 alchemy_flags = config('neutron-alchemy-flags')
577 if alchemy_flags:
578 flags = config_flags_parser(alchemy_flags)
579 ctxt['neutron_alchemy_flags'] = flags
580
581 self._save_flag_file()
582 return ctxt
583
584
585class OSConfigFlagContext(OSContextGenerator):
586
587 """
588 Responsible for adding user-defined config-flags in charm config to a
589 template context.
590
591 NOTE: the value of config-flags may be a comma-separated list of
592 key=value pairs and some Openstack config files support
593 comma-separated lists as values.
594 """
595
596 def __call__(self):
597 config_flags = config('config-flags')
598 if not config_flags:
599 return {}
600
601 flags = config_flags_parser(config_flags)
602 return {'user_config_flags': flags}
603
604
605class SubordinateConfigContext(OSContextGenerator):
606
607 """
608 Responsible for inspecting relations to subordinates that
609 may be exporting required config via a json blob.
610
611 The subordinate interface allows subordinates to export their
612 configuration requirements to the principle for multiple config
613 files and multiple services. Ie, a subordinate that has interfaces
614 to both glance and nova may export to following yaml blob as json:
615
616 glance:
617 /etc/glance/glance-api.conf:
618 sections:
619 DEFAULT:
620 - [key1, value1]
621 /etc/glance/glance-registry.conf:
622 MYSECTION:
623 - [key2, value2]
624 nova:
625 /etc/nova/nova.conf:
626 sections:
627 DEFAULT:
628 - [key3, value3]
629
630
631 It is then up to the principal charms to subscribe this context to
632 the service+config file it is interested in. Configuration data will
633 be available in the template context, in glance's case, as:
634 ctxt = {
635 ... other context ...
636 'subordinate_config': {
637 'DEFAULT': {
638 'key1': 'value1',
639 },
640 'MYSECTION': {
641 'key2': 'value2',
642 },
643 }
644 }
645
646 """
647
648 def __init__(self, service, config_file, interface):
649 """
650 :param service : Service name key to query in any subordinate
651 data found
652 :param config_file : Service's config file to query sections
653 :param interface : Subordinate interface to inspect
654 """
655 self.service = service
656 self.config_file = config_file
657 self.interface = interface
658
659 def __call__(self):
660 ctxt = {}
661 for rid in relation_ids(self.interface):
662 for unit in related_units(rid):
663 sub_config = relation_get('subordinate_configuration',
664 rid=rid, unit=unit)
665 if sub_config and sub_config != '':
666 try:
667 sub_config = json.loads(sub_config)
668 except:
669 log('Could not parse JSON from subordinate_config '
670 'setting from %s' % rid, level=ERROR)
671 continue
672
673 if self.service not in sub_config:
674 log('Found subordinate_config on %s but it contained'
675 'nothing for %s service' % (rid, self.service))
676 continue
677
678 sub_config = sub_config[self.service]
679 if self.config_file not in sub_config:
680 log('Found subordinate_config on %s but it contained'
681 'nothing for %s' % (rid, self.config_file))
682 continue
683
684 sub_config = sub_config[self.config_file]
685 for k, v in sub_config.iteritems():
686 ctxt[k] = v
687
688 if not ctxt:
689 ctxt['sections'] = {}
690
691 return ctxt
692
693
694class SyslogContext(OSContextGenerator):
695
696 def __call__(self):
697 ctxt = {
698 'use_syslog': config('use-syslog')
699 }
700 return ctxt
7010
=== removed file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
@@ -1,171 +0,0 @@
1# Various utilities for dealing with Neutron and the renaming from Quantum.
2
3from subprocess import check_output
4
5from charmhelpers.core.hookenv import (
6 config,
7 log,
8 ERROR,
9)
10
11from charmhelpers.contrib.openstack.utils import os_release
12
13
14def headers_package():
15 """Ensures correct linux-headers for running kernel are installed,
16 for building DKMS package"""
17 kver = check_output(['uname', '-r']).strip()
18 return 'linux-headers-%s' % kver
19
20QUANTUM_CONF_DIR = '/etc/quantum'
21
22
23def kernel_version():
24 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
25 kver = check_output(['uname', '-r']).strip()
26 kver = kver.split('.')
27 return (int(kver[0]), int(kver[1]))
28
29
30def determine_dkms_package():
31 """ Determine which DKMS package should be used based on kernel version """
32 # NOTE: 3.13 kernels have support for GRE and VXLAN native
33 if kernel_version() >= (3, 13):
34 return []
35 else:
36 return ['openvswitch-datapath-dkms']
37
38
39# legacy
40
41
42def quantum_plugins():
43 from charmhelpers.contrib.openstack import context
44 return {
45 'ovs': {
46 'config': '/etc/quantum/plugins/openvswitch/'
47 'ovs_quantum_plugin.ini',
48 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
49 'OVSQuantumPluginV2',
50 'contexts': [
51 context.SharedDBContext(user=config('neutron-database-user'),
52 database=config('neutron-database'),
53 relation_prefix='neutron',
54 ssl_dir=QUANTUM_CONF_DIR)],
55 'services': ['quantum-plugin-openvswitch-agent'],
56 'packages': [[headers_package()] + determine_dkms_package(),
57 ['quantum-plugin-openvswitch-agent']],
58 'server_packages': ['quantum-server',
59 'quantum-plugin-openvswitch'],
60 'server_services': ['quantum-server']
61 },
62 'nvp': {
63 'config': '/etc/quantum/plugins/nicira/nvp.ini',
64 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
65 'QuantumPlugin.NvpPluginV2',
66 'contexts': [
67 context.SharedDBContext(user=config('neutron-database-user'),
68 database=config('neutron-database'),
69 relation_prefix='neutron',
70 ssl_dir=QUANTUM_CONF_DIR)],
71 'services': [],
72 'packages': [],
73 'server_packages': ['quantum-server',
74 'quantum-plugin-nicira'],
75 'server_services': ['quantum-server']
76 }
77 }
78
79NEUTRON_CONF_DIR = '/etc/neutron'
80
81
82def neutron_plugins():
83 from charmhelpers.contrib.openstack import context
84 release = os_release('nova-common')
85 plugins = {
86 'ovs': {
87 'config': '/etc/neutron/plugins/openvswitch/'
88 'ovs_neutron_plugin.ini',
89 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
90 'OVSNeutronPluginV2',
91 'contexts': [
92 context.SharedDBContext(user=config('neutron-database-user'),
93 database=config('neutron-database'),
94 relation_prefix='neutron',
95 ssl_dir=NEUTRON_CONF_DIR)],
96 'services': ['neutron-plugin-openvswitch-agent'],
97 'packages': [[headers_package()] + determine_dkms_package(),
98 ['neutron-plugin-openvswitch-agent']],
99 'server_packages': ['neutron-server',
100 'neutron-plugin-openvswitch'],
101 'server_services': ['neutron-server']
102 },
103 'nvp': {
104 'config': '/etc/neutron/plugins/nicira/nvp.ini',
105 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
106 'NeutronPlugin.NvpPluginV2',
107 'contexts': [
108 context.SharedDBContext(user=config('neutron-database-user'),
109 database=config('neutron-database'),
110 relation_prefix='neutron',
111 ssl_dir=NEUTRON_CONF_DIR)],
112 'services': [],
113 'packages': [],
114 'server_packages': ['neutron-server',
115 'neutron-plugin-nicira'],
116 'server_services': ['neutron-server']
117 }
118 }
119 # NOTE: patch in ml2 plugin for icehouse onwards
120 if release >= 'icehouse':
121 plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
122 plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
123 plugins['ovs']['server_packages'] = ['neutron-server',
124 'neutron-plugin-ml2']
125 return plugins
126
127
128def neutron_plugin_attribute(plugin, attr, net_manager=None):
129 manager = net_manager or network_manager()
130 if manager == 'quantum':
131 plugins = quantum_plugins()
132 elif manager == 'neutron':
133 plugins = neutron_plugins()
134 else:
135 log('Error: Network manager does not support plugins.')
136 raise Exception
137
138 try:
139 _plugin = plugins[plugin]
140 except KeyError:
141 log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
142 raise Exception
143
144 try:
145 return _plugin[attr]
146 except KeyError:
147 return None
148
149
150def network_manager():
151 '''
152 Deals with the renaming of Quantum to Neutron in H and any situations
153 that require compatibility (eg, deploying H with network-manager=quantum,
154 upgrading from G).
155 '''
156 release = os_release('nova-common')
157 manager = config('network-manager').lower()
158
159 if manager not in ['quantum', 'neutron']:
160 return manager
161
162 if release in ['essex']:
163 # E does not support neutron
164 log('Neutron networking not supported in Essex.', level=ERROR)
165 raise Exception
166 elif release in ['folsom', 'grizzly']:
167 # neutron is named quantum in F and G
168 return 'quantum'
169 else:
170 # ensure accurate naming for all releases post-H
171 return 'neutron'
1720
=== removed directory 'hooks/charmhelpers/contrib/openstack/templates'
=== removed file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py'
--- hooks/charmhelpers/contrib/openstack/templates/__init__.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,2 +0,0 @@
1# dummy __init__.py to fool syncer into thinking this is a syncable python
2# module
30
=== removed file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000
@@ -1,280 +0,0 @@
1import os
2
3from charmhelpers.fetch import apt_install
4
5from charmhelpers.core.hookenv import (
6 log,
7 ERROR,
8 INFO
9)
10
11from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
12
13try:
14 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
15except ImportError:
16 # python-jinja2 may not be installed yet, or we're running unittests.
17 FileSystemLoader = ChoiceLoader = Environment = exceptions = None
18
19
20class OSConfigException(Exception):
21 pass
22
23
24def get_loader(templates_dir, os_release):
25 """
26 Create a jinja2.ChoiceLoader containing template dirs up to
27 and including os_release. If directory template directory
28 is missing at templates_dir, it will be omitted from the loader.
29 templates_dir is added to the bottom of the search list as a base
30 loading dir.
31
32 A charm may also ship a templates dir with this module
33 and it will be appended to the bottom of the search list, eg:
34 hooks/charmhelpers/contrib/openstack/templates.
35
36 :param templates_dir: str: Base template directory containing release
37 sub-directories.
38 :param os_release : str: OpenStack release codename to construct template
39 loader.
40
41 :returns : jinja2.ChoiceLoader constructed with a list of
42 jinja2.FilesystemLoaders, ordered in descending
43 order by OpenStack release.
44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in OPENSTACK_CODENAMES.itervalues()]
47
48 if not os.path.isdir(templates_dir):
49 log('Templates directory not found @ %s.' % templates_dir,
50 level=ERROR)
51 raise OSConfigException
52
53 # the bottom contains templates_dir and possibly a common templates dir
54 # shipped with the helper.
55 loaders = [FileSystemLoader(templates_dir)]
56 helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
57 if os.path.isdir(helper_templates):
58 loaders.append(FileSystemLoader(helper_templates))
59
60 for rel, tmpl_dir in tmpl_dirs:
61 if os.path.isdir(tmpl_dir):
62 loaders.insert(0, FileSystemLoader(tmpl_dir))
63 if rel == os_release:
64 break
65 log('Creating choice loader with dirs: %s' %
66 [l.searchpath for l in loaders], level=INFO)
67 return ChoiceLoader(loaders)
68
69
70class OSConfigTemplate(object):
71 """
72 Associates a config file template with a list of context generators.
73 Responsible for constructing a template context based on those generators.
74 """
75 def __init__(self, config_file, contexts):
76 self.config_file = config_file
77
78 if hasattr(contexts, '__call__'):
79 self.contexts = [contexts]
80 else:
81 self.contexts = contexts
82
83 self._complete_contexts = []
84
85 def context(self):
86 ctxt = {}
87 for context in self.contexts:
88 _ctxt = context()
89 if _ctxt:
90 ctxt.update(_ctxt)
91 # track interfaces for every complete context.
92 [self._complete_contexts.append(interface)
93 for interface in context.interfaces
94 if interface not in self._complete_contexts]
95 return ctxt
96
97 def complete_contexts(self):
98 '''
99 Return a list of interfaces that have satisfied contexts.
100 '''
101 if self._complete_contexts:
102 return self._complete_contexts
103 self.context()
104 return self._complete_contexts
105
106
107class OSConfigRenderer(object):
108 """
109 This class provides a common templating system to be used by OpenStack
110 charms. It is intended to help charms share common code and templates,
111 and ease the burden of managing config templates across multiple OpenStack
112 releases.
113
114 Basic usage:
115 # import some common context generates from charmhelpers
116 from charmhelpers.contrib.openstack import context
117
118 # Create a renderer object for a specific OS release.
119 configs = OSConfigRenderer(templates_dir='/tmp/templates',
120 openstack_release='folsom')
121 # register some config files with context generators.
122 configs.register(config_file='/etc/nova/nova.conf',
123 contexts=[context.SharedDBContext(),
124 context.AMQPContext()])
125 configs.register(config_file='/etc/nova/api-paste.ini',
126 contexts=[context.IdentityServiceContext()])
127 configs.register(config_file='/etc/haproxy/haproxy.conf',
128 contexts=[context.HAProxyContext()])
129 # write out a single config
130 configs.write('/etc/nova/nova.conf')
131 # write out all registered configs
132 configs.write_all()
133
134 Details:
135
136 OpenStack Releases and template loading
137 ---------------------------------------
138 When the object is instantiated, it is associated with a specific OS
139 release. This dictates how the template loader will be constructed.
140
141 The constructed loader attempts to load the template from several places
142 in the following order:
143 - from the most recent OS release-specific template dir (if one exists)
144 - the base templates_dir
145 - a template directory shipped in the charm with this helper file.
146
147
148 For the example above, '/tmp/templates' contains the following structure:
149 /tmp/templates/nova.conf
150 /tmp/templates/api-paste.ini
151 /tmp/templates/grizzly/api-paste.ini
152 /tmp/templates/havana/api-paste.ini
153
154 Since it was registered with the grizzly release, it first searches
155 the grizzly directory for nova.conf, then the templates dir.
156
157 When writing api-paste.ini, it will find the template in the grizzly
158 directory.
159
160 If the object were created with folsom, it would fall back to the
161 base templates dir for its api-paste.ini template.
162
163 This system should help manage changes in config files through
164 openstack releases, allowing charms to fall back to the most recently
165 updated config template for a given release
166
167 The haproxy.conf, since it is not shipped in the templates dir, will
168 be loaded from the module directory's template directory, eg
169 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
170 us to ship common templates (haproxy, apache) with the helpers.
171
172 Context generators
173 ---------------------------------------
174 Context generators are used to generate template contexts during hook
175 execution. Doing so may require inspecting service relations, charm
176 config, etc. When registered, a config file is associated with a list
177 of generators. When a template is rendered and written, all context
178 generates are called in a chain to generate the context dictionary
179 passed to the jinja2 template. See context.py for more info.
180 """
181 def __init__(self, templates_dir, openstack_release):
182 if not os.path.isdir(templates_dir):
183 log('Could not locate templates dir %s' % templates_dir,
184 level=ERROR)
185 raise OSConfigException
186
187 self.templates_dir = templates_dir
188 self.openstack_release = openstack_release
189 self.templates = {}
190 self._tmpl_env = None
191
192 if None in [Environment, ChoiceLoader, FileSystemLoader]:
193 # if this code is running, the object is created pre-install hook.
194 # jinja2 shouldn't get touched until the module is reloaded on next
195 # hook execution, with proper jinja2 bits successfully imported.
196 apt_install('python-jinja2')
197
198 def register(self, config_file, contexts):
199 """
200 Register a config file with a list of context generators to be called
201 during rendering.
202 """
203 self.templates[config_file] = OSConfigTemplate(config_file=config_file,
204 contexts=contexts)
205 log('Registered config file: %s' % config_file, level=INFO)
206
207 def _get_tmpl_env(self):
208 if not self._tmpl_env:
209 loader = get_loader(self.templates_dir, self.openstack_release)
210 self._tmpl_env = Environment(loader=loader)
211
212 def _get_template(self, template):
213 self._get_tmpl_env()
214 template = self._tmpl_env.get_template(template)
215 log('Loaded template from %s' % template.filename, level=INFO)
216 return template
217
218 def render(self, config_file):
219 if config_file not in self.templates:
220 log('Config not registered: %s' % config_file, level=ERROR)
221 raise OSConfigException
222 ctxt = self.templates[config_file].context()
223
224 _tmpl = os.path.basename(config_file)
225 try:
226 template = self._get_template(_tmpl)
227 except exceptions.TemplateNotFound:
228 # if no template is found with basename, try looking for it
229 # using a munged full path, eg:
230 # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
231 _tmpl = '_'.join(config_file.split('/')[1:])
232 try:
233 template = self._get_template(_tmpl)
234 except exceptions.TemplateNotFound as e:
235 log('Could not load template from %s by %s or %s.' %
236 (self.templates_dir, os.path.basename(config_file), _tmpl),
237 level=ERROR)
238 raise e
239
240 log('Rendering from template: %s' % _tmpl, level=INFO)
241 return template.render(ctxt)
242
243 def write(self, config_file):
244 """
245 Write a single config file, raises if config file is not registered.
246 """
247 if config_file not in self.templates:
248 log('Config not registered: %s' % config_file, level=ERROR)
249 raise OSConfigException
250
251 _out = self.render(config_file)
252
253 with open(config_file, 'wb') as out:
254 out.write(_out)
255
256 log('Wrote template %s.' % config_file, level=INFO)
257
258 def write_all(self):
259 """
260 Write out all registered config files.
261 """
262 [self.write(k) for k in self.templates.iterkeys()]
263
264 def set_release(self, openstack_release):
265 """
266 Resets the template environment and generates a new template loader
267 based on a the new openstack release.
268 """
269 self._tmpl_env = None
270 self.openstack_release = openstack_release
271 self._get_tmpl_env()
272
273 def complete_contexts(self):
274 '''
275 Returns a list of context interfaces that yield a complete context.
276 '''
277 interfaces = []
278 [interfaces.extend(i.complete_contexts())
279 for i in self.templates.itervalues()]
280 return interfaces
2810
=== removed file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000
@@ -1,450 +0,0 @@
1#!/usr/bin/python
2
3# Common python helper functions used for OpenStack charms.
4from collections import OrderedDict
5
6import apt_pkg as apt
7import subprocess
8import os
9import socket
10import sys
11
12from charmhelpers.core.hookenv import (
13 config,
14 log as juju_log,
15 charm_dir,
16 ERROR,
17 INFO
18)
19
20from charmhelpers.contrib.storage.linux.lvm import (
21 deactivate_lvm_volume_group,
22 is_lvm_physical_volume,
23 remove_lvm_physical_volume,
24)
25
26from charmhelpers.core.host import lsb_release, mounts, umount
27from charmhelpers.fetch import apt_install
28from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
29from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
30
31CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
32CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
33
34DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
35 'restricted main multiverse universe')
36
37
38UBUNTU_OPENSTACK_RELEASE = OrderedDict([
39 ('oneiric', 'diablo'),
40 ('precise', 'essex'),
41 ('quantal', 'folsom'),
42 ('raring', 'grizzly'),
43 ('saucy', 'havana'),
44 ('trusty', 'icehouse')
45])
46
47
48OPENSTACK_CODENAMES = OrderedDict([
49 ('2011.2', 'diablo'),
50 ('2012.1', 'essex'),
51 ('2012.2', 'folsom'),
52 ('2013.1', 'grizzly'),
53 ('2013.2', 'havana'),
54 ('2014.1', 'icehouse'),
55])
56
57# The ugly duckling
58SWIFT_CODENAMES = OrderedDict([
59 ('1.4.3', 'diablo'),
60 ('1.4.8', 'essex'),
61 ('1.7.4', 'folsom'),
62 ('1.8.0', 'grizzly'),
63 ('1.7.7', 'grizzly'),
64 ('1.7.6', 'grizzly'),
65 ('1.10.0', 'havana'),
66 ('1.9.1', 'havana'),
67 ('1.9.0', 'havana'),
68 ('1.13.1', 'icehouse'),
69 ('1.13.0', 'icehouse'),
70 ('1.12.0', 'icehouse'),
71 ('1.11.0', 'icehouse'),
72])
73
74DEFAULT_LOOPBACK_SIZE = '5G'
75
76
77def error_out(msg):
78 juju_log("FATAL ERROR: %s" % msg, level='ERROR')
79 sys.exit(1)
80
81
82def get_os_codename_install_source(src):
83 '''Derive OpenStack release codename from a given installation source.'''
84 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
85 rel = ''
86 if src in ['distro', 'distro-proposed']:
87 try:
88 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
89 except KeyError:
90 e = 'Could not derive openstack release for '\
91 'this Ubuntu release: %s' % ubuntu_rel
92 error_out(e)
93 return rel
94
95 if src.startswith('cloud:'):
96 ca_rel = src.split(':')[1]
97 ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
98 return ca_rel
99
100 # Best guess match based on deb string provided
101 if src.startswith('deb') or src.startswith('ppa'):
102 for k, v in OPENSTACK_CODENAMES.iteritems():
103 if v in src:
104 return v
105
106
107def get_os_version_install_source(src):
108 codename = get_os_codename_install_source(src)
109 return get_os_version_codename(codename)
110
111
112def get_os_codename_version(vers):
113 '''Determine OpenStack codename from version number.'''
114 try:
115 return OPENSTACK_CODENAMES[vers]
116 except KeyError:
117 e = 'Could not determine OpenStack codename for version %s' % vers
118 error_out(e)
119
120
121def get_os_version_codename(codename):
122 '''Determine OpenStack version number from codename.'''
123 for k, v in OPENSTACK_CODENAMES.iteritems():
124 if v == codename:
125 return k
126 e = 'Could not derive OpenStack version for '\
127 'codename: %s' % codename
128 error_out(e)
129
130
131def get_os_codename_package(package, fatal=True):
132 '''Derive OpenStack release codename from an installed package.'''
133 apt.init()
134 cache = apt.Cache()
135
136 try:
137 pkg = cache[package]
138 except:
139 if not fatal:
140 return None
141 # the package is unknown to the current apt cache.
142 e = 'Could not determine version of package with no installation '\
143 'candidate: %s' % package
144 error_out(e)
145
146 if not pkg.current_ver:
147 if not fatal:
148 return None
149 # package is known, but no version is currently installed.
150 e = 'Could not determine version of uninstalled package: %s' % package
151 error_out(e)
152
153 vers = apt.upstream_version(pkg.current_ver.ver_str)
154
155 try:
156 if 'swift' in pkg.name:
157 swift_vers = vers[:5]
158 if swift_vers not in SWIFT_CODENAMES:
159 # Deal with 1.10.0 upward
160 swift_vers = vers[:6]
161 return SWIFT_CODENAMES[swift_vers]
162 else:
163 vers = vers[:6]
164 return OPENSTACK_CODENAMES[vers]
165 except KeyError:
166 e = 'Could not determine OpenStack codename for version %s' % vers
167 error_out(e)
168
169
170def get_os_version_package(pkg, fatal=True):
171 '''Derive OpenStack version number from an installed package.'''
172 codename = get_os_codename_package(pkg, fatal=fatal)
173
174 if not codename:
175 return None
176
177 if 'swift' in pkg:
178 vers_map = SWIFT_CODENAMES
179 else:
180 vers_map = OPENSTACK_CODENAMES
181
182 for version, cname in vers_map.iteritems():
183 if cname == codename:
184 return version
185 #e = "Could not determine OpenStack version for package: %s" % pkg
186 #error_out(e)
187
188
189os_rel = None
190
191
192def os_release(package, base='essex'):
193 '''
194 Returns OpenStack release codename from a cached global.
195 If the codename can not be determined from either an installed package or
196 the installation source, the earliest release supported by the charm should
197 be returned.
198 '''
199 global os_rel
200 if os_rel:
201 return os_rel
202 os_rel = (get_os_codename_package(package, fatal=False) or
203 get_os_codename_install_source(config('openstack-origin')) or
204 base)
205 return os_rel
206
207
208def import_key(keyid):
209 cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
210 "--recv-keys %s" % keyid
211 try:
212 subprocess.check_call(cmd.split(' '))
213 except subprocess.CalledProcessError:
214 error_out("Error importing repo key %s" % keyid)
215
216
217def configure_installation_source(rel):
218 '''Configure apt installation source.'''
219 if rel == 'distro':
220 return
221 elif rel == 'distro-proposed':
222 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
223 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
224 f.write(DISTRO_PROPOSED % ubuntu_rel)
225 elif rel[:4] == "ppa:":
226 src = rel
227 subprocess.check_call(["add-apt-repository", "-y", src])
228 elif rel[:3] == "deb":
229 l = len(rel.split('|'))
230 if l == 2:
231 src, key = rel.split('|')
232 juju_log("Importing PPA key from keyserver for %s" % src)
233 import_key(key)
234 elif l == 1:
235 src = rel
236 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
237 f.write(src)
238 elif rel[:6] == 'cloud:':
239 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
240 rel = rel.split(':')[1]
241 u_rel = rel.split('-')[0]
242 ca_rel = rel.split('-')[1]
243
244 if u_rel != ubuntu_rel:
245 e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
246 'version (%s)' % (ca_rel, ubuntu_rel)
247 error_out(e)
248
249 if 'staging' in ca_rel:
250 # staging is just a regular PPA.
251 os_rel = ca_rel.split('/')[0]
252 ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
253 cmd = 'add-apt-repository -y %s' % ppa
254 subprocess.check_call(cmd.split(' '))
255 return
256
257 # map charm config options to actual archive pockets.
258 pockets = {
259 'folsom': 'precise-updates/folsom',
260 'folsom/updates': 'precise-updates/folsom',
261 'folsom/proposed': 'precise-proposed/folsom',
262 'grizzly': 'precise-updates/grizzly',
263 'grizzly/updates': 'precise-updates/grizzly',
264 'grizzly/proposed': 'precise-proposed/grizzly',
265 'havana': 'precise-updates/havana',
266 'havana/updates': 'precise-updates/havana',
267 'havana/proposed': 'precise-proposed/havana',
268 'icehouse': 'precise-updates/icehouse',
269 'icehouse/updates': 'precise-updates/icehouse',
270 'icehouse/proposed': 'precise-proposed/icehouse',
271 }
272
273 try:
274 pocket = pockets[ca_rel]
275 except KeyError:
276 e = 'Invalid Cloud Archive release specified: %s' % rel
277 error_out(e)
278
279 src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
280 apt_install('ubuntu-cloud-keyring', fatal=True)
281
282 with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
283 f.write(src)
284 else:
285 error_out("Invalid openstack-release specified: %s" % rel)
286
287
288def save_script_rc(script_path="scripts/scriptrc", **env_vars):
289 """
290 Write an rc file in the charm-delivered directory containing
291 exported environment variables provided by env_vars. Any charm scripts run
292 outside the juju hook environment can source this scriptrc to obtain
293 updated config information necessary to perform health checks or
294 service changes.
295 """
296 juju_rc_path = "%s/%s" % (charm_dir(), script_path)
297 if not os.path.exists(os.path.dirname(juju_rc_path)):
298 os.mkdir(os.path.dirname(juju_rc_path))
299 with open(juju_rc_path, 'wb') as rc_script:
300 rc_script.write(
301 "#!/bin/bash\n")
302 [rc_script.write('export %s=%s\n' % (u, p))
303 for u, p in env_vars.iteritems() if u != "script_path"]
304
305
306def openstack_upgrade_available(package):
307 """
308 Determines if an OpenStack upgrade is available from installation
309 source, based on version of installed package.
310
311 :param package: str: Name of installed package.
312
313 :returns: bool: : Returns True if configured installation source offers
314 a newer version of package.
315
316 """
317
318 src = config('openstack-origin')
319 cur_vers = get_os_version_package(package)
320 available_vers = get_os_version_install_source(src)
321 apt.init()
322 return apt.version_compare(available_vers, cur_vers) == 1
323
324
325def ensure_block_device(block_device):
326 '''
327 Confirm block_device, create as loopback if necessary.
328
329 :param block_device: str: Full path of block device to ensure.
330
331 :returns: str: Full path of ensured block device.
332 '''
333 _none = ['None', 'none', None]
334 if (block_device in _none):
335 error_out('prepare_storage(): Missing required input: '
336 'block_device=%s.' % block_device, level=ERROR)
337
338 if block_device.startswith('/dev/'):
339 bdev = block_device
340 elif block_device.startswith('/'):
341 _bd = block_device.split('|')
342 if len(_bd) == 2:
343 bdev, size = _bd
344 else:
345 bdev = block_device
346 size = DEFAULT_LOOPBACK_SIZE
347 bdev = ensure_loopback_device(bdev, size)
348 else:
349 bdev = '/dev/%s' % block_device
350
351 if not is_block_device(bdev):
352 error_out('Failed to locate valid block device at %s' % bdev,
353 level=ERROR)
354
355 return bdev
356
357
358def clean_storage(block_device):
359 '''
360 Ensures a block device is clean. That is:
361 - unmounted
362 - any lvm volume groups are deactivated
363 - any lvm physical device signatures removed
364 - partition table wiped
365
366 :param block_device: str: Full path to block device to clean.
367 '''
368 for mp, d in mounts():
369 if d == block_device:
370 juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
371 (d, mp), level=INFO)
372 umount(mp, persist=True)
373
374 if is_lvm_physical_volume(block_device):
375 deactivate_lvm_volume_group(block_device)
376 remove_lvm_physical_volume(block_device)
377 else:
378 zap_disk(block_device)
379
380
381def is_ip(address):
382 """
383 Returns True if address is a valid IP address.
384 """
385 try:
386 # Test to see if already an IPv4 address
387 socket.inet_aton(address)
388 return True
389 except socket.error:
390 return False
391
392
393def ns_query(address):
394 try:
395 import dns.resolver
396 except ImportError:
397 apt_install('python-dnspython')
398 import dns.resolver
399
400 if isinstance(address, dns.name.Name):
401 rtype = 'PTR'
402 elif isinstance(address, basestring):
403 rtype = 'A'
404 else:
405 return None
406
407 answers = dns.resolver.query(address, rtype)
408 if answers:
409 return str(answers[0])
410 return None
411
412
413def get_host_ip(hostname):
414 """
415 Resolves the IP for a given hostname, or returns
416 the input if it is already an IP.
417 """
418 if is_ip(hostname):
419 return hostname
420
421 return ns_query(hostname)
422
423
424def get_hostname(address, fqdn=True):
425 """
426 Resolves hostname for given IP, or returns the input
427 if it is already a hostname.
428 """
429 if is_ip(address):
430 try:
431 import dns.reversename
432 except ImportError:
433 apt_install('python-dnspython')
434 import dns.reversename
435
436 rev = dns.reversename.from_address(address)
437 result = ns_query(rev)
438 if not result:
439 return None
440 else:
441 result = address
442
443 if fqdn:
444 # strip trailing .
445 if result.endswith('.'):
446 return result[:-1]
447 else:
448 return result
449 else:
450 return result.split('.')[0]
4510
=== removed directory 'hooks/charmhelpers/contrib/peerstorage'
=== removed file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
--- hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,83 +0,0 @@
1from charmhelpers.core.hookenv import (
2 relation_ids,
3 relation_get,
4 local_unit,
5 relation_set,
6)
7
8"""
9This helper provides functions to support use of a peer relation
10for basic key/value storage, with the added benefit that all storage
11can be replicated across peer units, so this is really useful for
12services that issue usernames/passwords to remote services.
13
14def shared_db_changed()
15 # Only the lead unit should create passwords
16 if not is_leader():
17 return
18 username = relation_get('username')
19 key = '{}.password'.format(username)
20 # Attempt to retrieve any existing password for this user
21 password = peer_retrieve(key)
22 if password is None:
23 # New user, create password and store
24 password = pwgen(length=64)
25 peer_store(key, password)
26 create_access(username, password)
27 relation_set(password=password)
28
29
30def cluster_changed()
31 # Echo any relation data other that *-address
32 # back onto the peer relation so all units have
33 # all *.password keys stored on their local relation
34 # for later retrieval.
35 peer_echo()
36
37"""
38
39
40def peer_retrieve(key, relation_name='cluster'):
41 """ Retrieve a named key from peer relation relation_name """
42 cluster_rels = relation_ids(relation_name)
43 if len(cluster_rels) > 0:
44 cluster_rid = cluster_rels[0]
45 return relation_get(attribute=key, rid=cluster_rid,
46 unit=local_unit())
47 else:
48 raise ValueError('Unable to detect'
49 'peer relation {}'.format(relation_name))
50
51
52def peer_store(key, value, relation_name='cluster'):
53 """ Store the key/value pair on the named peer relation relation_name """
54 cluster_rels = relation_ids(relation_name)
55 if len(cluster_rels) > 0:
56 cluster_rid = cluster_rels[0]
57 relation_set(relation_id=cluster_rid,
58 relation_settings={key: value})
59 else:
60 raise ValueError('Unable to detect '
61 'peer relation {}'.format(relation_name))
62
63
64def peer_echo(includes=None):
65 """Echo filtered attributes back onto the same relation for storage
66
67 Note that this helper must only be called within a peer relation
68 changed hook
69 """
70 rdata = relation_get()
71 echo_data = {}
72 if includes is None:
73 echo_data = rdata.copy()
74 for ex in ['private-address', 'public-address']:
75 if ex in echo_data:
76 echo_data.pop(ex)
77 else:
78 for attribute, value in rdata.iteritems():
79 for include in includes:
80 if include in attribute:
81 echo_data[attribute] = value
82 if len(echo_data) > 0:
83 relation_set(relation_settings=echo_data)
840
=== removed directory 'hooks/charmhelpers/contrib/python'
=== removed file 'hooks/charmhelpers/contrib/python/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/python/packages.py'
--- hooks/charmhelpers/contrib/python/packages.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000
@@ -1,76 +0,0 @@
1#!/usr/bin/env python
2# coding: utf-8
3
4__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
5
6from charmhelpers.fetch import apt_install
7from charmhelpers.core.hookenv import log
8
9try:
10 from pip import main as pip_execute
11except ImportError:
12 apt_install('python-pip')
13 from pip import main as pip_execute
14
15
16def parse_options(given, available):
17 """Given a set of options, check if available"""
18 for key, value in given.items():
19 if key in available:
20 yield "--{0}={1}".format(key, value)
21
22
23def pip_install_requirements(requirements, **options):
24 """Install a requirements file """
25 command = ["install"]
26
27 available_options = ('proxy', 'src', 'log', )
28 for option in parse_options(options, available_options):
29 command.append(option)
30
31 command.append("-r {0}".format(requirements))
32 log("Installing from file: {} with options: {}".format(requirements,
33 command))
34 pip_execute(command)
35
36
37def pip_install(package, fatal=False, **options):
38 """Install a python package"""
39 command = ["install"]
40
41 available_options = ('proxy', 'src', 'log', "index-url", )
42 for option in parse_options(options, available_options):
43 command.append(option)
44
45 if isinstance(package, list):
46 command.extend(package)
47 else:
48 command.append(package)
49
50 log("Installing {} package with options: {}".format(package,
51 command))
52 pip_execute(command)
53
54
55def pip_uninstall(package, **options):
56 """Uninstall a python package"""
57 command = ["uninstall", "-q", "-y"]
58
59 available_options = ('proxy', 'log', )
60 for option in parse_options(options, available_options):
61 command.append(option)
62
63 if isinstance(package, list):
64 command.extend(package)
65 else:
66 command.append(package)
67
68 log("Uninstalling {} package with options: {}".format(package,
69 command))
70 pip_execute(command)
71
72
73def pip_list():
74 """Returns the list of current python installed packages
75 """
76 return pip_execute(["list"])
770
=== removed file 'hooks/charmhelpers/contrib/python/version.py'
--- hooks/charmhelpers/contrib/python/version.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/python/version.py 1970-01-01 00:00:00 +0000
@@ -1,18 +0,0 @@
1#!/usr/bin/env python
2# coding: utf-8
3
4__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
5
6import sys
7
8
9def current_version():
10 """Current system python version"""
11 return sys.version_info
12
13
14def current_version_string():
15 """Current system python version as string major.minor.micro"""
16 return "{0}.{1}.{2}".format(sys.version_info.major,
17 sys.version_info.minor,
18 sys.version_info.micro)
190
=== removed directory 'hooks/charmhelpers/contrib/saltstack'
=== removed file 'hooks/charmhelpers/contrib/saltstack/__init__.py'
--- hooks/charmhelpers/contrib/saltstack/__init__.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/saltstack/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,102 +0,0 @@
1"""Charm Helpers saltstack - declare the state of your machines.
2
3This helper enables you to declare your machine state, rather than
4program it procedurally (and have to test each change to your procedures).
5Your install hook can be as simple as:
6
7{{{
8from charmhelpers.contrib.saltstack import (
9 install_salt_support,
10 update_machine_state,
11)
12
13
14def install():
15 install_salt_support()
16 update_machine_state('machine_states/dependencies.yaml')
17 update_machine_state('machine_states/installed.yaml')
18}}}
19
20and won't need to change (nor will its tests) when you change the machine
21state.
22
23It's using a python package called salt-minion which allows various formats for
24specifying resources, such as:
25
26{{{
27/srv/{{ basedir }}:
28 file.directory:
29 - group: ubunet
30 - user: ubunet
31 - require:
32 - user: ubunet
33 - recurse:
34 - user
35 - group
36
37ubunet:
38 group.present:
39 - gid: 1500
40 user.present:
41 - uid: 1500
42 - gid: 1500
43 - createhome: False
44 - require:
45 - group: ubunet
46}}}
47
48The docs for all the different state definitions are at:
49 http://docs.saltstack.com/ref/states/all/
50
51
52TODO:
53 * Add test helpers which will ensure that machine state definitions
54 are functionally (but not necessarily logically) correct (ie. getting
55 salt to parse all state defs.
56 * Add a link to a public bootstrap charm example / blogpost.
57 * Find a way to obviate the need to use the grains['charm_dir'] syntax
58 in templates.
59"""
60# Copyright 2013 Canonical Ltd.
61#
62# Authors:
63# Charm Helpers Developers <juju@lists.ubuntu.com>
64import subprocess
65
66import charmhelpers.contrib.templating.contexts
67import charmhelpers.core.host
68import charmhelpers.core.hookenv
69
70
71salt_grains_path = '/etc/salt/grains'
72
73
74def install_salt_support(from_ppa=True):
75 """Installs the salt-minion helper for machine state.
76
77 By default the salt-minion package is installed from
78 the saltstack PPA. If from_ppa is False you must ensure
79 that the salt-minion package is available in the apt cache.
80 """
81 if from_ppa:
82 subprocess.check_call([
83 '/usr/bin/add-apt-repository',
84 '--yes',
85 'ppa:saltstack/salt',
86 ])
87 subprocess.check_call(['/usr/bin/apt-get', 'update'])
88 # We install salt-common as salt-minion would run the salt-minion
89 # daemon.
90 charmhelpers.fetch.apt_install('salt-common')
91
92
93def update_machine_state(state_path):
94 """Update the machine state using the provided state declaration."""
95 charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
96 salt_grains_path)
97 subprocess.check_call([
98 'salt-call',
99 '--local',
100 'state.template',
101 state_path,
102 ])
1030
=== removed directory 'hooks/charmhelpers/contrib/ssl'
=== removed file 'hooks/charmhelpers/contrib/ssl/__init__.py'
--- hooks/charmhelpers/contrib/ssl/__init__.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/ssl/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,78 +0,0 @@
1import subprocess
2from charmhelpers.core import hookenv
3
4
5def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
6 """Generate selfsigned SSL keypair
7
8 You must provide one of the 3 optional arguments:
9 config, subject or cn
10 If more than one is provided the leftmost will be used
11
12 Arguments:
13 keyfile -- (required) full path to the keyfile to be created
14 certfile -- (required) full path to the certfile to be created
15 keysize -- (optional) SSL key length
16 config -- (optional) openssl configuration file
17 subject -- (optional) dictionary with SSL subject variables
18 cn -- (optional) cerfificate common name
19
20 Required keys in subject dict:
21 cn -- Common name (eq. FQDN)
22
23 Optional keys in subject dict
24 country -- Country Name (2 letter code)
25 state -- State or Province Name (full name)
26 locality -- Locality Name (eg, city)
27 organization -- Organization Name (eg, company)
28 organizational_unit -- Organizational Unit Name (eg, section)
29 email -- Email Address
30 """
31
32 cmd = []
33 if config:
34 cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
35 "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
36 "-keyout", keyfile,
37 "-out", certfile, "-config", config]
38 elif subject:
39 ssl_subject = ""
40 if "country" in subject:
41 ssl_subject = ssl_subject + "/C={}".format(subject["country"])
42 if "state" in subject:
43 ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
44 if "locality" in subject:
45 ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
46 if "organization" in subject:
47 ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
48 if "organizational_unit" in subject:
49 ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
50 if "cn" in subject:
51 ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
52 else:
53 hookenv.log("When using \"subject\" argument you must "
54 "provide \"cn\" field at very least")
55 return False
56 if "email" in subject:
57 ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
58
59 cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
60 "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
61 "-keyout", keyfile,
62 "-out", certfile, "-subj", ssl_subject]
63 elif cn:
64 cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
65 "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
66 "-keyout", keyfile,
67 "-out", certfile, "-subj", "/CN={}".format(cn)]
68
69 if not cmd:
70 hookenv.log("No config, subject or cn provided,"
71 "unable to generate self signed SSL certificates")
72 return False
73 try:
74 subprocess.check_call(cmd)
75 return True
76 except Exception as e:
77 print "Execution of openssl command failed:\n{}".format(e)
78 return False
790
=== removed file 'hooks/charmhelpers/contrib/ssl/service.py'
--- hooks/charmhelpers/contrib/ssl/service.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/ssl/service.py 1970-01-01 00:00:00 +0000
@@ -1,267 +0,0 @@
1import logging
2import os
3from os.path import join as path_join
4from os.path import exists
5import subprocess
6
7
8log = logging.getLogger("service_ca")
9
10logging.basicConfig(level=logging.DEBUG)
11
12STD_CERT = "standard"
13
14# Mysql server is fairly picky about cert creation
15# and types, spec its creation separately for now.
16MYSQL_CERT = "mysql"
17
18
19class ServiceCA(object):
20
21 default_expiry = str(365 * 2)
22 default_ca_expiry = str(365 * 6)
23
24 def __init__(self, name, ca_dir, cert_type=STD_CERT):
25 self.name = name
26 self.ca_dir = ca_dir
27 self.cert_type = cert_type
28
29 ###############
30 # Hook Helper API
31 @staticmethod
32 def get_ca(type=STD_CERT):
33 service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
34 ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
35 ca = ServiceCA(service_name, ca_path, type)
36 ca.init()
37 return ca
38
39 @classmethod
40 def get_service_cert(cls, type=STD_CERT):
41 service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
42 ca = cls.get_ca()
43 crt, key = ca.get_or_create_cert(service_name)
44 return crt, key, ca.get_ca_bundle()
45
46 ###############
47
48 def init(self):
49 log.debug("initializing service ca")
50 if not exists(self.ca_dir):
51 self._init_ca_dir(self.ca_dir)
52 self._init_ca()
53
54 @property
55 def ca_key(self):
56 return path_join(self.ca_dir, 'private', 'cacert.key')
57
58 @property
59 def ca_cert(self):
60 return path_join(self.ca_dir, 'cacert.pem')
61
62 @property
63 def ca_conf(self):
64 return path_join(self.ca_dir, 'ca.cnf')
65
66 @property
67 def signing_conf(self):
68 return path_join(self.ca_dir, 'signing.cnf')
69
70 def _init_ca_dir(self, ca_dir):
71 os.mkdir(ca_dir)
72 for i in ['certs', 'crl', 'newcerts', 'private']:
73 sd = path_join(ca_dir, i)
74 if not exists(sd):
75 os.mkdir(sd)
76
77 if not exists(path_join(ca_dir, 'serial')):
78 with open(path_join(ca_dir, 'serial'), 'wb') as fh:
79 fh.write('02\n')
80
81 if not exists(path_join(ca_dir, 'index.txt')):
82 with open(path_join(ca_dir, 'index.txt'), 'wb') as fh:
83 fh.write('')
84
85 def _init_ca(self):
86 """Generate the root ca's cert and key.
87 """
88 if not exists(path_join(self.ca_dir, 'ca.cnf')):
89 with open(path_join(self.ca_dir, 'ca.cnf'), 'wb') as fh:
90 fh.write(
91 CA_CONF_TEMPLATE % (self.get_conf_variables()))
92
93 if not exists(path_join(self.ca_dir, 'signing.cnf')):
94 with open(path_join(self.ca_dir, 'signing.cnf'), 'wb') as fh:
95 fh.write(
96 SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
97
98 if exists(self.ca_cert) or exists(self.ca_key):
99 raise RuntimeError("Initialized called when CA already exists")
100 cmd = ['openssl', 'req', '-config', self.ca_conf,
101 '-x509', '-nodes', '-newkey', 'rsa',
102 '-days', self.default_ca_expiry,
103 '-keyout', self.ca_key, '-out', self.ca_cert,
104 '-outform', 'PEM']
105 output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
106 log.debug("CA Init:\n %s", output)
107
108 def get_conf_variables(self):
109 return dict(
110 org_name="juju",
111 org_unit_name="%s service" % self.name,
112 common_name=self.name,
113 ca_dir=self.ca_dir)
114
115 def get_or_create_cert(self, common_name):
116 if common_name in self:
117 return self.get_certificate(common_name)
118 return self.create_certificate(common_name)
119
120 def create_certificate(self, common_name):
121 if common_name in self:
122 return self.get_certificate(common_name)
123 key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
124 crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
125 csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
126 self._create_certificate(common_name, key_p, csr_p, crt_p)
127 return self.get_certificate(common_name)
128
129 def get_certificate(self, common_name):
130 if not common_name in self:
131 raise ValueError("No certificate for %s" % common_name)
132 key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
133 crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
134 with open(crt_p) as fh:
135 crt = fh.read()
136 with open(key_p) as fh:
137 key = fh.read()
138 return crt, key
139
140 def __contains__(self, common_name):
141 crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
142 return exists(crt_p)
143
144 def _create_certificate(self, common_name, key_p, csr_p, crt_p):
145 template_vars = self.get_conf_variables()
146 template_vars['common_name'] = common_name
147 subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
148 template_vars)
149
150 log.debug("CA Create Cert %s", common_name)
151 cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
152 '-nodes', '-days', self.default_expiry,
153 '-keyout', key_p, '-out', csr_p, '-subj', subj]
154 subprocess.check_call(cmd)
155 cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
156 subprocess.check_call(cmd)
157
158 log.debug("CA Sign Cert %s", common_name)
159 if self.cert_type == MYSQL_CERT:
160 cmd = ['openssl', 'x509', '-req',
161 '-in', csr_p, '-days', self.default_expiry,
162 '-CA', self.ca_cert, '-CAkey', self.ca_key,
163 '-set_serial', '01', '-out', crt_p]
164 else:
165 cmd = ['openssl', 'ca', '-config', self.signing_conf,
166 '-extensions', 'req_extensions',
167 '-days', self.default_expiry, '-notext',
168 '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
169 log.debug("running %s", " ".join(cmd))
170 subprocess.check_call(cmd)
171
172 def get_ca_bundle(self):
173 with open(self.ca_cert) as fh:
174 return fh.read()
175
176
177CA_CONF_TEMPLATE = """
178[ ca ]
179default_ca = CA_default
180
181[ CA_default ]
182dir = %(ca_dir)s
183policy = policy_match
184database = $dir/index.txt
185serial = $dir/serial
186certs = $dir/certs
187crl_dir = $dir/crl
188new_certs_dir = $dir/newcerts
189certificate = $dir/cacert.pem
190private_key = $dir/private/cacert.key
191RANDFILE = $dir/private/.rand
192default_md = default
193
194[ req ]
195default_bits = 1024
196default_md = sha1
197
198prompt = no
199distinguished_name = ca_distinguished_name
200
201x509_extensions = ca_extensions
202
203[ ca_distinguished_name ]
204organizationName = %(org_name)s
205organizationalUnitName = %(org_unit_name)s Certificate Authority
206
207
208[ policy_match ]
209countryName = optional
210stateOrProvinceName = optional
211organizationName = match
212organizationalUnitName = optional
213commonName = supplied
214
215[ ca_extensions ]
216basicConstraints = critical,CA:true
217subjectKeyIdentifier = hash
218authorityKeyIdentifier = keyid:always, issuer
219keyUsage = cRLSign, keyCertSign
220"""
221
222
223SIGNING_CONF_TEMPLATE = """
224[ ca ]
225default_ca = CA_default
226
227[ CA_default ]
228dir = %(ca_dir)s
229policy = policy_match
230database = $dir/index.txt
231serial = $dir/serial
232certs = $dir/certs
233crl_dir = $dir/crl
234new_certs_dir = $dir/newcerts
235certificate = $dir/cacert.pem
236private_key = $dir/private/cacert.key
237RANDFILE = $dir/private/.rand
238default_md = default
239
240[ req ]
241default_bits = 1024
242default_md = sha1
243
244prompt = no
245distinguished_name = req_distinguished_name
246
247x509_extensions = req_extensions
248
249[ req_distinguished_name ]
250organizationName = %(org_name)s
251organizationalUnitName = %(org_unit_name)s machine resources
252commonName = %(common_name)s
253
254[ policy_match ]
255countryName = optional
256stateOrProvinceName = optional
257organizationName = match
258organizationalUnitName = optional
259commonName = supplied
260
261[ req_extensions ]
262basicConstraints = CA:false
263subjectKeyIdentifier = hash
264authorityKeyIdentifier = keyid:always, issuer
265keyUsage = digitalSignature, keyEncipherment, keyAgreement
266extendedKeyUsage = serverAuth, clientAuth
267"""
2680
=== removed directory 'hooks/charmhelpers/contrib/storage'
=== removed file 'hooks/charmhelpers/contrib/storage/__init__.py'
=== removed directory 'hooks/charmhelpers/contrib/storage/linux'
=== removed file 'hooks/charmhelpers/contrib/storage/linux/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000
@@ -1,387 +0,0 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import os
12import shutil
13import json
14import time
15
16from subprocess import (
17 check_call,
18 check_output,
19 CalledProcessError
20)
21
22from charmhelpers.core.hookenv import (
23 relation_get,
24 relation_ids,
25 related_units,
26 log,
27 INFO,
28 WARNING,
29 ERROR
30)
31
32from charmhelpers.core.host import (
33 mount,
34 mounts,
35 service_start,
36 service_stop,
37 service_running,
38 umount,
39)
40
41from charmhelpers.fetch import (
42 apt_install,
43)
44
45KEYRING = '/etc/ceph/ceph.client.{}.keyring'
46KEYFILE = '/etc/ceph/ceph.client.{}.key'
47
48CEPH_CONF = """[global]
49 auth supported = {auth}
50 keyring = {keyring}
51 mon host = {mon_hosts}
52 log to syslog = {use_syslog}
53 err to syslog = {use_syslog}
54 clog to syslog = {use_syslog}
55"""
56
57
58def install():
59 ''' Basic Ceph client installation '''
60 ceph_dir = "/etc/ceph"
61 if not os.path.exists(ceph_dir):
62 os.mkdir(ceph_dir)
63 apt_install('ceph-common', fatal=True)
64
65
66def rbd_exists(service, pool, rbd_img):
67 ''' Check to see if a RADOS block device exists '''
68 try:
69 out = check_output(['rbd', 'list', '--id', service,
70 '--pool', pool])
71 except CalledProcessError:
72 return False
73 else:
74 return rbd_img in out
75
76
77def create_rbd_image(service, pool, image, sizemb):
78 ''' Create a new RADOS block device '''
79 cmd = [
80 'rbd',
81 'create',
82 image,
83 '--size',
84 str(sizemb),
85 '--id',
86 service,
87 '--pool',
88 pool
89 ]
90 check_call(cmd)
91
92
93def pool_exists(service, name):
94 ''' Check to see if a RADOS pool already exists '''
95 try:
96 out = check_output(['rados', '--id', service, 'lspools'])
97 except CalledProcessError:
98 return False
99 else:
100 return name in out
101
102
103def get_osds(service):
104 '''
105 Return a list of all Ceph Object Storage Daemons
106 currently in the cluster
107 '''
108 version = ceph_version()
109 if version and version >= '0.56':
110 return json.loads(check_output(['ceph', '--id', service,
111 'osd', 'ls', '--format=json']))
112 else:
113 return None
114
115
116def create_pool(service, name, replicas=2):
117 ''' Create a new RADOS pool '''
118 if pool_exists(service, name):
119 log("Ceph pool {} already exists, skipping creation".format(name),
120 level=WARNING)
121 return
122 # Calculate the number of placement groups based
123 # on upstream recommended best practices.
124 osds = get_osds(service)
125 if osds:
126 pgnum = (len(osds) * 100 / replicas)
127 else:
128 # NOTE(james-page): Default to 200 for older ceph versions
129 # which don't support OSD query from cli
130 pgnum = 200
131 cmd = [
132 'ceph', '--id', service,
133 'osd', 'pool', 'create',
134 name, str(pgnum)
135 ]
136 check_call(cmd)
137 cmd = [
138 'ceph', '--id', service,
139 'osd', 'pool', 'set', name,
140 'size', str(replicas)
141 ]
142 check_call(cmd)
143
144
145def delete_pool(service, name):
146 ''' Delete a RADOS pool from ceph '''
147 cmd = [
148 'ceph', '--id', service,
149 'osd', 'pool', 'delete',
150 name, '--yes-i-really-really-mean-it'
151 ]
152 check_call(cmd)
153
154
155def _keyfile_path(service):
156 return KEYFILE.format(service)
157
158
159def _keyring_path(service):
160 return KEYRING.format(service)
161
162
163def create_keyring(service, key):
164 ''' Create a new Ceph keyring containing key'''
165 keyring = _keyring_path(service)
166 if os.path.exists(keyring):
167 log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
168 return
169 cmd = [
170 'ceph-authtool',
171 keyring,
172 '--create-keyring',
173 '--name=client.{}'.format(service),
174 '--add-key={}'.format(key)
175 ]
176 check_call(cmd)
177 log('ceph: Created new ring at %s.' % keyring, level=INFO)
178
179
180def create_key_file(service, key):
181 ''' Create a file containing key '''
182 keyfile = _keyfile_path(service)
183 if os.path.exists(keyfile):
184 log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
185 return
186 with open(keyfile, 'w') as fd:
187 fd.write(key)
188 log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
189
190
191def get_ceph_nodes():
192 ''' Query named relation 'ceph' to determine current nodes '''
193 hosts = []
194 for r_id in relation_ids('ceph'):
195 for unit in related_units(r_id):
196 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
197 return hosts
198
199
200def configure(service, key, auth, use_syslog):
201 ''' Perform basic configuration of Ceph '''
202 create_keyring(service, key)
203 create_key_file(service, key)
204 hosts = get_ceph_nodes()
205 with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
206 ceph_conf.write(CEPH_CONF.format(auth=auth,
207 keyring=_keyring_path(service),
208 mon_hosts=",".join(map(str, hosts)),
209 use_syslog=use_syslog))
210 modprobe('rbd')
211
212
213def image_mapped(name):
214 ''' Determine whether a RADOS block device is mapped locally '''
215 try:
216 out = check_output(['rbd', 'showmapped'])
217 except CalledProcessError:
218 return False
219 else:
220 return name in out
221
222
223def map_block_storage(service, pool, image):
224 ''' Map a RADOS block device for local use '''
225 cmd = [
226 'rbd',
227 'map',
228 '{}/{}'.format(pool, image),
229 '--user',
230 service,
231 '--secret',
232 _keyfile_path(service),
233 ]
234 check_call(cmd)
235
236
237def filesystem_mounted(fs):
238 ''' Determine whether a filesystem is already mounted '''
239 return fs in [f for f, m in mounts()]
240
241
242def make_filesystem(blk_device, fstype='ext4', timeout=10):
243 ''' Make a new filesystem on the specified block device '''
244 count = 0
245 e_noent = os.errno.ENOENT
246 while not os.path.exists(blk_device):
247 if count >= timeout:
248 log('ceph: gave up waiting on block device %s' % blk_device,
249 level=ERROR)
250 raise IOError(e_noent, os.strerror(e_noent), blk_device)
251 log('ceph: waiting for block device %s to appear' % blk_device,
252 level=INFO)
253 count += 1
254 time.sleep(1)
255 else:
256 log('ceph: Formatting block device %s as filesystem %s.' %
257 (blk_device, fstype), level=INFO)
258 check_call(['mkfs', '-t', fstype, blk_device])
259
260
261def place_data_on_block_device(blk_device, data_src_dst):
262 ''' Migrate data in data_src_dst to blk_device and then remount '''
263 # mount block device into /mnt
264 mount(blk_device, '/mnt')
265 # copy data to /mnt
266 copy_files(data_src_dst, '/mnt')
267 # umount block device
268 umount('/mnt')
269 # Grab user/group ID's from original source
270 _dir = os.stat(data_src_dst)
271 uid = _dir.st_uid
272 gid = _dir.st_gid
273 # re-mount where the data should originally be
274 # TODO: persist is currently a NO-OP in core.host
275 mount(blk_device, data_src_dst, persist=True)
276 # ensure original ownership of new mount.
277 os.chown(data_src_dst, uid, gid)
278
279
280# TODO: re-use
281def modprobe(module):
282 ''' Load a kernel module and configure for auto-load on reboot '''
283 log('ceph: Loading kernel module', level=INFO)
284 cmd = ['modprobe', module]
285 check_call(cmd)
286 with open('/etc/modules', 'r+') as modules:
287 if module not in modules.read():
288 modules.write(module)
289
290
291def copy_files(src, dst, symlinks=False, ignore=None):
292 ''' Copy files from src to dst '''
293 for item in os.listdir(src):
294 s = os.path.join(src, item)
295 d = os.path.join(dst, item)
296 if os.path.isdir(s):
297 shutil.copytree(s, d, symlinks, ignore)
298 else:
299 shutil.copy2(s, d)
300
301
302def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
303 blk_device, fstype, system_services=[]):
304 """
305 NOTE: This function must only be called from a single service unit for
306 the same rbd_img otherwise data loss will occur.
307
308 Ensures given pool and RBD image exists, is mapped to a block device,
309 and the device is formatted and mounted at the given mount_point.
310
311 If formatting a device for the first time, data existing at mount_point
312 will be migrated to the RBD device before being re-mounted.
313
314 All services listed in system_services will be stopped prior to data
315 migration and restarted when complete.
316 """
317 # Ensure pool, RBD image, RBD mappings are in place.
318 if not pool_exists(service, pool):
319 log('ceph: Creating new pool {}.'.format(pool))
320 create_pool(service, pool)
321
322 if not rbd_exists(service, pool, rbd_img):
323 log('ceph: Creating RBD image ({}).'.format(rbd_img))
324 create_rbd_image(service, pool, rbd_img, sizemb)
325
326 if not image_mapped(rbd_img):
327 log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
328 map_block_storage(service, pool, rbd_img)
329
330 # make file system
331 # TODO: What happens if for whatever reason this is run again and
332 # the data is already in the rbd device and/or is mounted??
333 # When it is mounted already, it will fail to make the fs
334 # XXX: This is really sketchy! Need to at least add an fstab entry
335 # otherwise this hook will blow away existing data if its executed
336 # after a reboot.
337 if not filesystem_mounted(mount_point):
338 make_filesystem(blk_device, fstype)
339
340 for svc in system_services:
341 if service_running(svc):
342 log('ceph: Stopping services {} prior to migrating data.'
343 .format(svc))
344 service_stop(svc)
345
346 place_data_on_block_device(blk_device, mount_point)
347
348 for svc in system_services:
349 log('ceph: Starting service {} after migrating data.'
350 .format(svc))
351 service_start(svc)
352
353
354def ensure_ceph_keyring(service, user=None, group=None):
355 '''
356 Ensures a ceph keyring is created for a named service
357 and optionally ensures user and group ownership.
358
359 Returns False if no ceph key is available in relation state.
360 '''
361 key = None
362 for rid in relation_ids('ceph'):
363 for unit in related_units(rid):
364 key = relation_get('key', rid=rid, unit=unit)
365 if key:
366 break
367 if not key:
368 return False
369 create_keyring(service=service, key=key)
370 keyring = _keyring_path(service)
371 if user and group:
372 check_call(['chown', '%s.%s' % (user, group), keyring])
373 return True
374
375
376def ceph_version():
377 ''' Retrieve the local version of ceph '''
378 if os.path.exists('/usr/bin/ceph'):
379 cmd = ['ceph', '-v']
380 output = check_output(cmd)
381 output = output.split()
382 if len(output) > 3:
383 return output[2]
384 else:
385 return None
386 else:
387 return None
3880
=== removed file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000
@@ -1,62 +0,0 @@
1
2import os
3import re
4
5from subprocess import (
6 check_call,
7 check_output,
8)
9
10
11##################################################
12# loopback device helpers.
13##################################################
14def loopback_devices():
15 '''
16 Parse through 'losetup -a' output to determine currently mapped
17 loopback devices. Output is expected to look like:
18
19 /dev/loop0: [0807]:961814 (/tmp/my.img)
20
21 :returns: dict: a dict mapping {loopback_dev: backing_file}
22 '''
23 loopbacks = {}
24 cmd = ['losetup', '-a']
25 devs = [d.strip().split(' ') for d in
26 check_output(cmd).splitlines() if d != '']
27 for dev, _, f in devs:
28 loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
29 return loopbacks
30
31
32def create_loopback(file_path):
33 '''
34 Create a loopback device for a given backing file.
35
36 :returns: str: Full path to new loopback device (eg, /dev/loop0)
37 '''
38 file_path = os.path.abspath(file_path)
39 check_call(['losetup', '--find', file_path])
40 for d, f in loopback_devices().iteritems():
41 if f == file_path:
42 return d
43
44
45def ensure_loopback_device(path, size):
46 '''
47 Ensure a loopback device exists for a given backing file path and size.
48 If it a loopback device is not mapped to file, a new one will be created.
49
50 TODO: Confirm size of found loopback device.
51
52 :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
53 '''
54 for d, f in loopback_devices().iteritems():
55 if f == path:
56 return d
57
58 if not os.path.exists(path):
59 cmd = ['truncate', '--size', size, path]
60 check_call(cmd)
61
62 return create_loopback(path)
630
=== removed file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000
@@ -1,88 +0,0 @@
1from subprocess import (
2 CalledProcessError,
3 check_call,
4 check_output,
5 Popen,
6 PIPE,
7)
8
9
10##################################################
11# LVM helpers.
12##################################################
13def deactivate_lvm_volume_group(block_device):
14 '''
15 Deactivate any volume group associated with an LVM physical volume.
16
17 :param block_device: str: Full path to LVM physical volume
18 '''
19 vg = list_lvm_volume_group(block_device)
20 if vg:
21 cmd = ['vgchange', '-an', vg]
22 check_call(cmd)
23
24
25def is_lvm_physical_volume(block_device):
26 '''
27 Determine whether a block device is initialized as an LVM PV.
28
29 :param block_device: str: Full path of block device to inspect.
30
31 :returns: boolean: True if block device is a PV, False if not.
32 '''
33 try:
34 check_output(['pvdisplay', block_device])
35 return True
36 except CalledProcessError:
37 return False
38
39
40def remove_lvm_physical_volume(block_device):
41 '''
42 Remove LVM PV signatures from a given block device.
43
44 :param block_device: str: Full path of block device to scrub.
45 '''
46 p = Popen(['pvremove', '-ff', block_device],
47 stdin=PIPE)
48 p.communicate(input='y\n')
49
50
51def list_lvm_volume_group(block_device):
52 '''
53 List LVM volume group associated with a given block device.
54
55 Assumes block device is a valid LVM PV.
56
57 :param block_device: str: Full path of block device to inspect.
58
59 :returns: str: Name of volume group associated with block device or None
60 '''
61 vg = None
62 pvd = check_output(['pvdisplay', block_device]).splitlines()
63 for l in pvd:
64 if l.strip().startswith('VG Name'):
65 vg = ' '.join(l.split()).split(' ').pop()
66 return vg
67
68
69def create_lvm_physical_volume(block_device):
70 '''
71 Initialize a block device as an LVM physical volume.
72
73 :param block_device: str: Full path of block device to initialize.
74
75 '''
76 check_call(['pvcreate', block_device])
77
78
79def create_lvm_volume_group(volume_group, block_device):
80 '''
81 Create an LVM volume group backed by a given block device.
82
83 Assumes block device has already been initialized as an LVM PV.
84
85 :param volume_group: str: Name of volume group to create.
86 :block_device: str: Full path of PV-initialized block device.
87 '''
88 check_call(['vgcreate', volume_group, block_device])
890
=== removed file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000
@@ -1,35 +0,0 @@
1from os import stat
2from stat import S_ISBLK
3
4from subprocess import (
5 check_call,
6 check_output,
7 call
8)
9
10
11def is_block_device(path):
12 '''
13 Confirm device at path is a valid block device node.
14
15 :returns: boolean: True if path is a block device, False if not.
16 '''
17 return S_ISBLK(stat(path).st_mode)
18
19
20def zap_disk(block_device):
21 '''
22 Clear a block device of partition table. Relies on sgdisk, which is
23 installed as part of the 'gdisk' package in Ubuntu.
24
25 :param block_device: str: Full path of block device to clean.
26 '''
27 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
28 call(['sgdisk', '--zap-all', '--mbrtogpt',
29 '--clear', block_device])
30 dev_end = check_output(['blockdev', '--getsz', block_device])
31 gpt_end = int(dev_end.split()[0]) - 100
32 check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
33 'bs=1M', 'count=1'])
34 check_call(['dd', 'if=/dev/zero', 'of=%s'%(block_device),
35 'bs=512', 'count=100', 'seek=%s'%(gpt_end)])
360
=== removed directory 'hooks/charmhelpers/contrib/templating'
=== removed file 'hooks/charmhelpers/contrib/templating/__init__.py'
=== removed file 'hooks/charmhelpers/contrib/templating/contexts.py'
--- hooks/charmhelpers/contrib/templating/contexts.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/templating/contexts.py 1970-01-01 00:00:00 +0000
@@ -1,104 +0,0 @@
1# Copyright 2013 Canonical Ltd.
2#
3# Authors:
4# Charm Helpers Developers <juju@lists.ubuntu.com>
5"""A helper to create a yaml cache of config with namespaced relation data."""
6import os
7import yaml
8
9import charmhelpers.core.hookenv
10
11
12charm_dir = os.environ.get('CHARM_DIR', '')
13
14
15def dict_keys_without_hyphens(a_dict):
16 """Return the a new dict with underscores instead of hyphens in keys."""
17 return dict(
18 (key.replace('-', '_'), val) for key, val in a_dict.items())
19
20
21def update_relations(context, namespace_separator=':'):
22 """Update the context with the relation data."""
23 # Add any relation data prefixed with the relation type.
24 relation_type = charmhelpers.core.hookenv.relation_type()
25 relations = []
26 context['current_relation'] = {}
27 if relation_type is not None:
28 relation_data = charmhelpers.core.hookenv.relation_get()
29 context['current_relation'] = relation_data
30 # Deprecated: the following use of relation data as keys
31 # directly in the context will be removed.
32 relation_data = dict(
33 ("{relation_type}{namespace_separator}{key}".format(
34 relation_type=relation_type,
35 key=key,
36 namespace_separator=namespace_separator), val)
37 for key, val in relation_data.items())
38 relation_data = dict_keys_without_hyphens(relation_data)
39 context.update(relation_data)
40 relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
41 relations = [dict_keys_without_hyphens(rel) for rel in relations]
42
43 if 'relations_deprecated' not in context:
44 context['relations_deprecated'] = {}
45 if relation_type is not None:
46 relation_type = relation_type.replace('-', '_')
47 context['relations_deprecated'][relation_type] = relations
48
49 context['relations'] = charmhelpers.core.hookenv.relations()
50
51
52def juju_state_to_yaml(yaml_path, namespace_separator=':',
53 allow_hyphens_in_keys=True):
54 """Update the juju config and state in a yaml file.
55
56 This includes any current relation-get data, and the charm
57 directory.
58
59 This function was created for the ansible and saltstack
60 support, as those libraries can use a yaml file to supply
61 context to templates, but it may be useful generally to
62 create and update an on-disk cache of all the config, including
63 previous relation data.
64
65 By default, hyphens are allowed in keys as this is supported
66 by yaml, but for tools like ansible, hyphens are not valid [1].
67
68 [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
69 """
70 config = charmhelpers.core.hookenv.config()
71
72 # Add the charm_dir which we will need to refer to charm
73 # file resources etc.
74 config['charm_dir'] = charm_dir
75 config['local_unit'] = charmhelpers.core.hookenv.local_unit()
76 config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
77 config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
78 'public-address'
79 )
80
81 # Don't use non-standard tags for unicode which will not
82 # work when salt uses yaml.load_safe.
83 yaml.add_representer(unicode, lambda dumper,
84 value: dumper.represent_scalar(
85 u'tag:yaml.org,2002:str', value))
86
87 yaml_dir = os.path.dirname(yaml_path)
88 if not os.path.exists(yaml_dir):
89 os.makedirs(yaml_dir)
90
91 if os.path.exists(yaml_path):
92 with open(yaml_path, "r") as existing_vars_file:
93 existing_vars = yaml.load(existing_vars_file.read())
94 else:
95 existing_vars = {}
96
97 if not allow_hyphens_in_keys:
98 config = dict_keys_without_hyphens(config)
99 existing_vars.update(config)
100
101 update_relations(existing_vars, namespace_separator)
102
103 with open(yaml_path, "w+") as fp:
104 fp.write(yaml.dump(existing_vars, default_flow_style=False))
1050
=== removed file 'hooks/charmhelpers/contrib/templating/pyformat.py'
--- hooks/charmhelpers/contrib/templating/pyformat.py 2013-11-26 17:12:54 +0000
+++ hooks/charmhelpers/contrib/templating/pyformat.py 1970-01-01 00:00:00 +0000
@@ -1,13 +0,0 @@
1'''
2Templating using standard Python str.format() method.
3'''
4
5from charmhelpers.core import hookenv
6
7
8def render(template, extra={}, **kwargs):
9 """Return the template rendered using Python's str.format()."""
10 context = hookenv.execution_environment()
11 context.update(extra)
12 context.update(kwargs)
13 return template.format(**context)
140
=== removed directory 'hooks/charmhelpers/contrib/unison'
=== removed file 'hooks/charmhelpers/contrib/unison/__init__.py'
--- hooks/charmhelpers/contrib/unison/__init__.py 2014-05-09 20:11:59 +0000
+++ hooks/charmhelpers/contrib/unison/__init__.py 1970-01-01 00:00:00 +0000
@@ -1,257 +0,0 @@
1# Easy file synchronization among peer units using ssh + unison.
2#
3# From *both* peer relation -joined and -changed, add a call to
4# ssh_authorized_peers() describing the peer relation and the desired
5# user + group. After all peer relations have settled, all hosts should
6# be able to connect to one another via key auth'd ssh as the specified user.
7#
8# Other hooks are then free to synchronize files and directories using
9# sync_to_peers().
10#
11# For a peer relation named 'cluster', for example:
12#
13# cluster-relation-joined:
14# ...
15# ssh_authorized_peers(peer_interface='cluster',
16# user='juju_ssh', group='juju_ssh',
17# ensure_user=True)
18# ...
19#
20# cluster-relation-changed:
21# ...
22# ssh_authorized_peers(peer_interface='cluster',
23# user='juju_ssh', group='juju_ssh',
24# ensure_user=True)
25# ...
26#
27# Hooks are now free to sync files as easily as:
28#
29# files = ['/etc/fstab', '/etc/apt.conf.d/']
30# sync_to_peers(peer_interface='cluster',
31# user='juju_ssh', paths=[files])
32#
33# It is assumed the charm itself has setup permissions on each unit
34# such that 'juju_ssh' has read + write permissions. Also assumed
35# that the calling charm takes care of leader delegation.
36#
37# Additionally, files can be synchronized only to a specific unit:
38# sync_to_peer(slave_address, user='juju_ssh',
39# paths=[files], verbose=False)
40
41import os
42import pwd
43
44from copy import copy
45from subprocess import check_call, check_output
46
47from charmhelpers.core.host import (
48 adduser,
49 add_user_to_group,
50)
51
52from charmhelpers.core.hookenv import (
53 log,
54 hook_name,
55 relation_ids,
56 related_units,
57 relation_set,
58 relation_get,
59 unit_private_ip,
60 ERROR,
61)
62
63BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
64 '-fastcheck=true', '-group=false', '-owner=false',
65 '-prefer=newer', '-times=true']
66
67
68def get_homedir(user):
69 try:
70 user = pwd.getpwnam(user)
71 return user.pw_dir
72 except KeyError:
73 log('Could not get homedir for user %s: user exists?', ERROR)
74 raise Exception
75
76
77def create_private_key(user, priv_key_path):
78 if not os.path.isfile(priv_key_path):
79 log('Generating new SSH key for user %s.' % user)
80 cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048',
81 '-f', priv_key_path]
82 check_call(cmd)
83 else:
84 log('SSH key already exists at %s.' % priv_key_path)
85 check_call(['chown', user, priv_key_path])
86 check_call(['chmod', '0600', priv_key_path])
87
88
89def create_public_key(user, priv_key_path, pub_key_path):
90 if not os.path.isfile(pub_key_path):
91 log('Generating missing ssh public key @ %s.' % pub_key_path)
92 cmd = ['ssh-keygen', '-y', '-f', priv_key_path]
93 p = check_output(cmd).strip()
94 with open(pub_key_path, 'wb') as out:
95 out.write(p)
96 check_call(['chown', user, pub_key_path])
97
98
99def get_keypair(user):
100 home_dir = get_homedir(user)
101 ssh_dir = os.path.join(home_dir, '.ssh')
102 priv_key = os.path.join(ssh_dir, 'id_rsa')
103 pub_key = '%s.pub' % priv_key
104
105 if not os.path.isdir(ssh_dir):
106 os.mkdir(ssh_dir)
107 check_call(['chown', '-R', user, ssh_dir])
108
109 create_private_key(user, priv_key)
110 create_public_key(user, priv_key, pub_key)
111
112 with open(priv_key, 'r') as p:
113 _priv = p.read().strip()
114
115 with open(pub_key, 'r') as p:
116 _pub = p.read().strip()
117
118 return (_priv, _pub)
119
120
121def write_authorized_keys(user, keys):
122 home_dir = get_homedir(user)
123 ssh_dir = os.path.join(home_dir, '.ssh')
124 auth_keys = os.path.join(ssh_dir, 'authorized_keys')
125 log('Syncing authorized_keys @ %s.' % auth_keys)
126 with open(auth_keys, 'wb') as out:
127 for k in keys:
128 out.write('%s\n' % k)
129
130
131def write_known_hosts(user, hosts):
132 home_dir = get_homedir(user)
133 ssh_dir = os.path.join(home_dir, '.ssh')
134 known_hosts = os.path.join(ssh_dir, 'known_hosts')
135 khosts = []
136 for host in hosts:
137 cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
138 remote_key = check_output(cmd).strip()
139 khosts.append(remote_key)
140 log('Syncing known_hosts @ %s.' % known_hosts)
141 with open(known_hosts, 'wb') as out:
142 for host in khosts:
143 out.write('%s\n' % host)
144
145
146def ensure_user(user, group=None):
147 adduser(user)
148 if group:
149 add_user_to_group(user, group)
150
151
152def ssh_authorized_peers(peer_interface, user, group=None,
153 ensure_local_user=False):
154 """
155 Main setup function, should be called from both peer -changed and -joined
156 hooks with the same parameters.
157 """
158 if ensure_local_user:
159 ensure_user(user, group)
160 priv_key, pub_key = get_keypair(user)
161 hook = hook_name()
162 if hook == '%s-relation-joined' % peer_interface:
163 relation_set(ssh_pub_key=pub_key)
164 elif hook == '%s-relation-changed' % peer_interface:
165 hosts = []
166 keys = []
167
168 for r_id in relation_ids(peer_interface):
169 for unit in related_units(r_id):
170 ssh_pub_key = relation_get('ssh_pub_key',
171 rid=r_id,
172 unit=unit)
173 priv_addr = relation_get('private-address',
174 rid=r_id,
175 unit=unit)
176 if ssh_pub_key:
177 keys.append(ssh_pub_key)
178 hosts.append(priv_addr)
179 else:
180 log('ssh_authorized_peers(): ssh_pub_key '
181 'missing for unit %s, skipping.' % unit)
182 write_authorized_keys(user, keys)
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches