Merge lp:~freyes/charms/trusty/glance-simplestreams-sync/lp1434356 into lp:~openstack-charmers/charms/trusty/glance-simplestreams-sync/next

Proposed by Felipe Reyes
Status: Merged
Merged at revision: 56
Proposed branch: lp:~freyes/charms/trusty/glance-simplestreams-sync/lp1434356
Merge into: lp:~openstack-charmers/charms/trusty/glance-simplestreams-sync/next
Diff against target: 25126 lines (+11877/-12381)
152 files modified
.bzrignore (+4/-0)
.testr.conf (+8/-0)
Makefile (+21/-5)
charm-helpers-sync.yaml (+4/-1)
charmhelpers/__init__.py (+38/-0)
charmhelpers/contrib/__init__.py (+15/-0)
charmhelpers/contrib/charmsupport/__init__.py (+15/-0)
charmhelpers/contrib/charmsupport/nrpe.py (+360/-0)
charmhelpers/contrib/charmsupport/volumes.py (+175/-0)
charmhelpers/contrib/hahelpers/__init__.py (+15/-0)
charmhelpers/contrib/hahelpers/apache.py (+82/-0)
charmhelpers/contrib/hahelpers/cluster.py (+316/-0)
charmhelpers/contrib/network/__init__.py (+15/-0)
charmhelpers/contrib/network/ip.py (+456/-0)
charmhelpers/contrib/openstack/__init__.py (+15/-0)
charmhelpers/contrib/openstack/alternatives.py (+33/-0)
charmhelpers/contrib/openstack/amulet/__init__.py (+15/-0)
charmhelpers/contrib/openstack/amulet/deployment.py (+197/-0)
charmhelpers/contrib/openstack/amulet/utils.py (+963/-0)
charmhelpers/contrib/openstack/context.py (+1427/-0)
charmhelpers/contrib/openstack/files/__init__.py (+18/-0)
charmhelpers/contrib/openstack/files/check_haproxy.sh (+32/-0)
charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+30/-0)
charmhelpers/contrib/openstack/ip.py (+151/-0)
charmhelpers/contrib/openstack/neutron.py (+356/-0)
charmhelpers/contrib/openstack/templates/__init__.py (+18/-0)
charmhelpers/contrib/openstack/templates/ceph.conf (+21/-0)
charmhelpers/contrib/openstack/templates/git.upstart (+17/-0)
charmhelpers/contrib/openstack/templates/haproxy.cfg (+58/-0)
charmhelpers/contrib/openstack/templates/openstack_https_frontend (+24/-0)
charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+24/-0)
charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+9/-0)
charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+22/-0)
charmhelpers/contrib/openstack/templates/section-zeromq (+14/-0)
charmhelpers/contrib/openstack/templating.py (+323/-0)
charmhelpers/contrib/openstack/utils.py (+977/-0)
charmhelpers/contrib/python/__init__.py (+15/-0)
charmhelpers/contrib/python/packages.py (+121/-0)
charmhelpers/contrib/storage/__init__.py (+15/-0)
charmhelpers/contrib/storage/linux/__init__.py (+15/-0)
charmhelpers/contrib/storage/linux/ceph.py (+657/-0)
charmhelpers/contrib/storage/linux/loopback.py (+78/-0)
charmhelpers/contrib/storage/linux/lvm.py (+105/-0)
charmhelpers/contrib/storage/linux/utils.py (+71/-0)
charmhelpers/core/__init__.py (+15/-0)
charmhelpers/core/decorators.py (+57/-0)
charmhelpers/core/files.py (+45/-0)
charmhelpers/core/fstab.py (+134/-0)
charmhelpers/core/hookenv.py (+930/-0)
charmhelpers/core/host.py (+586/-0)
charmhelpers/core/hugepage.py (+69/-0)
charmhelpers/core/kernel.py (+68/-0)
charmhelpers/core/services/__init__.py (+18/-0)
charmhelpers/core/services/base.py (+353/-0)
charmhelpers/core/services/helpers.py (+283/-0)
charmhelpers/core/strutils.py (+72/-0)
charmhelpers/core/sysctl.py (+56/-0)
charmhelpers/core/templating.py (+68/-0)
charmhelpers/core/unitdata.py (+521/-0)
charmhelpers/fetch/__init__.py (+456/-0)
charmhelpers/fetch/archiveurl.py (+167/-0)
charmhelpers/fetch/bzrurl.py (+78/-0)
charmhelpers/fetch/giturl.py (+73/-0)
charmhelpers/payload/__init__.py (+17/-0)
charmhelpers/payload/archive.py (+73/-0)
charmhelpers/payload/execd.py (+66/-0)
hooks/charmhelpers/__init__.py (+0/-38)
hooks/charmhelpers/cli/README.rst (+0/-57)
hooks/charmhelpers/cli/__init__.py (+0/-147)
hooks/charmhelpers/cli/commands.py (+0/-2)
hooks/charmhelpers/cli/host.py (+0/-15)
hooks/charmhelpers/contrib/__init__.py (+0/-15)
hooks/charmhelpers/contrib/ansible/__init__.py (+0/-165)
hooks/charmhelpers/contrib/charmhelpers/IMPORT (+0/-4)
hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-184)
hooks/charmhelpers/contrib/charmsupport/__init__.py (+0/-15)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-360)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-175)
hooks/charmhelpers/contrib/hahelpers/apache.py (+0/-59)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+0/-183)
hooks/charmhelpers/contrib/jujugui/IMPORT (+0/-4)
hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602)
hooks/charmhelpers/contrib/network/__init__.py (+0/-15)
hooks/charmhelpers/contrib/network/ip.py (+0/-454)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+0/-75)
hooks/charmhelpers/contrib/openstack/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/alternatives.py (+0/-33)
hooks/charmhelpers/contrib/openstack/amulet/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+0/-183)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+0/-604)
hooks/charmhelpers/contrib/openstack/context.py (+0/-1372)
hooks/charmhelpers/contrib/openstack/files/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+0/-32)
hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+0/-30)
hooks/charmhelpers/contrib/openstack/ip.py (+0/-151)
hooks/charmhelpers/contrib/openstack/neutron.py (+0/-356)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+0/-15)
hooks/charmhelpers/contrib/openstack/templates/git.upstart (+0/-17)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+0/-58)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+0/-9)
hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+0/-22)
hooks/charmhelpers/contrib/openstack/templates/section-zeromq (+0/-14)
hooks/charmhelpers/contrib/openstack/templating.py (+0/-295)
hooks/charmhelpers/contrib/openstack/utils.py (+0/-751)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+0/-83)
hooks/charmhelpers/contrib/python/__init__.py (+0/-15)
hooks/charmhelpers/contrib/python/packages.py (+0/-121)
hooks/charmhelpers/contrib/python/version.py (+0/-18)
hooks/charmhelpers/contrib/saltstack/__init__.py (+0/-102)
hooks/charmhelpers/contrib/ssl/__init__.py (+0/-78)
hooks/charmhelpers/contrib/ssl/service.py (+0/-267)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+0/-387)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+0/-62)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+0/-88)
hooks/charmhelpers/contrib/storage/linux/utils.py (+0/-49)
hooks/charmhelpers/contrib/templating/contexts.py (+0/-104)
hooks/charmhelpers/contrib/templating/pyformat.py (+0/-13)
hooks/charmhelpers/contrib/unison/__init__.py (+0/-257)
hooks/charmhelpers/core/__init__.py (+0/-15)
hooks/charmhelpers/core/decorators.py (+0/-57)
hooks/charmhelpers/core/files.py (+0/-45)
hooks/charmhelpers/core/fstab.py (+0/-134)
hooks/charmhelpers/core/hookenv.py (+0/-898)
hooks/charmhelpers/core/host.py (+0/-586)
hooks/charmhelpers/core/hugepage.py (+0/-62)
hooks/charmhelpers/core/kernel.py (+0/-68)
hooks/charmhelpers/core/services/__init__.py (+0/-18)
hooks/charmhelpers/core/services/base.py (+0/-353)
hooks/charmhelpers/core/services/helpers.py (+0/-283)
hooks/charmhelpers/core/strutils.py (+0/-42)
hooks/charmhelpers/core/sysctl.py (+0/-56)
hooks/charmhelpers/core/templating.py (+0/-68)
hooks/charmhelpers/core/unitdata.py (+0/-521)
hooks/charmhelpers/fetch/__init__.py (+0/-456)
hooks/charmhelpers/fetch/archiveurl.py (+0/-167)
hooks/charmhelpers/fetch/bzrurl.py (+0/-78)
hooks/charmhelpers/fetch/giturl.py (+0/-73)
hooks/charmhelpers/payload/__init__.py (+0/-1)
hooks/charmhelpers/payload/archive.py (+0/-57)
hooks/charmhelpers/payload/execd.py (+0/-50)
hooks/hooks.py (+23/-17)
metadata.yaml (+2/-1)
requirements/requirements-precise.txt (+8/-0)
requirements/requirements-trusty.txt (+8/-0)
requirements/test-requirements.txt (+6/-0)
setup.cfg (+6/-0)
tox.ini (+36/-0)
unit_tests/test_hooks.py (+105/-0)
unit_tests/test_utils.py (+133/-0)
To merge this branch: bzr merge lp:~freyes/charms/trusty/glance-simplestreams-sync/lp1434356
Reviewer: Billy Olsen
Status: Approve
Review via email: mp+271384@code.launchpad.net

Description of the change

Dear OpenStack Charmers,

This MP refactors the config-changed hook handler to fix LP: #1434356 and adds unit tests.
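
For context when skimming the diff below, here is a minimal sketch of the general pattern such a refactor follows (illustrative only, not code from this branch; the function names and config key are hypothetical): the work done in the config-changed hook is moved into a plain helper that the hook entry point delegates to, so the unit tests can call it directly with charmhelpers mocked out.

    # Illustrative sketch, not from this branch; assumes charmhelpers is
    # importable and uses hypothetical names throughout.
    from charmhelpers.core import hookenv


    def apply_config(config):
        # Plain helper that tests can exercise with a dict (or a mocked
        # hookenv.config()) without running inside a Juju hook context.
        if config.get('run'):
            hookenv.log('simplestreams sync enabled')
        else:
            hookenv.log('simplestreams sync disabled')


    def config_changed():
        # Thin hook entry point: read the charm config and delegate.
        apply_config(hookenv.config())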

Best Regards,

Revision history for this message
Billy Olsen (billy-olsen) wrote :

Felipe, thanks for the submission! A few comments inline below, thanks!

review: Needs Fixing
Revision history for this message
Felipe Reyes (freyes) wrote :

Billy, thanks for taking the time to review it, I'll integrate tox.

Revision history for this message
Felipe Reyes (freyes) wrote :

Billy, I've included tox support in this MP and it's ready for review.

Best,

67. By Felipe Reyes

Use ostestr when running 'make unit_tests'

This change keeps the Makefile and tox consistent

68. By Felipe Reyes

Adjust mock patches from 'charmhelpers' to 'hooks.charmhelpers'
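
In practice that adjustment means the mock targets in the unit tests now name the path under which hooks.py actually imports the helpers. A hedged sketch (the test class, config key and assertions are hypothetical, and it assumes the charm root is on sys.path so the hooks package is importable):

    # Sketch only: the patch target must match the dotted path the code under
    # test uses, i.e. 'hooks.charmhelpers...' rather than 'charmhelpers...'.
    import unittest
    from mock import patch


    class ConfigChangedTest(unittest.TestCase):

        # Patching 'charmhelpers.core.hookenv.config' would leave the copy
        # imported through the hooks package untouched, so patch that path.
        @patch('hooks.charmhelpers.core.hookenv.config')
        def test_config_changed(self, mock_config):
            mock_config.return_value = {'frequency': 'daily'}  # hypothetical key
            # ...invoke the config-changed handler here and assert on its effects.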

69. By Felipe Reyes

Add sitepackages=True to tox.ini

Revision history for this message
Billy Olsen (billy-olsen) wrote :

LGTM. Approved - thanks Felipe!

review: Approve

Preview Diff

=== modified file '.bzrignore'
--- .bzrignore 2015-09-08 16:25:57 +0000
+++ .bzrignore 2015-10-08 20:44:37 +0000
@@ -1,1 +1,5 @@
 bin
+.coverage
+.venv
+.testrepository/
+.tox/
=== added file '.testr.conf'
--- .testr.conf 1970-01-01 00:00:00 +0000
+++ .testr.conf 2015-10-08 20:44:37 +0000
@@ -0,0 +1,8 @@
+[DEFAULT]
+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
+ ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
+
+test_id_option=--load-list $IDFILE
+test_list_option=--list
=== modified file 'Makefile'
--- Makefile 2014-06-18 17:32:48 +0000
+++ Makefile 2015-10-08 20:44:37 +0000
@@ -1,13 +1,29 @@
 #!/usr/bin/make
 PYTHON := /usr/bin/env python
-
-lint:
-	@pyflakes hooks/*.py scripts/*.py
+CHARM_DIR := $(PWD)
+HOOKS_DIR := $(PWD)/hooks
+TEST_PREFIX := PYTHONPATH=$(HOOKS_DIR)
+
+clean:
+	rm -f .coverage
+	find . -name '*.pyc' -delete
+	rm -rf .venv
+	(which dh_clean && dh_clean) || true
+
+.venv:
+	dpkg -s gcc python-dev python-virtualenv python-apt > /dev/null || sudo apt-get install -y gcc python-dev python-virtualenv python-apt
+	virtualenv .venv --system-site-packages
+	.venv/bin/pip install -I \
+	-r requirements/requirements-trusty.txt \
+	-r requirements/test-requirements.txt
+
+lint: clean .venv
+	.venv/bin/flake8 hooks/*.py scripts/*.py unit_tests/*.py
 	@charm proof
 
-test:
+unit_tests: clean .venv
 	@echo Starting tests...
-	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage -v unit_tests
+	env CHARM_DIR=$(CHARM_DIR) $(TEST_PREFIX) .venv/bin/ostestr
 
 
 bin/charm_helpers_sync.py:
=== modified file 'charm-helpers-sync.yaml'
--- charm-helpers-sync.yaml 2015-09-08 16:39:40 +0000
+++ charm-helpers-sync.yaml 2015-10-08 20:44:37 +0000
@@ -1,9 +1,12 @@
 branch: lp:charm-helpers
-destination: hooks/charmhelpers
+destination: charmhelpers
 include:
  - core
  - fetch
+ - payload
  - contrib.openstack|inc=*
  - contrib.charmsupport
  - contrib.network.ip
  - contrib.python.packages
+ - contrib.hahelpers
+ - contrib.storage
=== added directory 'charmhelpers'
=== added file 'charmhelpers/__init__.py'
--- charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,38 @@
+# Copyright 2014-2015 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
+
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+    import six  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # flake8: noqa
+
+try:
+    import yaml  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # flake8: noqa
=== added directory 'charmhelpers/contrib'
=== added file 'charmhelpers/contrib/__init__.py'
--- charmhelpers/contrib/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
=== added directory 'charmhelpers/contrib/charmsupport'
=== added file 'charmhelpers/contrib/charmsupport/__init__.py'
--- charmhelpers/contrib/charmsupport/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/charmsupport/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
=== added file 'charmhelpers/contrib/charmsupport/nrpe.py'
--- charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/charmsupport/nrpe.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,360 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""Compatibility with the nrpe-external-master charm"""
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# Matthew Wedgwood <matthew.wedgwood@canonical.com>
22
23import subprocess
24import pwd
25import grp
26import os
27import glob
28import shutil
29import re
30import shlex
31import yaml
32
33from charmhelpers.core.hookenv import (
34 config,
35 local_unit,
36 log,
37 relation_ids,
38 relation_set,
39 relations_of_type,
40)
41
42from charmhelpers.core.host import service
43
44# This module adds compatibility with the nrpe-external-master and plain nrpe
45# subordinate charms. To use it in your charm:
46#
47# 1. Update metadata.yaml
48#
49# provides:
50# (...)
51# nrpe-external-master:
52# interface: nrpe-external-master
53# scope: container
54#
55# and/or
56#
57# provides:
58# (...)
59# local-monitors:
60# interface: local-monitors
61# scope: container
62
63#
64# 2. Add the following to config.yaml
65#
66# nagios_context:
67# default: "juju"
68# type: string
69# description: |
70# Used by the nrpe subordinate charms.
71# A string that will be prepended to instance name to set the host name
72# in nagios. So for instance the hostname would be something like:
73# juju-myservice-0
74# If you're running multiple environments with the same services in them
75# this allows you to differentiate between them.
76# nagios_servicegroups:
77# default: ""
78# type: string
79# description: |
80# A comma-separated list of nagios servicegroups.
81# If left empty, the nagios_context will be used as the servicegroup
82#
83# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
84#
85# 4. Update your hooks.py with something like this:
86#
87# from charmsupport.nrpe import NRPE
88# (...)
89# def update_nrpe_config():
90# nrpe_compat = NRPE()
91# nrpe_compat.add_check(
92# shortname = "myservice",
93# description = "Check MyService",
94# check_cmd = "check_http -w 2 -c 10 http://localhost"
95# )
96# nrpe_compat.add_check(
97# "myservice_other",
98# "Check for widget failures",
99# check_cmd = "/srv/myapp/scripts/widget_check"
100# )
101# nrpe_compat.write()
102#
103# def config_changed():
104# (...)
105# update_nrpe_config()
106#
107# def nrpe_external_master_relation_changed():
108# update_nrpe_config()
109#
110# def local_monitors_relation_changed():
111# update_nrpe_config()
112#
113# 5. ln -s hooks.py nrpe-external-master-relation-changed
114# ln -s hooks.py local-monitors-relation-changed
115
116
117class CheckException(Exception):
118 pass
119
120
121class Check(object):
122 shortname_re = '[A-Za-z0-9-_]+$'
123 service_template = ("""
124#---------------------------------------------------
125# This file is Juju managed
126#---------------------------------------------------
127define service {{
128 use active-service
129 host_name {nagios_hostname}
130 service_description {nagios_hostname}[{shortname}] """
131 """{description}
132 check_command check_nrpe!{command}
133 servicegroups {nagios_servicegroup}
134}}
135""")
136
137 def __init__(self, shortname, description, check_cmd):
138 super(Check, self).__init__()
139 # XXX: could be better to calculate this from the service name
140 if not re.match(self.shortname_re, shortname):
141 raise CheckException("shortname must match {}".format(
142 Check.shortname_re))
143 self.shortname = shortname
144 self.command = "check_{}".format(shortname)
145 # Note: a set of invalid characters is defined by the
146 # Nagios server config
147 # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
148 self.description = description
149 self.check_cmd = self._locate_cmd(check_cmd)
150
151 def _locate_cmd(self, check_cmd):
152 search_path = (
153 '/usr/lib/nagios/plugins',
154 '/usr/local/lib/nagios/plugins',
155 )
156 parts = shlex.split(check_cmd)
157 for path in search_path:
158 if os.path.exists(os.path.join(path, parts[0])):
159 command = os.path.join(path, parts[0])
160 if len(parts) > 1:
161 command += " " + " ".join(parts[1:])
162 return command
163 log('Check command not found: {}'.format(parts[0]))
164 return ''
165
166 def write(self, nagios_context, hostname, nagios_servicegroups):
167 nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
168 self.command)
169 with open(nrpe_check_file, 'w') as nrpe_check_config:
170 nrpe_check_config.write("# check {}\n".format(self.shortname))
171 nrpe_check_config.write("command[{}]={}\n".format(
172 self.command, self.check_cmd))
173
174 if not os.path.exists(NRPE.nagios_exportdir):
175 log('Not writing service config as {} is not accessible'.format(
176 NRPE.nagios_exportdir))
177 else:
178 self.write_service_config(nagios_context, hostname,
179 nagios_servicegroups)
180
181 def write_service_config(self, nagios_context, hostname,
182 nagios_servicegroups):
183 for f in os.listdir(NRPE.nagios_exportdir):
184 if re.search('.*{}.cfg'.format(self.command), f):
185 os.remove(os.path.join(NRPE.nagios_exportdir, f))
186
187 templ_vars = {
188 'nagios_hostname': hostname,
189 'nagios_servicegroup': nagios_servicegroups,
190 'description': self.description,
191 'shortname': self.shortname,
192 'command': self.command,
193 }
194 nrpe_service_text = Check.service_template.format(**templ_vars)
195 nrpe_service_file = '{}/service__{}_{}.cfg'.format(
196 NRPE.nagios_exportdir, hostname, self.command)
197 with open(nrpe_service_file, 'w') as nrpe_service_config:
198 nrpe_service_config.write(str(nrpe_service_text))
199
200 def run(self):
201 subprocess.call(self.check_cmd)
202
203
204class NRPE(object):
205 nagios_logdir = '/var/log/nagios'
206 nagios_exportdir = '/var/lib/nagios/export'
207 nrpe_confdir = '/etc/nagios/nrpe.d'
208
209 def __init__(self, hostname=None):
210 super(NRPE, self).__init__()
211 self.config = config()
212 self.nagios_context = self.config['nagios_context']
213 if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
214 self.nagios_servicegroups = self.config['nagios_servicegroups']
215 else:
216 self.nagios_servicegroups = self.nagios_context
217 self.unit_name = local_unit().replace('/', '-')
218 if hostname:
219 self.hostname = hostname
220 else:
221 self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
222 self.checks = []
223
224 def add_check(self, *args, **kwargs):
225 self.checks.append(Check(*args, **kwargs))
226
227 def write(self):
228 try:
229 nagios_uid = pwd.getpwnam('nagios').pw_uid
230 nagios_gid = grp.getgrnam('nagios').gr_gid
231 except:
232 log("Nagios user not set up, nrpe checks not updated")
233 return
234
235 if not os.path.exists(NRPE.nagios_logdir):
236 os.mkdir(NRPE.nagios_logdir)
237 os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
238
239 nrpe_monitors = {}
240 monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
241 for nrpecheck in self.checks:
242 nrpecheck.write(self.nagios_context, self.hostname,
243 self.nagios_servicegroups)
244 nrpe_monitors[nrpecheck.shortname] = {
245 "command": nrpecheck.command,
246 }
247
248 service('restart', 'nagios-nrpe-server')
249
250 monitor_ids = relation_ids("local-monitors") + \
251 relation_ids("nrpe-external-master")
252 for rid in monitor_ids:
253 relation_set(relation_id=rid, monitors=yaml.dump(monitors))
254
255
256def get_nagios_hostcontext(relation_name='nrpe-external-master'):
257 """
258 Query relation with nrpe subordinate, return the nagios_host_context
259
260 :param str relation_name: Name of relation nrpe sub joined to
261 """
262 for rel in relations_of_type(relation_name):
263 if 'nagios_hostname' in rel:
264 return rel['nagios_host_context']
265
266
267def get_nagios_hostname(relation_name='nrpe-external-master'):
268 """
269 Query relation with nrpe subordinate, return the nagios_hostname
270
271 :param str relation_name: Name of relation nrpe sub joined to
272 """
273 for rel in relations_of_type(relation_name):
274 if 'nagios_hostname' in rel:
275 return rel['nagios_hostname']
276
277
278def get_nagios_unit_name(relation_name='nrpe-external-master'):
279 """
280 Return the nagios unit name prepended with host_context if needed
281
282 :param str relation_name: Name of relation nrpe sub joined to
283 """
284 host_context = get_nagios_hostcontext(relation_name)
285 if host_context:
286 unit = "%s:%s" % (host_context, local_unit())
287 else:
288 unit = local_unit()
289 return unit
290
291
292def add_init_service_checks(nrpe, services, unit_name):
293 """
294 Add checks for each service in list
295
296 :param NRPE nrpe: NRPE object to add check to
297 :param list services: List of services to check
298 :param str unit_name: Unit name to use in check description
299 """
300 for svc in services:
301 upstart_init = '/etc/init/%s.conf' % svc
302 sysv_init = '/etc/init.d/%s' % svc
303 if os.path.exists(upstart_init):
304 nrpe.add_check(
305 shortname=svc,
306 description='process check {%s}' % unit_name,
307 check_cmd='check_upstart_job %s' % svc
308 )
309 elif os.path.exists(sysv_init):
310 cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
311 cron_file = ('*/5 * * * * root '
312 '/usr/local/lib/nagios/plugins/check_exit_status.pl '
313 '-s /etc/init.d/%s status > '
314 '/var/lib/nagios/service-check-%s.txt\n' % (svc,
315 svc)
316 )
317 f = open(cronpath, 'w')
318 f.write(cron_file)
319 f.close()
320 nrpe.add_check(
321 shortname=svc,
322 description='process check {%s}' % unit_name,
323 check_cmd='check_status_file.py -f '
324 '/var/lib/nagios/service-check-%s.txt' % svc,
325 )
326
327
328def copy_nrpe_checks():
329 """
330 Copy the nrpe checks into place
331
332 """
333 NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
334 nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
335 'charmhelpers', 'contrib', 'openstack',
336 'files')
337
338 if not os.path.exists(NAGIOS_PLUGINS):
339 os.makedirs(NAGIOS_PLUGINS)
340 for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
341 if os.path.isfile(fname):
342 shutil.copy2(fname,
343 os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
344
345
346def add_haproxy_checks(nrpe, unit_name):
347 """
348 Add checks for each service in list
349
350 :param NRPE nrpe: NRPE object to add check to
351 :param str unit_name: Unit name to use in check description
352 """
353 nrpe.add_check(
354 shortname='haproxy_servers',
355 description='Check HAProxy {%s}' % unit_name,
356 check_cmd='check_haproxy.sh')
357 nrpe.add_check(
358 shortname='haproxy_queue',
359 description='Check HAProxy queue depth {%s}' % unit_name,
360 check_cmd='check_haproxy_queue_depth.sh')
=== added file 'charmhelpers/contrib/charmsupport/volumes.py'
--- charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/charmsupport/volumes.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,175 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17'''
18Functions for managing volumes in juju units. One volume is supported per unit.
19Subordinates may have their own storage, provided it is on its own partition.
20
21Configuration stanzas::
22
23 volume-ephemeral:
24 type: boolean
25 default: true
26 description: >
27 If false, a volume is mounted as sepecified in "volume-map"
28 If true, ephemeral storage will be used, meaning that log data
29 will only exist as long as the machine. YOU HAVE BEEN WARNED.
30 volume-map:
31 type: string
32 default: {}
33 description: >
34 YAML map of units to device names, e.g:
35 "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
36 Service units will raise a configure-error if volume-ephemeral
37 is 'true' and no volume-map value is set. Use 'juju set' to set a
38 value and 'juju resolved' to complete configuration.
39
40Usage::
41
42 from charmsupport.volumes import configure_volume, VolumeConfigurationError
43 from charmsupport.hookenv import log, ERROR
44 def post_mount_hook():
45 stop_service('myservice')
46 def post_mount_hook():
47 start_service('myservice')
48
49 if __name__ == '__main__':
50 try:
51 configure_volume(before_change=pre_mount_hook,
52 after_change=post_mount_hook)
53 except VolumeConfigurationError:
54 log('Storage could not be configured', ERROR)
55
56'''
57
58# XXX: Known limitations
59# - fstab is neither consulted nor updated
60
61import os
62from charmhelpers.core import hookenv
63from charmhelpers.core import host
64import yaml
65
66
67MOUNT_BASE = '/srv/juju/volumes'
68
69
70class VolumeConfigurationError(Exception):
71 '''Volume configuration data is missing or invalid'''
72 pass
73
74
75def get_config():
76 '''Gather and sanity-check volume configuration data'''
77 volume_config = {}
78 config = hookenv.config()
79
80 errors = False
81
82 if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
83 volume_config['ephemeral'] = True
84 else:
85 volume_config['ephemeral'] = False
86
87 try:
88 volume_map = yaml.safe_load(config.get('volume-map', '{}'))
89 except yaml.YAMLError as e:
90 hookenv.log("Error parsing YAML volume-map: {}".format(e),
91 hookenv.ERROR)
92 errors = True
93 if volume_map is None:
94 # probably an empty string
95 volume_map = {}
96 elif not isinstance(volume_map, dict):
97 hookenv.log("Volume-map should be a dictionary, not {}".format(
98 type(volume_map)))
99 errors = True
100
101 volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
102 if volume_config['device'] and volume_config['ephemeral']:
103 # asked for ephemeral storage but also defined a volume ID
104 hookenv.log('A volume is defined for this unit, but ephemeral '
105 'storage was requested', hookenv.ERROR)
106 errors = True
107 elif not volume_config['device'] and not volume_config['ephemeral']:
108 # asked for permanent storage but did not define volume ID
109 hookenv.log('Ephemeral storage was requested, but there is no volume '
110 'defined for this unit.', hookenv.ERROR)
111 errors = True
112
113 unit_mount_name = hookenv.local_unit().replace('/', '-')
114 volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
115
116 if errors:
117 return None
118 return volume_config
119
120
121def mount_volume(config):
122 if os.path.exists(config['mountpoint']):
123 if not os.path.isdir(config['mountpoint']):
124 hookenv.log('Not a directory: {}'.format(config['mountpoint']))
125 raise VolumeConfigurationError()
126 else:
127 host.mkdir(config['mountpoint'])
128 if os.path.ismount(config['mountpoint']):
129 unmount_volume(config)
130 if not host.mount(config['device'], config['mountpoint'], persist=True):
131 raise VolumeConfigurationError()
132
133
134def unmount_volume(config):
135 if os.path.ismount(config['mountpoint']):
136 if not host.umount(config['mountpoint'], persist=True):
137 raise VolumeConfigurationError()
138
139
140def managed_mounts():
141 '''List of all mounted managed volumes'''
142 return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
143
144
145def configure_volume(before_change=lambda: None, after_change=lambda: None):
146 '''Set up storage (or don't) according to the charm's volume configuration.
147 Returns the mount point or "ephemeral". before_change and after_change
148 are optional functions to be called if the volume configuration changes.
149 '''
150
151 config = get_config()
152 if not config:
153 hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
154 raise VolumeConfigurationError()
155
156 if config['ephemeral']:
157 if os.path.ismount(config['mountpoint']):
158 before_change()
159 unmount_volume(config)
160 after_change()
161 return 'ephemeral'
162 else:
163 # persistent storage
164 if os.path.ismount(config['mountpoint']):
165 mounts = dict(managed_mounts())
166 if mounts.get(config['mountpoint']) != config['device']:
167 before_change()
168 unmount_volume(config)
169 mount_volume(config)
170 after_change()
171 else:
172 before_change()
173 mount_volume(config)
174 after_change()
175 return config['mountpoint']
=== added directory 'charmhelpers/contrib/hahelpers'
=== added file 'charmhelpers/contrib/hahelpers/__init__.py'
--- charmhelpers/contrib/hahelpers/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/hahelpers/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
=== added file 'charmhelpers/contrib/hahelpers/apache.py'
--- charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/hahelpers/apache.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,82 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17#
18# Copyright 2012 Canonical Ltd.
19#
20# This file is sourced from lp:openstack-charm-helpers
21#
22# Authors:
23# James Page <james.page@ubuntu.com>
24# Adam Gandelman <adamg@ubuntu.com>
25#
26
27import subprocess
28
29from charmhelpers.core.hookenv import (
30 config as config_get,
31 relation_get,
32 relation_ids,
33 related_units as relation_list,
34 log,
35 INFO,
36)
37
38
39def get_cert(cn=None):
40 # TODO: deal with multiple https endpoints via charm config
41 cert = config_get('ssl_cert')
42 key = config_get('ssl_key')
43 if not (cert and key):
44 log("Inspecting identity-service relations for SSL certificate.",
45 level=INFO)
46 cert = key = None
47 if cn:
48 ssl_cert_attr = 'ssl_cert_{}'.format(cn)
49 ssl_key_attr = 'ssl_key_{}'.format(cn)
50 else:
51 ssl_cert_attr = 'ssl_cert'
52 ssl_key_attr = 'ssl_key'
53 for r_id in relation_ids('identity-service'):
54 for unit in relation_list(r_id):
55 if not cert:
56 cert = relation_get(ssl_cert_attr,
57 rid=r_id, unit=unit)
58 if not key:
59 key = relation_get(ssl_key_attr,
60 rid=r_id, unit=unit)
61 return (cert, key)
62
63
64def get_ca_cert():
65 ca_cert = config_get('ssl_ca')
66 if ca_cert is None:
67 log("Inspecting identity-service relations for CA SSL certificate.",
68 level=INFO)
69 for r_id in relation_ids('identity-service'):
70 for unit in relation_list(r_id):
71 if ca_cert is None:
72 ca_cert = relation_get('ca_cert',
73 rid=r_id, unit=unit)
74 return ca_cert
75
76
77def install_ca_cert(ca_cert):
78 if ca_cert:
79 with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
80 'w') as crt:
81 crt.write(ca_cert)
82 subprocess.check_call(['update-ca-certificates', '--fresh'])
=== added file 'charmhelpers/contrib/hahelpers/cluster.py'
--- charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/hahelpers/cluster.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,316 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17#
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# James Page <james.page@ubuntu.com>
22# Adam Gandelman <adamg@ubuntu.com>
23#
24
25"""
26Helpers for clustering and determining "cluster leadership" and other
27clustering-related helpers.
28"""
29
30import subprocess
31import os
32
33from socket import gethostname as get_unit_hostname
34
35import six
36
37from charmhelpers.core.hookenv import (
38 log,
39 relation_ids,
40 related_units as relation_list,
41 relation_get,
42 config as config_get,
43 INFO,
44 ERROR,
45 WARNING,
46 unit_get,
47 is_leader as juju_is_leader
48)
49from charmhelpers.core.decorators import (
50 retry_on_exception,
51)
52from charmhelpers.core.strutils import (
53 bool_from_string,
54)
55
56DC_RESOURCE_NAME = 'DC'
57
58
59class HAIncompleteConfig(Exception):
60 pass
61
62
63class CRMResourceNotFound(Exception):
64 pass
65
66
67class CRMDCNotFound(Exception):
68 pass
69
70
71def is_elected_leader(resource):
72 """
73 Returns True if the charm executing this is the elected cluster leader.
74
75 It relies on two mechanisms to determine leadership:
76 1. If juju is sufficiently new and leadership election is supported,
77 the is_leader command will be used.
78 2. If the charm is part of a corosync cluster, call corosync to
79 determine leadership.
80 3. If the charm is not part of a corosync cluster, the leader is
81 determined as being "the alive unit with the lowest unit numer". In
82 other words, the oldest surviving unit.
83 """
84 try:
85 return juju_is_leader()
86 except NotImplementedError:
87 log('Juju leadership election feature not enabled'
88 ', using fallback support',
89 level=WARNING)
90
91 if is_clustered():
92 if not is_crm_leader(resource):
93 log('Deferring action to CRM leader.', level=INFO)
94 return False
95 else:
96 peers = peer_units()
97 if peers and not oldest_peer(peers):
98 log('Deferring action to oldest service unit.', level=INFO)
99 return False
100 return True
101
102
103def is_clustered():
104 for r_id in (relation_ids('ha') or []):
105 for unit in (relation_list(r_id) or []):
106 clustered = relation_get('clustered',
107 rid=r_id,
108 unit=unit)
109 if clustered:
110 return True
111 return False
112
113
114def is_crm_dc():
115 """
116 Determine leadership by querying the pacemaker Designated Controller
117 """
118 cmd = ['crm', 'status']
119 try:
120 status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
121 if not isinstance(status, six.text_type):
122 status = six.text_type(status, "utf-8")
123 except subprocess.CalledProcessError as ex:
124 raise CRMDCNotFound(str(ex))
125
126 current_dc = ''
127 for line in status.split('\n'):
128 if line.startswith('Current DC'):
129 # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
130 current_dc = line.split(':')[1].split()[0]
131 if current_dc == get_unit_hostname():
132 return True
133 elif current_dc == 'NONE':
134 raise CRMDCNotFound('Current DC: NONE')
135
136 return False
137
138
139@retry_on_exception(5, base_delay=2,
140 exc_type=(CRMResourceNotFound, CRMDCNotFound))
141def is_crm_leader(resource, retry=False):
142 """
143 Returns True if the charm calling this is the elected corosync leader,
144 as returned by calling the external "crm" command.
145
146 We allow this operation to be retried to avoid the possibility of getting a
147 false negative. See LP #1396246 for more info.
148 """
149 if resource == DC_RESOURCE_NAME:
150 return is_crm_dc()
151 cmd = ['crm', 'resource', 'show', resource]
152 try:
153 status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
154 if not isinstance(status, six.text_type):
155 status = six.text_type(status, "utf-8")
156 except subprocess.CalledProcessError:
157 status = None
158
159 if status and get_unit_hostname() in status:
160 return True
161
162 if status and "resource %s is NOT running" % (resource) in status:
163 raise CRMResourceNotFound("CRM resource %s not found" % (resource))
164
165 return False
166
167
168def is_leader(resource):
169 log("is_leader is deprecated. Please consider using is_crm_leader "
170 "instead.", level=WARNING)
171 return is_crm_leader(resource)
172
173
174def peer_units(peer_relation="cluster"):
175 peers = []
176 for r_id in (relation_ids(peer_relation) or []):
177 for unit in (relation_list(r_id) or []):
178 peers.append(unit)
179 return peers
180
181
182def peer_ips(peer_relation='cluster', addr_key='private-address'):
183 '''Return a dict of peers and their private-address'''
184 peers = {}
185 for r_id in relation_ids(peer_relation):
186 for unit in relation_list(r_id):
187 peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
188 return peers
189
190
191def oldest_peer(peers):
192 """Determines who the oldest peer is by comparing unit numbers."""
193 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
194 for peer in peers:
195 remote_unit_no = int(peer.split('/')[1])
196 if remote_unit_no < local_unit_no:
197 return False
198 return True
199
200
201def eligible_leader(resource):
202 log("eligible_leader is deprecated. Please consider using "
203 "is_elected_leader instead.", level=WARNING)
204 return is_elected_leader(resource)
205
206
207def https():
208 '''
209 Determines whether enough data has been provided in configuration
210 or relation data to configure HTTPS
211 .
212 returns: boolean
213 '''
214 use_https = config_get('use-https')
215 if use_https and bool_from_string(use_https):
216 return True
217 if config_get('ssl_cert') and config_get('ssl_key'):
218 return True
219 for r_id in relation_ids('identity-service'):
220 for unit in relation_list(r_id):
221 # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
222 rel_state = [
223 relation_get('https_keystone', rid=r_id, unit=unit),
224 relation_get('ca_cert', rid=r_id, unit=unit),
225 ]
226 # NOTE: works around (LP: #1203241)
227 if (None not in rel_state) and ('' not in rel_state):
228 return True
229 return False
230
231
232def determine_api_port(public_port, singlenode_mode=False):
233 '''
234 Determine correct API server listening port based on
235 existence of HTTPS reverse proxy and/or haproxy.
236
237 public_port: int: standard public port for given service
238
239 singlenode_mode: boolean: Shuffle ports when only a single unit is present
240
241 returns: int: the correct listening port for the API service
242 '''
243 i = 0
244 if singlenode_mode:
245 i += 1
246 elif len(peer_units()) > 0 or is_clustered():
247 i += 1
248 if https():
249 i += 1
250 return public_port - (i * 10)
251
252
253def determine_apache_port(public_port, singlenode_mode=False):
254 '''
255 Description: Determine correct apache listening port based on public IP +
256 state of the cluster.
257
258 public_port: int: standard public port for given service
259
260 singlenode_mode: boolean: Shuffle ports when only a single unit is present
261
262 returns: int: the correct listening port for the HAProxy service
263 '''
264 i = 0
265 if singlenode_mode:
266 i += 1
267 elif len(peer_units()) > 0 or is_clustered():
268 i += 1
269 return public_port - (i * 10)
270
271
272def get_hacluster_config(exclude_keys=None):
273 '''
274 Obtains all relevant configuration from charm configuration required
275 for initiating a relation to hacluster:
276
277 ha-bindiface, ha-mcastport, vip
278
279 param: exclude_keys: list of setting key(s) to be excluded.
280 returns: dict: A dict containing settings keyed by setting name.
281 raises: HAIncompleteConfig if settings are missing.
282 '''
283 settings = ['ha-bindiface', 'ha-mcastport', 'vip']
284 conf = {}
285 for setting in settings:
286 if exclude_keys and setting in exclude_keys:
287 continue
288
289 conf[setting] = config_get(setting)
290 missing = []
291 [missing.append(s) for s, v in six.iteritems(conf) if v is None]
292 if missing:
293 log('Insufficient config data to configure hacluster.', level=ERROR)
294 raise HAIncompleteConfig
295 return conf
296
297
298def canonical_url(configs, vip_setting='vip'):
299 '''
300 Returns the correct HTTP URL to this host given the state of HTTPS
301 configuration and hacluster.
302
303 :configs : OSTemplateRenderer: A config tempating object to inspect for
304 a complete https context.
305
306 :vip_setting: str: Setting in charm config that specifies
307 VIP address.
308 '''
309 scheme = 'http'
310 if 'https' in configs.complete_contexts():
311 scheme = 'https'
312 if is_clustered():
313 addr = config_get(vip_setting)
314 else:
315 addr = unit_get('private-address')
316 return '%s://%s' % (scheme, addr)
=== added directory 'charmhelpers/contrib/network'
=== added file 'charmhelpers/contrib/network/__init__.py'
--- charmhelpers/contrib/network/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/network/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
=== added file 'charmhelpers/contrib/network/ip.py'
--- charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/network/ip.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,456 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import re
19import subprocess
20import six
21import socket
22
23from functools import partial
24
25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install, apt_update
27from charmhelpers.core.hookenv import (
28 log,
29 WARNING,
30)
31
32try:
33 import netifaces
34except ImportError:
35 apt_update(fatal=True)
36 apt_install('python-netifaces', fatal=True)
37 import netifaces
38
39try:
40 import netaddr
41except ImportError:
42 apt_update(fatal=True)
43 apt_install('python-netaddr', fatal=True)
44 import netaddr
45
46
47def _validate_cidr(network):
48 try:
49 netaddr.IPNetwork(network)
50 except (netaddr.core.AddrFormatError, ValueError):
51 raise ValueError("Network (%s) is not in CIDR presentation format" %
52 network)
53
54
55def no_ip_found_error_out(network):
56 errmsg = ("No IP address found in network: %s" % network)
57 raise ValueError(errmsg)
58
59
60def get_address_in_network(network, fallback=None, fatal=False):
61 """Get an IPv4 or IPv6 address within the network from the host.
62
63 :param network (str): CIDR presentation format. For example,
64 '192.168.1.0/24'.
65 :param fallback (str): If no address is found, return fallback.
66 :param fatal (boolean): If no address is found, fallback is not
67 set and fatal is True then exit(1).
68 """
69 if network is None:
70 if fallback is not None:
71 return fallback
72
73 if fatal:
74 no_ip_found_error_out(network)
75 else:
76 return None
77
78 _validate_cidr(network)
79 network = netaddr.IPNetwork(network)
80 for iface in netifaces.interfaces():
81 addresses = netifaces.ifaddresses(iface)
82 if network.version == 4 and netifaces.AF_INET in addresses:
83 addr = addresses[netifaces.AF_INET][0]['addr']
84 netmask = addresses[netifaces.AF_INET][0]['netmask']
85 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
86 if cidr in network:
87 return str(cidr.ip)
88
89 if network.version == 6 and netifaces.AF_INET6 in addresses:
90 for addr in addresses[netifaces.AF_INET6]:
91 if not addr['addr'].startswith('fe80'):
92 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
93 addr['netmask']))
94 if cidr in network:
95 return str(cidr.ip)
96
97 if fallback is not None:
98 return fallback
99
100 if fatal:
101 no_ip_found_error_out(network)
102
103 return None
104
105
106def is_ipv6(address):
107 """Determine whether provided address is IPv6 or not."""
108 try:
109 address = netaddr.IPAddress(address)
110 except netaddr.AddrFormatError:
111 # probably a hostname - so not an address at all!
112 return False
113
114 return address.version == 6
115
116
117def is_address_in_network(network, address):
118 """
119 Determine whether the provided address is within a network range.
120
121 :param network (str): CIDR presentation format. For example,
122 '192.168.1.0/24'.
123 :param address: An individual IPv4 or IPv6 address without a net
124 mask or subnet prefix. For example, '192.168.1.1'.
125 :returns boolean: Flag indicating whether address is in network.
126 """
127 try:
128 network = netaddr.IPNetwork(network)
129 except (netaddr.core.AddrFormatError, ValueError):
130 raise ValueError("Network (%s) is not in CIDR presentation format" %
131 network)
132
133 try:
134 address = netaddr.IPAddress(address)
135 except (netaddr.core.AddrFormatError, ValueError):
136 raise ValueError("Address (%s) is not in correct presentation format" %
137 address)
138
139 if address in network:
140 return True
141 else:
142 return False
143
144
145def _get_for_address(address, key):
146 """Retrieve an attribute of or the physical interface that
147 the IP address provided could be bound to.
148
149 :param address (str): An individual IPv4 or IPv6 address without a net
150 mask or subnet prefix. For example, '192.168.1.1'.
151 :param key: 'iface' for the physical interface name or an attribute
152 of the configured interface, for example 'netmask'.
153 :returns str: Requested attribute or None if address is not bindable.
154 """
155 address = netaddr.IPAddress(address)
156 for iface in netifaces.interfaces():
157 addresses = netifaces.ifaddresses(iface)
158 if address.version == 4 and netifaces.AF_INET in addresses:
159 addr = addresses[netifaces.AF_INET][0]['addr']
160 netmask = addresses[netifaces.AF_INET][0]['netmask']
161 network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
162 cidr = network.cidr
163 if address in cidr:
164 if key == 'iface':
165 return iface
166 else:
167 return addresses[netifaces.AF_INET][0][key]
168
169 if address.version == 6 and netifaces.AF_INET6 in addresses:
170 for addr in addresses[netifaces.AF_INET6]:
171 if not addr['addr'].startswith('fe80'):
172 network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
173 addr['netmask']))
174 cidr = network.cidr
175 if address in cidr:
176 if key == 'iface':
177 return iface
178 elif key == 'netmask' and cidr:
179 return str(cidr).split('/')[1]
180 else:
181 return addr[key]
182
183 return None
184
185
186get_iface_for_address = partial(_get_for_address, key='iface')
187
188
189get_netmask_for_address = partial(_get_for_address, key='netmask')
190
191
192def format_ipv6_addr(address):
193 """If address is IPv6, wrap it in '[]' otherwise return None.
194
195 This is required by most configuration files when specifying IPv6
196 addresses.
197 """
198 if is_ipv6(address):
199 return "[%s]" % address
200
201 return None
202
203
204def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
205 fatal=True, exc_list=None):
206 """Return the assigned IP address for a given interface, if any."""
207 # Extract nic if passed /dev/ethX
208 if '/' in iface:
209 iface = iface.split('/')[-1]
210
211 if not exc_list:
212 exc_list = []
213
214 try:
215 inet_num = getattr(netifaces, inet_type)
216 except AttributeError:
217 raise Exception("Unknown inet type '%s'" % str(inet_type))
218
219 interfaces = netifaces.interfaces()
220 if inc_aliases:
221 ifaces = []
222 for _iface in interfaces:
223 if iface == _iface or _iface.split(':')[0] == iface:
224 ifaces.append(_iface)
225
226 if fatal and not ifaces:
227 raise Exception("Invalid interface '%s'" % iface)
228
229 ifaces.sort()
230 else:
231 if iface not in interfaces:
232 if fatal:
233 raise Exception("Interface '%s' not found " % (iface))
234 else:
235 return []
236
237 else:
238 ifaces = [iface]
239
240 addresses = []
241 for netiface in ifaces:
242 net_info = netifaces.ifaddresses(netiface)
243 if inet_num in net_info:
244 for entry in net_info[inet_num]:
245 if 'addr' in entry and entry['addr'] not in exc_list:
246 addresses.append(entry['addr'])
247
248 if fatal and not addresses:
249 raise Exception("Interface '%s' doesn't have any %s addresses." %
250 (iface, inet_type))
251
252 return sorted(addresses)
253
254
255get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
256
257
258def get_iface_from_addr(addr):
259 """Work out on which interface the provided address is configured."""
260 for iface in netifaces.interfaces():
261 addresses = netifaces.ifaddresses(iface)
262 for inet_type in addresses:
263 for _addr in addresses[inet_type]:
264 _addr = _addr['addr']
265 # link local
266 ll_key = re.compile("(.+)%.*")
267 raw = re.match(ll_key, _addr)
268 if raw:
269 _addr = raw.group(1)
270
271 if _addr == addr:
272 log("Address '%s' is configured on iface '%s'" %
273 (addr, iface))
274 return iface
275
276 msg = "Unable to infer net iface on which '%s' is configured" % (addr)
277 raise Exception(msg)
278
279
280def sniff_iface(f):
281 """Ensure decorated function is called with a value for iface.
282
283 If no iface provided, inject net iface inferred from unit private address.
284 """
285 def iface_sniffer(*args, **kwargs):
286 if not kwargs.get('iface', None):
287 kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
288
289 return f(*args, **kwargs)
290
291 return iface_sniffer
292
293
294@sniff_iface
295def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
296 dynamic_only=True):
297 """Get assigned IPv6 address for a given interface.
298
299 Returns list of addresses found. If no address found, returns empty list.
300
301 If iface is None, we infer the current primary interface by doing a reverse
302 lookup on the unit private-address.
303
304 We currently only support scope global IPv6 addresses i.e. non-temporary
305 addresses. If no global IPv6 address is found, return the first one found
306 in the ipv6 address list.
307 """
308 addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
309 inc_aliases=inc_aliases, fatal=fatal,
310 exc_list=exc_list)
311
312 if addresses:
313 global_addrs = []
314 for addr in addresses:
315 key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
316 m = re.match(key_scope_link_local, addr)
317 if m:
318 eui_64_mac = m.group(1)
319 iface = m.group(2)
320 else:
321 global_addrs.append(addr)
322
323 if global_addrs:
324 # Make sure any found global addresses are not temporary
325 cmd = ['ip', 'addr', 'show', iface]
326 out = subprocess.check_output(cmd).decode('UTF-8')
327 if dynamic_only:
328 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
329 else:
330 key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
331
332 addrs = []
333 for line in out.split('\n'):
334 line = line.strip()
335 m = re.match(key, line)
336 if m and 'temporary' not in line:
337 # Return the first valid address we find
338 for addr in global_addrs:
339 if m.group(1) == addr:
340 if not dynamic_only or \
341 m.group(1).endswith(eui_64_mac):
342 addrs.append(addr)
343
344 if addrs:
345 return addrs
346
347 if fatal:
348 raise Exception("Interface '%s' does not have a scope global "
349 "non-temporary ipv6 address." % iface)
350
351 return []
352
353
354def get_bridges(vnic_dir='/sys/devices/virtual/net'):
355 """Return a list of bridges on the system."""
356 b_regex = "%s/*/bridge" % vnic_dir
357 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
358
359
360def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
361 """Return a list of nics comprising a given bridge on the system."""
362 brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
363 return [x.split('/')[-1] for x in glob.glob(brif_regex)]
364
365
366def is_bridge_member(nic):
367 """Check if a given nic is a member of a bridge."""
368 for bridge in get_bridges():
369 if nic in get_bridge_nics(bridge):
370 return True
371
372 return False
373
374
375def is_ip(address):
376 """
377 Returns True if address is a valid IPv4 address.
378 """
379 try:
380 # Test to see if already an IPv4 address
381 socket.inet_aton(address)
382 return True
383 except socket.error:
384 return False
385
386
387def ns_query(address):
388 try:
389 import dns.resolver
390 except ImportError:
391 apt_install('python-dnspython')
392 import dns.resolver
393
394 if isinstance(address, dns.name.Name):
395 rtype = 'PTR'
396 elif isinstance(address, six.string_types):
397 rtype = 'A'
398 else:
399 return None
400
401 answers = dns.resolver.query(address, rtype)
402 if answers:
403 return str(answers[0])
404 return None
405
406
407def get_host_ip(hostname, fallback=None):
408 """
409 Resolves the IP for a given hostname, or returns
410 the input if it is already an IP.
411 """
412 if is_ip(hostname):
413 return hostname
414
415 ip_addr = ns_query(hostname)
416 if not ip_addr:
417 try:
418 ip_addr = socket.gethostbyname(hostname)
419 except:
420 log("Failed to resolve hostname '%s'" % (hostname),
421 level=WARNING)
422 return fallback
423 return ip_addr
424
425
426def get_hostname(address, fqdn=True):
427 """
428 Resolves hostname for given IP, or returns the input
429 if it is already a hostname.
430 """
431 if is_ip(address):
432 try:
433 import dns.reversename
434 except ImportError:
435 apt_install("python-dnspython")
436 import dns.reversename
437
438 rev = dns.reversename.from_address(address)
439 result = ns_query(rev)
440
441 if not result:
442 try:
443 result = socket.gethostbyaddr(address)[0]
444 except:
445 return None
446 else:
447 result = address
448
449 if fqdn:
450 # strip trailing .
451 if result.endswith('.'):
452 return result[:-1]
453 else:
454 return result
455 else:
456 return result.split('.')[0]
0457
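The helpers above cover the interface and address lookups the OpenStack charms rely on. A minimal sketch of how hook code might call them; the interface name and hostname are illustrative assumptions, not values taken from this charm:

    from charmhelpers.contrib.network.ip import (
        get_host_ip,
        get_hostname,
        get_ipv4_addr,
        get_ipv6_addr,
        is_bridge_member,
    )

    # First IPv4 address on eth0 (raises if the interface has none).
    addr = get_ipv4_addr('eth0', fatal=True)[0]

    # Scope-global IPv6 addresses; when iface is omitted the sniff_iface
    # decorator infers it from the unit's private-address.
    v6_addrs = get_ipv6_addr(fatal=False)

    # Resolve a hostname to an IP (DNS first, then gethostbyname) and back.
    ip = get_host_ip('db.example.com', fallback='127.0.0.1')
    short_name = get_hostname(addr, fqdn=False)

    # True if eth0 has been enslaved to a bridge.
    print(addr, v6_addrs, ip, short_name, is_bridge_member('eth0'))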
=== added directory 'charmhelpers/contrib/openstack'
=== added file 'charmhelpers/contrib/openstack/__init__.py'
--- charmhelpers/contrib/openstack/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
016
=== added file 'charmhelpers/contrib/openstack/alternatives.py'
--- charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/alternatives.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,33 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17''' Helper for managing alternatives for file conflict resolution '''
18
19import subprocess
20import shutil
21import os
22
23
24def install_alternative(name, target, source, priority=50):
25 ''' Install alternative configuration '''
26 if (os.path.exists(target) and not os.path.islink(target)):
27 # Move existing file/directory away before installing
28 shutil.move(target, '{}.bak'.format(target))
29 cmd = [
30 'update-alternatives', '--force', '--install',
31 target, name, source, str(priority)
32 ]
33 subprocess.check_call(cmd)
034
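install_alternative() wraps update-alternatives so several charms can share one config path without clobbering each other. A hedged usage sketch; the alternative name and paths below are illustrative only:

    from charmhelpers.contrib.openstack.alternatives import install_alternative

    # Point /etc/ceph/ceph.conf at a charm-maintained copy. If a regular
    # file already exists at the target it is first moved to ceph.conf.bak,
    # then update-alternatives installs the managed link with priority 50.
    install_alternative(name='ceph.conf',
                        target='/etc/ceph/ceph.conf',
                        source='/var/lib/charm/my-service/ceph.conf',
                        priority=50)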
=== added directory 'charmhelpers/contrib/openstack/amulet'
=== added file 'charmhelpers/contrib/openstack/amulet/__init__.py'
--- charmhelpers/contrib/openstack/amulet/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/amulet/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,15 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
016
=== added file 'charmhelpers/contrib/openstack/amulet/deployment.py'
--- charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,197 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import six
18from collections import OrderedDict
19from charmhelpers.contrib.amulet.deployment import (
20 AmuletDeployment
21)
22
23
24class OpenStackAmuletDeployment(AmuletDeployment):
25 """OpenStack amulet deployment.
26
27 This class inherits from AmuletDeployment and has additional support
28 that is specifically for use by OpenStack charms.
29 """
30
31 def __init__(self, series=None, openstack=None, source=None, stable=True):
32 """Initialize the deployment environment."""
33 super(OpenStackAmuletDeployment, self).__init__(series)
34 self.openstack = openstack
35 self.source = source
36 self.stable = stable
37 # Note(coreycb): this needs to be changed when new next branches come
38 # out.
39 self.current_next = "trusty"
40
41 def _determine_branch_locations(self, other_services):
42 """Determine the branch locations for the other services.
43
44 Determine if the local branch being tested is derived from its
45 stable or next (dev) branch, and based on this, use the corresponding
46 stable or next branches for the other_services."""
47
48 # Charms outside the lp:~openstack-charmers namespace
49 base_charms = ['mysql', 'mongodb', 'nrpe']
50
51 # Force these charms to current series even when using an older series.
52 # i.e. use trusty/nrpe even when series is precise, as the precise charm
53 # does not possess the necessary external master config and hooks.
54 force_series_current = ['nrpe']
55
56 if self.series in ['precise', 'trusty']:
57 base_series = self.series
58 else:
59 base_series = self.current_next
60
61 for svc in other_services:
62 if svc['name'] in force_series_current:
63 base_series = self.current_next
64 # If a location has been explicitly set, use it
65 if svc.get('location'):
66 continue
67 if self.stable:
68 temp = 'lp:charms/{}/{}'
69 svc['location'] = temp.format(base_series,
70 svc['name'])
71 else:
72 if svc['name'] in base_charms:
73 temp = 'lp:charms/{}/{}'
74 svc['location'] = temp.format(base_series,
75 svc['name'])
76 else:
77 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
78 svc['location'] = temp.format(self.current_next,
79 svc['name'])
80
81 return other_services
82
83 def _add_services(self, this_service, other_services):
84 """Add services to the deployment and set openstack-origin/source."""
85 other_services = self._determine_branch_locations(other_services)
86
87 super(OpenStackAmuletDeployment, self)._add_services(this_service,
88 other_services)
89
90 services = other_services
91 services.append(this_service)
92
93 # Charms which should use the source config option
94 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
95 'ceph-osd', 'ceph-radosgw']
96
97 # Charms which cannot use openstack-origin, i.e. many subordinates
98 no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
99
100 if self.openstack:
101 for svc in services:
102 if svc['name'] not in use_source + no_origin:
103 config = {'openstack-origin': self.openstack}
104 self.d.configure(svc['name'], config)
105
106 if self.source:
107 for svc in services:
108 if svc['name'] in use_source and svc['name'] not in no_origin:
109 config = {'source': self.source}
110 self.d.configure(svc['name'], config)
111
112 def _configure_services(self, configs):
113 """Configure all of the services."""
114 for service, config in six.iteritems(configs):
115 self.d.configure(service, config)
116
117 def _get_openstack_release(self):
118 """Get openstack release.
119
120 Return an integer representing the enum value of the openstack
121 release.
122 """
123 # Must be ordered by OpenStack release (not by Ubuntu release):
124 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
125 self.precise_havana, self.precise_icehouse,
126 self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
127 self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
128 self.wily_liberty) = range(12)
129
130 releases = {
131 ('precise', None): self.precise_essex,
132 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
133 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
134 ('precise', 'cloud:precise-havana'): self.precise_havana,
135 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
136 ('trusty', None): self.trusty_icehouse,
137 ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
138 ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
139 ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
140 ('utopic', None): self.utopic_juno,
141 ('vivid', None): self.vivid_kilo,
142 ('wily', None): self.wily_liberty}
143 return releases[(self.series, self.openstack)]
144
145 def _get_openstack_release_string(self):
146 """Get openstack release string.
147
148 Return a string representing the openstack release.
149 """
150 releases = OrderedDict([
151 ('precise', 'essex'),
152 ('quantal', 'folsom'),
153 ('raring', 'grizzly'),
154 ('saucy', 'havana'),
155 ('trusty', 'icehouse'),
156 ('utopic', 'juno'),
157 ('vivid', 'kilo'),
158 ('wily', 'liberty'),
159 ])
160 if self.openstack:
161 os_origin = self.openstack.split(':')[1]
162 return os_origin.split('%s-' % self.series)[1].split('/')[0]
163 else:
164 return releases[self.series]
165
166 def get_ceph_expected_pools(self, radosgw=False):
167 """Return a list of expected ceph pools in a ceph + cinder + glance
168 test scenario, based on OpenStack release and whether ceph radosgw
169 is flagged as present or not."""
170
171 if self._get_openstack_release() >= self.trusty_kilo:
172 # Kilo or later
173 pools = [
174 'rbd',
175 'cinder',
176 'glance'
177 ]
178 else:
179 # Juno or earlier
180 pools = [
181 'data',
182 'metadata',
183 'rbd',
184 'cinder',
185 'glance'
186 ]
187
188 if radosgw:
189 pools.extend([
190 '.rgw.root',
191 '.rgw.control',
192 '.rgw',
193 '.rgw.gc',
194 '.users.uid'
195 ])
196
197 return pools
0198
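OpenStackAmuletDeployment is designed to be subclassed by a charm's amulet tests, which add services, configure them, and deploy. A rough sketch of that wiring, assuming the base AmuletDeployment class provides _deploy(); the service names and config values are illustrative, not taken from this charm's tests:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )


    class BasicDeployment(OpenStackAmuletDeployment):
        """Deploy the charm under test with a minimal set of other services."""

        def __init__(self, series='trusty', openstack=None, source=None,
                     stable=False):
            super(BasicDeployment, self).__init__(series, openstack,
                                                  source, stable)
            this_service = {'name': 'glance-simplestreams-sync'}
            other_services = [{'name': 'mysql'}, {'name': 'keystone'}]
            # Branch locations resolve via _determine_branch_locations();
            # openstack-origin/source are set per the lists above.
            self._add_services(this_service, other_services)
            self._configure_services({'keystone': {'admin-password': 'openstack'}})
            self._deploy()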
=== added file 'charmhelpers/contrib/openstack/amulet/utils.py'
--- charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/amulet/utils.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,963 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import amulet
18import json
19import logging
20import os
21import six
22import time
23import urllib
24
25import cinderclient.v1.client as cinder_client
26import glanceclient.v1.client as glance_client
27import heatclient.v1.client as heat_client
28import keystoneclient.v2_0 as keystone_client
29import novaclient.v1_1.client as nova_client
30import pika
31import swiftclient
32
33from charmhelpers.contrib.amulet.utils import (
34 AmuletUtils
35)
36
37DEBUG = logging.DEBUG
38ERROR = logging.ERROR
39
40
41class OpenStackAmuletUtils(AmuletUtils):
42 """OpenStack amulet utilities.
43
44 This class inherits from AmuletUtils and has additional support
45 that is specifically for use by OpenStack charm tests.
46 """
47
48 def __init__(self, log_level=ERROR):
49 """Initialize the deployment environment."""
50 super(OpenStackAmuletUtils, self).__init__(log_level)
51
52 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
53 public_port, expected):
54 """Validate endpoint data.
55
56 Validate actual endpoint data vs expected endpoint data. The ports
57 are used to find the matching endpoint.
58 """
59 self.log.debug('Validating endpoint data...')
60 self.log.debug('actual: {}'.format(repr(endpoints)))
61 found = False
62 for ep in endpoints:
63 self.log.debug('endpoint: {}'.format(repr(ep)))
64 if (admin_port in ep.adminurl and
65 internal_port in ep.internalurl and
66 public_port in ep.publicurl):
67 found = True
68 actual = {'id': ep.id,
69 'region': ep.region,
70 'adminurl': ep.adminurl,
71 'internalurl': ep.internalurl,
72 'publicurl': ep.publicurl,
73 'service_id': ep.service_id}
74 ret = self._validate_dict_data(expected, actual)
75 if ret:
76 return 'unexpected endpoint data - {}'.format(ret)
77
78 if not found:
79 return 'endpoint not found'
80
81 def validate_svc_catalog_endpoint_data(self, expected, actual):
82 """Validate service catalog endpoint data.
83
84 Validate a list of actual service catalog endpoints vs a list of
85 expected service catalog endpoints.
86 """
87 self.log.debug('Validating service catalog endpoint data...')
88 self.log.debug('actual: {}'.format(repr(actual)))
89 for k, v in six.iteritems(expected):
90 if k in actual:
91 ret = self._validate_dict_data(expected[k][0], actual[k][0])
92 if ret:
93 return self.endpoint_error(k, ret)
94 else:
95 return "endpoint {} does not exist".format(k)
96 return ret
97
98 def validate_tenant_data(self, expected, actual):
99 """Validate tenant data.
100
101 Validate a list of actual tenant data vs list of expected tenant
102 data.
103 """
104 self.log.debug('Validating tenant data...')
105 self.log.debug('actual: {}'.format(repr(actual)))
106 for e in expected:
107 found = False
108 for act in actual:
109 a = {'enabled': act.enabled, 'description': act.description,
110 'name': act.name, 'id': act.id}
111 if e['name'] == a['name']:
112 found = True
113 ret = self._validate_dict_data(e, a)
114 if ret:
115 return "unexpected tenant data - {}".format(ret)
116 if not found:
117 return "tenant {} does not exist".format(e['name'])
118 return ret
119
120 def validate_role_data(self, expected, actual):
121 """Validate role data.
122
123 Validate a list of actual role data vs a list of expected role
124 data.
125 """
126 self.log.debug('Validating role data...')
127 self.log.debug('actual: {}'.format(repr(actual)))
128 for e in expected:
129 found = False
130 for act in actual:
131 a = {'name': act.name, 'id': act.id}
132 if e['name'] == a['name']:
133 found = True
134 ret = self._validate_dict_data(e, a)
135 if ret:
136 return "unexpected role data - {}".format(ret)
137 if not found:
138 return "role {} does not exist".format(e['name'])
139 return ret
140
141 def validate_user_data(self, expected, actual):
142 """Validate user data.
143
144 Validate a list of actual user data vs a list of expected user
145 data.
146 """
147 self.log.debug('Validating user data...')
148 self.log.debug('actual: {}'.format(repr(actual)))
149 for e in expected:
150 found = False
151 for act in actual:
152 a = {'enabled': act.enabled, 'name': act.name,
153 'email': act.email, 'tenantId': act.tenantId,
154 'id': act.id}
155 if e['name'] == a['name']:
156 found = True
157 ret = self._validate_dict_data(e, a)
158 if ret:
159 return "unexpected user data - {}".format(ret)
160 if not found:
161 return "user {} does not exist".format(e['name'])
162 return ret
163
164 def validate_flavor_data(self, expected, actual):
165 """Validate flavor data.
166
167 Validate a list of actual flavors vs a list of expected flavors.
168 """
169 self.log.debug('Validating flavor data...')
170 self.log.debug('actual: {}'.format(repr(actual)))
171 act = [a.name for a in actual]
172 return self._validate_list_data(expected, act)
173
174 def tenant_exists(self, keystone, tenant):
175 """Return True if tenant exists."""
176 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
177 return tenant in [t.name for t in keystone.tenants.list()]
178
179 def authenticate_cinder_admin(self, keystone_sentry, username,
180 password, tenant):
181 """Authenticates admin user with cinder."""
182 # NOTE(beisner): cinder python client doesn't accept tokens.
183 service_ip = \
184 keystone_sentry.relation('shared-db',
185 'mysql:shared-db')['private-address']
186 ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
187 return cinder_client.Client(username, password, tenant, ept)
188
189 def authenticate_keystone_admin(self, keystone_sentry, user, password,
190 tenant):
191 """Authenticates admin user with the keystone admin endpoint."""
192 self.log.debug('Authenticating keystone admin...')
193 unit = keystone_sentry
194 service_ip = unit.relation('shared-db',
195 'mysql:shared-db')['private-address']
196 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
197 return keystone_client.Client(username=user, password=password,
198 tenant_name=tenant, auth_url=ep)
199
200 def authenticate_keystone_user(self, keystone, user, password, tenant):
201 """Authenticates a regular user with the keystone public endpoint."""
202 self.log.debug('Authenticating keystone user ({})...'.format(user))
203 ep = keystone.service_catalog.url_for(service_type='identity',
204 endpoint_type='publicURL')
205 return keystone_client.Client(username=user, password=password,
206 tenant_name=tenant, auth_url=ep)
207
208 def authenticate_glance_admin(self, keystone):
209 """Authenticates admin user with glance."""
210 self.log.debug('Authenticating glance admin...')
211 ep = keystone.service_catalog.url_for(service_type='image',
212 endpoint_type='adminURL')
213 return glance_client.Client(ep, token=keystone.auth_token)
214
215 def authenticate_heat_admin(self, keystone):
216 """Authenticates the admin user with heat."""
217 self.log.debug('Authenticating heat admin...')
218 ep = keystone.service_catalog.url_for(service_type='orchestration',
219 endpoint_type='publicURL')
220 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
221
222 def authenticate_nova_user(self, keystone, user, password, tenant):
223 """Authenticates a regular user with nova-api."""
224 self.log.debug('Authenticating nova user ({})...'.format(user))
225 ep = keystone.service_catalog.url_for(service_type='identity',
226 endpoint_type='publicURL')
227 return nova_client.Client(username=user, api_key=password,
228 project_id=tenant, auth_url=ep)
229
230 def authenticate_swift_user(self, keystone, user, password, tenant):
231 """Authenticates a regular user with swift api."""
232 self.log.debug('Authenticating swift user ({})...'.format(user))
233 ep = keystone.service_catalog.url_for(service_type='identity',
234 endpoint_type='publicURL')
235 return swiftclient.Connection(authurl=ep,
236 user=user,
237 key=password,
238 tenant_name=tenant,
239 auth_version='2.0')
240
241 def create_cirros_image(self, glance, image_name):
242 """Download the latest cirros image and upload it to glance,
243 validate and return a resource pointer.
244
245 :param glance: pointer to authenticated glance connection
246 :param image_name: display name for new image
247 :returns: glance image pointer
248 """
249 self.log.debug('Creating glance cirros image '
250 '({})...'.format(image_name))
251
252 # Download cirros image
253 http_proxy = os.getenv('AMULET_HTTP_PROXY')
254 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
255 if http_proxy:
256 proxies = {'http': http_proxy}
257 opener = urllib.FancyURLopener(proxies)
258 else:
259 opener = urllib.FancyURLopener()
260
261 f = opener.open('http://download.cirros-cloud.net/version/released')
262 version = f.read().strip()
263 cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
264 local_path = os.path.join('tests', cirros_img)
265
266 if not os.path.exists(local_path):
267 cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
268 version, cirros_img)
269 opener.retrieve(cirros_url, local_path)
270 f.close()
271
272 # Create glance image
273 with open(local_path) as f:
274 image = glance.images.create(name=image_name, is_public=True,
275 disk_format='qcow2',
276 container_format='bare', data=f)
277
278 # Wait for image to reach active status
279 img_id = image.id
280 ret = self.resource_reaches_status(glance.images, img_id,
281 expected_stat='active',
282 msg='Image status wait')
283 if not ret:
284 msg = 'Glance image failed to reach expected state.'
285 amulet.raise_status(amulet.FAIL, msg=msg)
286
287 # Re-validate new image
288 self.log.debug('Validating image attributes...')
289 val_img_name = glance.images.get(img_id).name
290 val_img_stat = glance.images.get(img_id).status
291 val_img_pub = glance.images.get(img_id).is_public
292 val_img_cfmt = glance.images.get(img_id).container_format
293 val_img_dfmt = glance.images.get(img_id).disk_format
294 msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
295 'container fmt:{} disk fmt:{}'.format(
296 val_img_name, val_img_pub, img_id,
297 val_img_stat, val_img_cfmt, val_img_dfmt))
298
299 if val_img_name == image_name and val_img_stat == 'active' \
300 and val_img_pub is True and val_img_cfmt == 'bare' \
301 and val_img_dfmt == 'qcow2':
302 self.log.debug(msg_attr)
303 else:
304 msg = ('Image validation failed, {}'.format(msg_attr))
305 amulet.raise_status(amulet.FAIL, msg=msg)
306
307 return image
308
309 def delete_image(self, glance, image):
310 """Delete the specified image."""
311
312 # /!\ DEPRECATION WARNING
313 self.log.warn('/!\\ DEPRECATION WARNING: use '
314 'delete_resource instead of delete_image.')
315 self.log.debug('Deleting glance image ({})...'.format(image))
316 return self.delete_resource(glance.images, image, msg='glance image')
317
318 def create_instance(self, nova, image_name, instance_name, flavor):
319 """Create the specified instance."""
320 self.log.debug('Creating instance '
321 '({}|{}|{})'.format(instance_name, image_name, flavor))
322 image = nova.images.find(name=image_name)
323 flavor = nova.flavors.find(name=flavor)
324 instance = nova.servers.create(name=instance_name, image=image,
325 flavor=flavor)
326
327 count = 1
328 status = instance.status
329 while status != 'ACTIVE' and count < 60:
330 time.sleep(3)
331 instance = nova.servers.get(instance.id)
332 status = instance.status
333 self.log.debug('instance status: {}'.format(status))
334 count += 1
335
336 if status != 'ACTIVE':
337 self.log.error('instance creation timed out')
338 return None
339
340 return instance
341
342 def delete_instance(self, nova, instance):
343 """Delete the specified instance."""
344
345 # /!\ DEPRECATION WARNING
346 self.log.warn('/!\\ DEPRECATION WARNING: use '
347 'delete_resource instead of delete_instance.')
348 self.log.debug('Deleting instance ({})...'.format(instance))
349 return self.delete_resource(nova.servers, instance,
350 msg='nova instance')
351
352 def create_or_get_keypair(self, nova, keypair_name="testkey"):
353 """Create a new keypair, or return pointer if it already exists."""
354 try:
355 _keypair = nova.keypairs.get(keypair_name)
356 self.log.debug('Keypair ({}) already exists, '
357 'using it.'.format(keypair_name))
358 return _keypair
359 except:
360 self.log.debug('Keypair ({}) does not exist, '
361 'creating it.'.format(keypair_name))
362
363 _keypair = nova.keypairs.create(name=keypair_name)
364 return _keypair
365
366 def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
367 img_id=None, src_vol_id=None, snap_id=None):
368 """Create cinder volume, optionally from a glance image, OR
369 optionally as a clone of an existing volume, OR optionally
370 from a snapshot. Wait for the new volume status to reach
371 the expected status, validate and return a resource pointer.
372
373 :param vol_name: cinder volume display name
374 :param vol_size: size in gigabytes
375 :param img_id: optional glance image id
376 :param src_vol_id: optional source volume id to clone
377 :param snap_id: optional snapshot id to use
378 :returns: cinder volume pointer
379 """
380 # Handle parameter input and avoid impossible combinations
381 if img_id and not src_vol_id and not snap_id:
382 # Create volume from image
383 self.log.debug('Creating cinder volume from glance image...')
384 bootable = 'true'
385 elif src_vol_id and not img_id and not snap_id:
386 # Clone an existing volume
387 self.log.debug('Cloning cinder volume...')
388 bootable = cinder.volumes.get(src_vol_id).bootable
389 elif snap_id and not src_vol_id and not img_id:
390 # Create volume from snapshot
391 self.log.debug('Creating cinder volume from snapshot...')
392 snap = cinder.volume_snapshots.find(id=snap_id)
393 vol_size = snap.size
394 snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
395 bootable = cinder.volumes.get(snap_vol_id).bootable
396 elif not img_id and not src_vol_id and not snap_id:
397 # Create volume
398 self.log.debug('Creating cinder volume...')
399 bootable = 'false'
400 else:
401 # Impossible combination of parameters
402 msg = ('Invalid method use - name:{} size:{} img_id:{} '
403 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
404 img_id, src_vol_id,
405 snap_id))
406 amulet.raise_status(amulet.FAIL, msg=msg)
407
408 # Create new volume
409 try:
410 vol_new = cinder.volumes.create(display_name=vol_name,
411 imageRef=img_id,
412 size=vol_size,
413 source_volid=src_vol_id,
414 snapshot_id=snap_id)
415 vol_id = vol_new.id
416 except Exception as e:
417 msg = 'Failed to create volume: {}'.format(e)
418 amulet.raise_status(amulet.FAIL, msg=msg)
419
420 # Wait for volume to reach available status
421 ret = self.resource_reaches_status(cinder.volumes, vol_id,
422 expected_stat="available",
423 msg="Volume status wait")
424 if not ret:
425 msg = 'Cinder volume failed to reach expected state.'
426 amulet.raise_status(amulet.FAIL, msg=msg)
427
428 # Re-validate new volume
429 self.log.debug('Validating volume attributes...')
430 val_vol_name = cinder.volumes.get(vol_id).display_name
431 val_vol_boot = cinder.volumes.get(vol_id).bootable
432 val_vol_stat = cinder.volumes.get(vol_id).status
433 val_vol_size = cinder.volumes.get(vol_id).size
434 msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
435 '{} size:{}'.format(val_vol_name, vol_id,
436 val_vol_stat, val_vol_boot,
437 val_vol_size))
438
439 if val_vol_boot == bootable and val_vol_stat == 'available' \
440 and val_vol_name == vol_name and val_vol_size == vol_size:
441 self.log.debug(msg_attr)
442 else:
443 msg = ('Volume validation failed, {}'.format(msg_attr))
444 amulet.raise_status(amulet.FAIL, msg=msg)
445
446 return vol_new
447
448 def delete_resource(self, resource, resource_id,
449 msg="resource", max_wait=120):
450 """Delete one openstack resource, such as one instance, keypair,
451 image, volume, stack, etc., and confirm deletion within max wait time.
452
453 :param resource: pointer to os resource type, ex:glance_client.images
454 :param resource_id: unique name or id for the openstack resource
455 :param msg: text to identify purpose in logging
456 :param max_wait: maximum wait time in seconds
457 :returns: True if successful, otherwise False
458 """
459 self.log.debug('Deleting OpenStack resource '
460 '{} ({})'.format(resource_id, msg))
461 num_before = len(list(resource.list()))
462 resource.delete(resource_id)
463
464 tries = 0
465 num_after = len(list(resource.list()))
466 while num_after != (num_before - 1) and tries < (max_wait / 4):
467 self.log.debug('{} delete check: '
468 '{} [{}:{}] {}'.format(msg, tries,
469 num_before,
470 num_after,
471 resource_id))
472 time.sleep(4)
473 num_after = len(list(resource.list()))
474 tries += 1
475
476 self.log.debug('{}: expected, actual count = {}, '
477 '{}'.format(msg, num_before - 1, num_after))
478
479 if num_after == (num_before - 1):
480 return True
481 else:
482 self.log.error('{} delete timed out'.format(msg))
483 return False
484
485 def resource_reaches_status(self, resource, resource_id,
486 expected_stat='available',
487 msg='resource', max_wait=120):
488 """Wait for an openstack resource's status to reach an
489 expected status within a specified time. Useful to confirm that
490 nova instances, cinder vols, snapshots, glance images, heat stacks
491 and other resources eventually reach the expected status.
492
493 :param resource: pointer to os resource type, ex: heat_client.stacks
494 :param resource_id: unique id for the openstack resource
495 :param expected_stat: status to expect resource to reach
496 :param msg: text to identify purpose in logging
497 :param max_wait: maximum wait time in seconds
498 :returns: True if successful, False if status is not reached
499 """
500
501 tries = 0
502 resource_stat = resource.get(resource_id).status
503 while resource_stat != expected_stat and tries < (max_wait / 4):
504 self.log.debug('{} status check: '
505 '{} [{}:{}] {}'.format(msg, tries,
506 resource_stat,
507 expected_stat,
508 resource_id))
509 time.sleep(4)
510 resource_stat = resource.get(resource_id).status
511 tries += 1
512
513 self.log.debug('{}: expected, actual status = {}, '
514 '{}'.format(msg, expected_stat, resource_stat))
515
516 if resource_stat == expected_stat:
517 return True
518 else:
519 self.log.debug('{} never reached expected status: '
520 '{}'.format(resource_id, expected_stat))
521 return False
522
523 def get_ceph_osd_id_cmd(self, index):
524 """Produce a shell command that will return a ceph-osd id."""
525 return ("`initctl list | grep 'ceph-osd ' | "
526 "awk 'NR=={} {{ print $2 }}' | "
527 "grep -o '[0-9]*'`".format(index + 1))
528
529 def get_ceph_pools(self, sentry_unit):
530 """Return a dict of ceph pools from a single ceph unit, with
531 pool name as keys, pool id as vals."""
532 pools = {}
533 cmd = 'sudo ceph osd lspools'
534 output, code = sentry_unit.run(cmd)
535 if code != 0:
536 msg = ('{} `{}` returned {} '
537 '{}'.format(sentry_unit.info['unit_name'],
538 cmd, code, output))
539 amulet.raise_status(amulet.FAIL, msg=msg)
540
541 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
542 for pool in str(output).split(','):
543 pool_id_name = pool.split(' ')
544 if len(pool_id_name) == 2:
545 pool_id = pool_id_name[0]
546 pool_name = pool_id_name[1]
547 pools[pool_name] = int(pool_id)
548
549 self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
550 pools))
551 return pools
552
553 def get_ceph_df(self, sentry_unit):
554 """Return dict of ceph df json output, including ceph pool state.
555
556 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
557 :returns: Dict of ceph df output
558 """
559 cmd = 'sudo ceph df --format=json'
560 output, code = sentry_unit.run(cmd)
561 if code != 0:
562 msg = ('{} `{}` returned {} '
563 '{}'.format(sentry_unit.info['unit_name'],
564 cmd, code, output))
565 amulet.raise_status(amulet.FAIL, msg=msg)
566 return json.loads(output)
567
568 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
569 """Take a sample of attributes of a ceph pool, returning ceph
570 pool name, object count and disk space used for the specified
571 pool ID number.
572
573 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
574 :param pool_id: Ceph pool ID
575 :returns: List of pool name, object count, kb disk space used
576 """
577 df = self.get_ceph_df(sentry_unit)
578 pool_name = df['pools'][pool_id]['name']
579 obj_count = df['pools'][pool_id]['stats']['objects']
580 kb_used = df['pools'][pool_id]['stats']['kb_used']
581 self.log.debug('Ceph {} pool (ID {}): {} objects, '
582 '{} kb used'.format(pool_name, pool_id,
583 obj_count, kb_used))
584 return pool_name, obj_count, kb_used
585
586 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
587 """Validate ceph pool samples taken over time, such as pool
588 object counts or pool kb used, before adding, after adding, and
589 after deleting items which affect those pool attributes. The
590 2nd element is expected to be greater than the 1st; 3rd is expected
591 to be less than the 2nd.
592
593 :param samples: List containing 3 data samples
594 :param sample_type: String for logging and usage context
595 :returns: None if successful, Failure message otherwise
596 """
597 original, created, deleted = range(3)
598 if samples[created] <= samples[original] or \
599 samples[deleted] >= samples[created]:
600 return ('Ceph {} samples ({}) '
601 'unexpected.'.format(sample_type, samples))
602 else:
603 self.log.debug('Ceph {} samples (OK): '
604 '{}'.format(sample_type, samples))
605 return None
606
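To make the three-sample contract concrete: validate_ceph_pool_samples() expects the samples in (original, after-create, after-delete) order and returns None only when the counts rise and then fall. A small hypothetical check, assuming the amulet test dependencies are installed:

    from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

    u = OpenStackAmuletUtils()

    # Hypothetical glance pool object counts: before an image upload,
    # after the upload, and after the image is deleted again.
    samples = [0, 10, 0]

    # None means the trend was as expected; anything else is an error string.
    ret = u.validate_ceph_pool_samples(samples, "glance pool object count")
    assert ret is None, ret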
607# rabbitmq/amqp specific helpers:
608 def add_rmq_test_user(self, sentry_units,
609 username="testuser1", password="changeme"):
610 """Add a test user via the first rmq juju unit, check connection as
611 the new user against all sentry units.
612
613 :param sentry_units: list of sentry unit pointers
614 :param username: amqp user name, default to testuser1
615 :param password: amqp user password
616 :returns: None if successful. Raise on error.
617 """
618 self.log.debug('Adding rmq user ({})...'.format(username))
619
620 # Check that user does not already exist
621 cmd_user_list = 'rabbitmqctl list_users'
622 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
623 if username in output:
624 self.log.warning('User ({}) already exists, returning '
625 'gracefully.'.format(username))
626 return
627
628 perms = '".*" ".*" ".*"'
629 cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
630 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
631
632 # Add user via first unit
633 for cmd in cmds:
634 output, _ = self.run_cmd_unit(sentry_units[0], cmd)
635
636 # Check connection against the other sentry_units
637 self.log.debug('Checking user connect against units...')
638 for sentry_unit in sentry_units:
639 connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
640 username=username,
641 password=password)
642 connection.close()
643
644 def delete_rmq_test_user(self, sentry_units, username="testuser1"):
645 """Delete a rabbitmq user via the first rmq juju unit.
646
647 :param sentry_units: list of sentry unit pointers
648 :param username: amqp user name, default to testuser1
649 :param password: amqp user password
650 :returns: None if successful or no such user.
651 """
652 self.log.debug('Deleting rmq user ({})...'.format(username))
653
654 # Check that the user exists
655 cmd_user_list = 'rabbitmqctl list_users'
656 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
657
658 if username not in output:
659 self.log.warning('User ({}) does not exist, returning '
660 'gracefully.'.format(username))
661 return
662
663 # Delete the user
664 cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
665 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
666
667 def get_rmq_cluster_status(self, sentry_unit):
668 """Execute rabbitmq cluster status command on a unit and return
669 the full output.
670
671 :param unit: sentry unit
672 :returns: String containing console output of cluster status command
673 """
674 cmd = 'rabbitmqctl cluster_status'
675 output, _ = self.run_cmd_unit(sentry_unit, cmd)
676 self.log.debug('{} cluster_status:\n{}'.format(
677 sentry_unit.info['unit_name'], output))
678 return str(output)
679
680 def get_rmq_cluster_running_nodes(self, sentry_unit):
681 """Parse rabbitmqctl cluster_status output string, return list of
682 running rabbitmq cluster nodes.
683
684 :param unit: sentry unit
685 :returns: List containing node names of running nodes
686 """
687 # NOTE(beisner): rabbitmqctl cluster_status output is not
688 # json-parsable; extract the running_nodes substring, then json.loads that.
689 str_stat = self.get_rmq_cluster_status(sentry_unit)
690 if 'running_nodes' in str_stat:
691 pos_start = str_stat.find("{running_nodes,") + 15
692 pos_end = str_stat.find("]},", pos_start) + 1
693 str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
694 run_nodes = json.loads(str_run_nodes)
695 return run_nodes
696 else:
697 return []
698
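The string-chop used in get_rmq_cluster_running_nodes() is easier to follow on a standalone sample; the cluster_status text below is made up but mimics the usual rabbitmqctl Erlang-term layout:

    import json

    # Hypothetical `rabbitmqctl cluster_status` output from one unit.
    str_stat = (
        "Cluster status of node 'rabbit@host-0' ...\n"
        "[{nodes,[{disc,['rabbit@host-0','rabbit@host-1']}]},\n"
        " {running_nodes,['rabbit@host-0','rabbit@host-1']},\n"
        " {partitions,[]}]\n"
        "...done.\n"
    )

    # Same extraction as the helper: slice out the running_nodes list,
    # swap Erlang single quotes for double quotes, then json.loads it.
    pos_start = str_stat.find("{running_nodes,") + 15
    pos_end = str_stat.find("]},", pos_start) + 1
    str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
    print(json.loads(str_run_nodes))  # ['rabbit@host-0', 'rabbit@host-1']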
699 def validate_rmq_cluster_running_nodes(self, sentry_units):
700 """Check that all rmq unit hostnames are represented in the
701 cluster_status output of all units.
702
703 :param sentry_units: list of sentry unit pointers (all rmq units);
704 host names are resolved internally via get_unit_hostnames()
705 :returns: None if successful, otherwise return error message
706 """
707 host_names = self.get_unit_hostnames(sentry_units)
708 errors = []
709
710 # Query every unit for cluster_status running nodes
711 for query_unit in sentry_units:
712 query_unit_name = query_unit.info['unit_name']
713 running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
714
715 # Confirm that every unit is represented in the queried unit's
716 # cluster_status running nodes output.
717 for validate_unit in sentry_units:
718 val_host_name = host_names[validate_unit.info['unit_name']]
719 val_node_name = 'rabbit@{}'.format(val_host_name)
720
721 if val_node_name not in running_nodes:
722 errors.append('Cluster member check failed on {}: {} not '
723 'in {}\n'.format(query_unit_name,
724 val_node_name,
725 running_nodes))
726 if errors:
727 return ''.join(errors)
728
729 def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
730 """Check a single juju rmq unit for ssl and port in the config file."""
731 host = sentry_unit.info['public-address']
732 unit_name = sentry_unit.info['unit_name']
733
734 conf_file = '/etc/rabbitmq/rabbitmq.config'
735 conf_contents = str(self.file_contents_safe(sentry_unit,
736 conf_file, max_wait=16))
737 # Checks
738 conf_ssl = 'ssl' in conf_contents
739 conf_port = str(port) in conf_contents
740
741 # Port explicitly checked in config
742 if port and conf_port and conf_ssl:
743 self.log.debug('SSL is enabled @{}:{} '
744 '({})'.format(host, port, unit_name))
745 return True
746 elif port and not conf_port and conf_ssl:
747 self.log.debug('SSL is enabled @{} but not on port {} '
748 '({})'.format(host, port, unit_name))
749 return False
750 # Port not checked (useful when checking that ssl is disabled)
751 elif not port and conf_ssl:
752 self.log.debug('SSL is enabled @{}:{} '
753 '({})'.format(host, port, unit_name))
754 return True
755 elif not conf_ssl:
756 self.log.debug('SSL not enabled @{}:{} '
757 '({})'.format(host, port, unit_name))
758 return False
759 else:
760 msg = ('Unknown condition when checking SSL status @{}:{} '
761 '({})'.format(host, port, unit_name))
762 amulet.raise_status(amulet.FAIL, msg)
763
764 def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
765 """Check that ssl is enabled on rmq juju sentry units.
766
767 :param sentry_units: list of all rmq sentry units
768 :param port: optional ssl port override to validate
769 :returns: None if successful, otherwise return error message
770 """
771 for sentry_unit in sentry_units:
772 if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
773 return ('Unexpected condition: ssl is disabled on unit '
774 '({})'.format(sentry_unit.info['unit_name']))
775 return None
776
777 def validate_rmq_ssl_disabled_units(self, sentry_units):
778 """Check that ssl is disabled on the listed rmq juju sentry units.
779
780 :param sentry_units: list of all rmq sentry units
781 :returns: None if successful, otherwise return error message
782 """
783 for sentry_unit in sentry_units:
784 if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
785 return ('Unexpected condition: ssl is enabled on unit '
786 '({})'.format(sentry_unit.info['unit_name']))
787 return None
788
789 def configure_rmq_ssl_on(self, sentry_units, deployment,
790 port=None, max_wait=60):
791 """Turn ssl charm config option on, with optional non-default
792 ssl port specification. Confirm that it is enabled on every
793 unit.
794
795 :param sentry_units: list of sentry units
796 :param deployment: amulet deployment object pointer
797 :param port: amqp port, use defaults if None
798 :param max_wait: maximum time to wait in seconds to confirm
799 :returns: None if successful. Raise on error.
800 """
801 self.log.debug('Setting ssl charm config option: on')
802
803 # Enable RMQ SSL
804 config = {'ssl': 'on'}
805 if port:
806 config['ssl_port'] = port
807
808 deployment.configure('rabbitmq-server', config)
809
810 # Confirm
811 tries = 0
812 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
813 while ret and tries < (max_wait / 4):
814 time.sleep(4)
815 self.log.debug('Attempt {}: {}'.format(tries, ret))
816 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
817 tries += 1
818
819 if ret:
820 amulet.raise_status(amulet.FAIL, ret)
821
822 def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
823 """Turn ssl charm config option off, confirm that it is disabled
824 on every unit.
825
826 :param sentry_units: list of sentry units
827 :param deployment: amulet deployment object pointer
828 :param max_wait: maximum time to wait in seconds to confirm
829 :returns: None if successful. Raise on error.
830 """
831 self.log.debug('Setting ssl charm config option: off')
832
833 # Disable RMQ SSL
834 config = {'ssl': 'off'}
835 deployment.configure('rabbitmq-server', config)
836
837 # Confirm
838 tries = 0
839 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
840 while ret and tries < (max_wait / 4):
841 time.sleep(4)
842 self.log.debug('Attempt {}: {}'.format(tries, ret))
843 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
844 tries += 1
845
846 if ret:
847 amulet.raise_status(amulet.FAIL, ret)
848
849 def connect_amqp_by_unit(self, sentry_unit, ssl=False,
850 port=None, fatal=True,
851 username="testuser1", password="changeme"):
852 """Establish and return a pika amqp connection to the rabbitmq service
853 running on a rmq juju unit.
854
855 :param sentry_unit: sentry unit pointer
856 :param ssl: boolean, default to False
857 :param port: amqp port, use defaults if None
858 :param fatal: boolean, default to True (raises on connect error)
859 :param username: amqp user name, default to testuser1
860 :param password: amqp user password
861 :returns: pika amqp connection pointer or None if failed and non-fatal
862 """
863 host = sentry_unit.info['public-address']
864 unit_name = sentry_unit.info['unit_name']
865
866 # Default port logic if port is not specified
867 if ssl and not port:
868 port = 5671
869 elif not ssl and not port:
870 port = 5672
871
872 self.log.debug('Connecting to amqp on {}:{} ({}) as '
873 '{}...'.format(host, port, unit_name, username))
874
875 try:
876 credentials = pika.PlainCredentials(username, password)
877 parameters = pika.ConnectionParameters(host=host, port=port,
878 credentials=credentials,
879 ssl=ssl,
880 connection_attempts=3,
881 retry_delay=5,
882 socket_timeout=1)
883 connection = pika.BlockingConnection(parameters)
884 assert connection.server_properties['product'] == 'RabbitMQ'
885 self.log.debug('Connect OK')
886 return connection
887 except Exception as e:
888 msg = ('amqp connection failed to {}:{} as '
889 '{} ({})'.format(host, port, username, str(e)))
890 if fatal:
891 amulet.raise_status(amulet.FAIL, msg)
892 else:
893 self.log.warn(msg)
894 return None
895
896 def publish_amqp_message_by_unit(self, sentry_unit, message,
897 queue="test", ssl=False,
898 username="testuser1",
899 password="changeme",
900 port=None):
901 """Publish an amqp message to a rmq juju unit.
902
903 :param sentry_unit: sentry unit pointer
904 :param message: amqp message string
905 :param queue: message queue, default to test
906 :param username: amqp user name, default to testuser1
907 :param password: amqp user password
908 :param ssl: boolean, default to False
909 :param port: amqp port, use defaults if None
910 :returns: None. Raises exception if publish failed.
911 """
912 self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
913 message))
914 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
915 port=port,
916 username=username,
917 password=password)
918
919 # NOTE(beisner): extra debug here re: pika hang potential:
920 # https://github.com/pika/pika/issues/297
921 # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
922 self.log.debug('Defining channel...')
923 channel = connection.channel()
924 self.log.debug('Declaring queue...')
925 channel.queue_declare(queue=queue, auto_delete=False, durable=True)
926 self.log.debug('Publishing message...')
927 channel.basic_publish(exchange='', routing_key=queue, body=message)
928 self.log.debug('Closing channel...')
929 channel.close()
930 self.log.debug('Closing connection...')
931 connection.close()
932
933 def get_amqp_message_by_unit(self, sentry_unit, queue="test",
934 username="testuser1",
935 password="changeme",
936 ssl=False, port=None):
937 """Get an amqp message from a rmq juju unit.
938
939 :param sentry_unit: sentry unit pointer
940 :param queue: message queue, default to test
941 :param username: amqp user name, default to testuser1
942 :param password: amqp user password
943 :param ssl: boolean, default to False
944 :param port: amqp port, use defaults if None
945 :returns: amqp message body as string. Raise if get fails.
946 """
947 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
948 port=port,
949 username=username,
950 password=password)
951 channel = connection.channel()
952 method_frame, _, body = channel.basic_get(queue)
953
954 if method_frame:
955 self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
956 body))
957 channel.basic_ack(method_frame.delivery_tag)
958 channel.close()
959 connection.close()
960 return body
961 else:
962 msg = 'No message retrieved.'
963 amulet.raise_status(amulet.FAIL, msg)
0964
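Most helpers in this module follow the same act-then-poll pattern (check every four seconds up to max_wait). A compressed sketch of how a charm's tests might chain the authentication, image and deletion helpers; the credentials and image name are illustrative, and the keystone sentry is assumed to come from the test's amulet deployment:

    import amulet

    from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

    u = OpenStackAmuletUtils()


    def glance_image_smoke_test(keystone_sentry):
        """Upload a cirros image, then delete it and confirm the deletion."""
        keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                                 password='openstack',
                                                 tenant='admin')
        glance = u.authenticate_glance_admin(keystone)

        # create_cirros_image() already waits for the image to go 'active'.
        image = u.create_cirros_image(glance, 'cirros-smoke-test')

        # delete_resource() polls until the resource count drops (120s max).
        if not u.delete_resource(glance.images, image.id, msg='glance image'):
            amulet.raise_status(amulet.FAIL, msg='image deletion timed out')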
=== added file 'charmhelpers/contrib/openstack/context.py'
--- charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/context.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,1427 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import json
19import os
20import re
21import time
22from base64 import b64decode
23from subprocess import check_call
24
25import six
26import yaml
27
28from charmhelpers.fetch import (
29 apt_install,
30 filter_installed_packages,
31)
32from charmhelpers.core.hookenv import (
33 config,
34 is_relation_made,
35 local_unit,
36 log,
37 relation_get,
38 relation_ids,
39 related_units,
40 relation_set,
41 unit_get,
42 unit_private_ip,
43 charm_name,
44 DEBUG,
45 INFO,
46 WARNING,
47 ERROR,
48)
49
50from charmhelpers.core.sysctl import create as sysctl_create
51from charmhelpers.core.strutils import bool_from_string
52
53from charmhelpers.core.host import (
54 get_bond_master,
55 is_phy_iface,
56 list_nics,
57 get_nic_hwaddr,
58 mkdir,
59 write_file,
60)
61from charmhelpers.contrib.hahelpers.cluster import (
62 determine_apache_port,
63 determine_api_port,
64 https,
65 is_clustered,
66)
67from charmhelpers.contrib.hahelpers.apache import (
68 get_cert,
69 get_ca_cert,
70 install_ca_cert,
71)
72from charmhelpers.contrib.openstack.neutron import (
73 neutron_plugin_attribute,
74 parse_data_port_mappings,
75)
76from charmhelpers.contrib.openstack.ip import (
77 resolve_address,
78 INTERNAL,
79)
80from charmhelpers.contrib.network.ip import (
81 get_address_in_network,
82 get_ipv4_addr,
83 get_ipv6_addr,
84 get_netmask_for_address,
85 format_ipv6_addr,
86 is_address_in_network,
87 is_bridge_member,
88)
89from charmhelpers.contrib.openstack.utils import get_host_ip
90CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
91ADDRESS_TYPES = ['admin', 'internal', 'public']
92
93
94class OSContextError(Exception):
95 pass
96
97
98def ensure_packages(packages):
99 """Install but do not upgrade required plugin packages."""
100 required = filter_installed_packages(packages)
101 if required:
102 apt_install(required, fatal=True)
103
104
105def context_complete(ctxt):
106 _missing = []
107 for k, v in six.iteritems(ctxt):
108 if v is None or v == '':
109 _missing.append(k)
110
111 if _missing:
112 log('Missing required data: %s' % ' '.join(_missing), level=INFO)
113 return False
114
115 return True
116
117
118def config_flags_parser(config_flags):
119 """Parses config flags string into dict.
120
121 This parsing method supports a few different formats for the config
122 flag values to be parsed:
123
124 1. A string in the simple format of key=value pairs, with the possibility
125 of specifying multiple key value pairs within the same string. For
126 example, a string in the format of 'key1=value1, key2=value2' will
127 return a dict of:
128
129 {'key1': 'value1',
130 'key2': 'value2'}.
131
132 2. A string in the above format, but supporting a comma-delimited list
133 of values for the same key. For example, a string in the format of
134 'key1=value1, key2=value3,value4,value5' will return a dict of:
135
136 {'key1': 'value1',
137 'key2': 'value3,value4,value5'}
138
139 3. A string containing a colon character (:) prior to an equal
140 character (=) will be treated as yaml and parsed as such. This can be
141 used to specify more complex key value pairs. For example,
142 a string in the format of 'key1: subkey1=value1, subkey2=value2' will
143 return a dict of:
144
145 {'key1': 'subkey1=value1, subkey2=value2'}
146
147 The provided config_flags string may be a list of comma-separated values
148 which themselves may be comma-separated lists of values.
149 """
150 # If we find a colon before an equals sign then treat it as yaml.
151 # Note: limit it to finding the colon first since this indicates assignment
152 # for inline yaml.
153 colon = config_flags.find(':')
154 equals = config_flags.find('=')
155 if colon > 0:
156 if colon < equals or equals < 0:
157 return yaml.safe_load(config_flags)
158
159 if config_flags.find('==') >= 0:
160 log("config_flags is not in expected format (key=value)", level=ERROR)
161 raise OSContextError
162
163 # strip the following from each value.
164 post_strippers = ' ,'
165 # we strip any leading/trailing '=' or ' ' from the string then
166 # split on '='.
167 split = config_flags.strip(' =').split('=')
168 limit = len(split)
169 flags = {}
170 for i in range(0, limit - 1):
171 current = split[i]
172 next = split[i + 1]
173 vindex = next.rfind(',')
174 if (i == limit - 2) or (vindex < 0):
175 value = next
176 else:
177 value = next[:vindex]
178
179 if i == 0:
180 key = current
181 else:
182 # if this is not the first entry, expect an embedded key.
183 index = current.rfind(',')
184 if index < 0:
185 log("Invalid config value(s) at index %s" % (i), level=ERROR)
186 raise OSContextError
187 key = current[index + 1:]
188
189 # Add to collection.
190 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
191
192 return flags
193
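Concretely, the three docstring formats parse as follows; a quick sketch with made-up flag names, assuming charm-helpers is importable outside a hook:

    from charmhelpers.contrib.openstack.context import config_flags_parser

    # 1. Simple key=value pairs.
    print(config_flags_parser('osapi_compute_workers=4, debug=True'))
    # -> {'osapi_compute_workers': '4', 'debug': 'True'}

    # 2. Comma-delimited list of values for a single key.
    print(config_flags_parser('read_timeout=30, listen=0.0.0.0,::'))
    # -> {'read_timeout': '30', 'listen': '0.0.0.0,::'}

    # 3. A colon before the first '=' switches to yaml parsing.
    print(config_flags_parser('DEFAULT: {verbose: true, debug: false}'))
    # -> {'DEFAULT': {'verbose': True, 'debug': False}}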
194
195class OSContextGenerator(object):
196 """Base class for all context generators."""
197 interfaces = []
198 related = False
199 complete = False
200 missing_data = []
201
202 def __call__(self):
203 raise NotImplementedError
204
205 def context_complete(self, ctxt):
206 """Check for missing data for the required context data.
207 Set self.missing_data if it exists and return False.
208 Set self.complete if no missing data and return True.
209 """
210 # Fresh start
211 self.complete = False
212 self.missing_data = []
213 for k, v in six.iteritems(ctxt):
214 if v is None or v == '':
215 if k not in self.missing_data:
216 self.missing_data.append(k)
217
218 if self.missing_data:
219 self.complete = False
220 log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
221 else:
222 self.complete = True
223 return self.complete
224
225 def get_related(self):
226 """Check if any of the context interfaces have relation ids.
227 Set self.related and return True if one of the interfaces
228 has relation ids.
229 """
230 # Fresh start
231 self.related = False
232 try:
233 for interface in self.interfaces:
234 if relation_ids(interface):
235 self.related = True
236 return self.related
237 except AttributeError as e:
238 log("{} {}"
239 "".format(self, e), 'INFO')
240 return self.related
241
242
243class SharedDBContext(OSContextGenerator):
244 interfaces = ['shared-db']
245
246 def __init__(self,
247 database=None, user=None, relation_prefix=None, ssl_dir=None):
248 """Allows inspecting relation for settings prefixed with
249 relation_prefix. This is useful for parsing access for multiple
250 databases returned via the shared-db interface (eg, nova_password,
251 quantum_password)
252 """
253 self.relation_prefix = relation_prefix
254 self.database = database
255 self.user = user
256 self.ssl_dir = ssl_dir
257 self.rel_name = self.interfaces[0]
258
259 def __call__(self):
260 self.database = self.database or config('database')
261 self.user = self.user or config('database-user')
262 if None in [self.database, self.user]:
263 log("Could not generate shared_db context. Missing required charm "
264 "config options. (database name and user)", level=ERROR)
265 raise OSContextError
266
267 ctxt = {}
268
269 # NOTE(jamespage) if mysql charm provides a network upon which
270 # access to the database should be made, reconfigure relation
271 # with the service unit's local address and defer execution
272 access_network = relation_get('access-network')
273 if access_network is not None:
274 if self.relation_prefix is not None:
275 hostname_key = "{}_hostname".format(self.relation_prefix)
276 else:
277 hostname_key = "hostname"
278 access_hostname = get_address_in_network(access_network,
279 unit_get('private-address'))
280 set_hostname = relation_get(attribute=hostname_key,
281 unit=local_unit())
282 if set_hostname != access_hostname:
283 relation_set(relation_settings={hostname_key: access_hostname})
284 return None # Defer any further hook execution for now....
285
286 password_setting = 'password'
287 if self.relation_prefix:
288 password_setting = self.relation_prefix + '_password'
289
290 for rid in relation_ids(self.interfaces[0]):
291 self.related = True
292 for unit in related_units(rid):
293 rdata = relation_get(rid=rid, unit=unit)
294 host = rdata.get('db_host')
295 host = format_ipv6_addr(host) or host
296 ctxt = {
297 'database_host': host,
298 'database': self.database,
299 'database_user': self.user,
300 'database_password': rdata.get(password_setting),
301 'database_type': 'mysql'
302 }
303 if self.context_complete(ctxt):
304 db_ssl(rdata, ctxt, self.ssl_dir)
305 return ctxt
306 return {}
307
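As a hedged illustration of the relation_prefix support described in the docstring above; the database name, user and ssl_dir path are assumptions:

    from charmhelpers.contrib.openstack.context import SharedDBContext

    # A principal charm sharing a mysql service with others might request
    # prefixed credentials (e.g. nova_password) on the shared-db relation.
    nova_db = SharedDBContext(database='nova', user='nova',
                              relation_prefix='nova', ssl_dir='/etc/nova')
    ctxt = nova_db()   # {} until db_host/password appear on the relation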
308
309class PostgresqlDBContext(OSContextGenerator):
310 interfaces = ['pgsql-db']
311
312 def __init__(self, database=None):
313 self.database = database
314
315 def __call__(self):
316 self.database = self.database or config('database')
317 if self.database is None:
318 log('Could not generate postgresql_db context. Missing required '
319 'charm config options. (database name)', level=ERROR)
320 raise OSContextError
321
322 ctxt = {}
323 for rid in relation_ids(self.interfaces[0]):
324 self.related = True
325 for unit in related_units(rid):
326 rel_host = relation_get('host', rid=rid, unit=unit)
327 rel_user = relation_get('user', rid=rid, unit=unit)
328 rel_passwd = relation_get('password', rid=rid, unit=unit)
329 ctxt = {'database_host': rel_host,
330 'database': self.database,
331 'database_user': rel_user,
332 'database_password': rel_passwd,
333 'database_type': 'postgresql'}
334 if self.context_complete(ctxt):
335 return ctxt
336
337 return {}
338
339
340def db_ssl(rdata, ctxt, ssl_dir):
341 if 'ssl_ca' in rdata and ssl_dir:
342 ca_path = os.path.join(ssl_dir, 'db-client.ca')
343 with open(ca_path, 'w') as fh:
344 fh.write(b64decode(rdata['ssl_ca']))
345
346 ctxt['database_ssl_ca'] = ca_path
347 elif 'ssl_ca' in rdata:
348 log("Charm not setup for ssl support but ssl ca found", level=INFO)
349 return ctxt
350
351 if 'ssl_cert' in rdata:
352 cert_path = os.path.join(
353 ssl_dir, 'db-client.cert')
354 if not os.path.exists(cert_path):
355 log("Waiting 1m for ssl client cert validity", level=INFO)
356 time.sleep(60)
357
358 with open(cert_path, 'w') as fh:
359 fh.write(b64decode(rdata['ssl_cert']))
360
361 ctxt['database_ssl_cert'] = cert_path
362 key_path = os.path.join(ssl_dir, 'db-client.key')
363 with open(key_path, 'w') as fh:
364 fh.write(b64decode(rdata['ssl_key']))
365
366 ctxt['database_ssl_key'] = key_path
367
368 return ctxt
369
370
371class IdentityServiceContext(OSContextGenerator):
372
373 def __init__(self, service=None, service_user=None, rel_name='identity-service'):
374 self.service = service
375 self.service_user = service_user
376 self.rel_name = rel_name
377 self.interfaces = [self.rel_name]
378
379 def __call__(self):
380 log('Generating template context for ' + self.rel_name, level=DEBUG)
381 ctxt = {}
382
383 if self.service and self.service_user:
384 # This is required for pki token signing if we don't want /tmp to
385 # be used.
386 cachedir = '/var/cache/%s' % (self.service)
387 if not os.path.isdir(cachedir):
388 log("Creating service cache dir %s" % (cachedir), level=DEBUG)
389 mkdir(path=cachedir, owner=self.service_user,
390 group=self.service_user, perms=0o700)
391
392 ctxt['signing_dir'] = cachedir
393
394 for rid in relation_ids(self.rel_name):
395 self.related = True
396 for unit in related_units(rid):
397 rdata = relation_get(rid=rid, unit=unit)
398 serv_host = rdata.get('service_host')
399 serv_host = format_ipv6_addr(serv_host) or serv_host
400 auth_host = rdata.get('auth_host')
401 auth_host = format_ipv6_addr(auth_host) or auth_host
402 svc_protocol = rdata.get('service_protocol') or 'http'
403 auth_protocol = rdata.get('auth_protocol') or 'http'
404 ctxt.update({'service_port': rdata.get('service_port'),
405 'service_host': serv_host,
406 'auth_host': auth_host,
407 'auth_port': rdata.get('auth_port'),
408 'admin_tenant_name': rdata.get('service_tenant'),
409 'admin_user': rdata.get('service_username'),
410 'admin_password': rdata.get('service_password'),
411 'service_protocol': svc_protocol,
412 'auth_protocol': auth_protocol})
413
414 if self.context_complete(ctxt):
415 # NOTE(jamespage) this is required for >= icehouse
416 # so a missing value just indicates keystone needs
417 # upgrading
418 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
419 return ctxt
420
421 return {}
422
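A short, hedged usage sketch of the class above; the 'cinder' service name is purely illustrative:

    from charmhelpers.contrib.openstack.context import IdentityServiceContext

    # Passing service/service_user makes the generator create a per-service
    # signing cache dir (/var/cache/cinder) and expose it as
    # ctxt['signing_dir'] alongside the keystone relation data.
    identity = IdentityServiceContext(service='cinder', service_user='cinder')
    ctxt = identity()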
423
424class AMQPContext(OSContextGenerator):
425
426 def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
427 self.ssl_dir = ssl_dir
428 self.rel_name = rel_name
429 self.relation_prefix = relation_prefix
430 self.interfaces = [rel_name]
431
432 def __call__(self):
433 log('Generating template context for amqp', level=DEBUG)
434 conf = config()
435 if self.relation_prefix:
436 user_setting = '%s-rabbit-user' % (self.relation_prefix)
437 vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
438 else:
439 user_setting = 'rabbit-user'
440 vhost_setting = 'rabbit-vhost'
441
442 try:
443 username = conf[user_setting]
444 vhost = conf[vhost_setting]
445 except KeyError as e:
446 log('Could not generate amqp context. Missing required charm '
447 'config options: %s.' % e, level=ERROR)
448 raise OSContextError
449
450 ctxt = {}
451 for rid in relation_ids(self.rel_name):
452 ha_vip_only = False
453 self.related = True
454 for unit in related_units(rid):
455 if relation_get('clustered', rid=rid, unit=unit):
456 ctxt['clustered'] = True
457 vip = relation_get('vip', rid=rid, unit=unit)
458 vip = format_ipv6_addr(vip) or vip
459 ctxt['rabbitmq_host'] = vip
460 else:
461 host = relation_get('private-address', rid=rid, unit=unit)
462 host = format_ipv6_addr(host) or host
463 ctxt['rabbitmq_host'] = host
464
465 ctxt.update({
466 'rabbitmq_user': username,
467 'rabbitmq_password': relation_get('password', rid=rid,
468 unit=unit),
469 'rabbitmq_virtual_host': vhost,
470 })
471
472 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
473 if ssl_port:
474 ctxt['rabbit_ssl_port'] = ssl_port
475
476 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
477 if ssl_ca:
478 ctxt['rabbit_ssl_ca'] = ssl_ca
479
480 if relation_get('ha_queues', rid=rid, unit=unit) is not None:
481 ctxt['rabbitmq_ha_queues'] = True
482
483 ha_vip_only = relation_get('ha-vip-only',
484 rid=rid, unit=unit) is not None
485
486 if self.context_complete(ctxt):
487 if 'rabbit_ssl_ca' in ctxt:
488 if not self.ssl_dir:
489 log("Charm not setup for ssl support but ssl ca "
490 "found", level=INFO)
491 break
492
493 ca_path = os.path.join(
494 self.ssl_dir, 'rabbit-client-ca.pem')
495 with open(ca_path, 'w') as fh:
496 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
497 ctxt['rabbit_ssl_ca'] = ca_path
498
499 # Sufficient information found = break out!
500 break
501
502 # Used for active/active rabbitmq >= grizzly
503 if (('clustered' not in ctxt or ha_vip_only) and
504 len(related_units(rid)) > 1):
505 rabbitmq_hosts = []
506 for unit in related_units(rid):
507 host = relation_get('private-address', rid=rid, unit=unit)
508 host = format_ipv6_addr(host) or host
509 rabbitmq_hosts.append(host)
510
511 ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
512
513 oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
514 if oslo_messaging_flags:
515 ctxt['oslo_messaging_flags'] = config_flags_parser(
516 oslo_messaging_flags)
517
518 if not self.complete:
519 return {}
520
521 return ctxt
522
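A hedged sketch of how the AMQP generator above is typically instantiated; the ssl_dir path is an example only:

    from charmhelpers.contrib.openstack.context import AMQPContext

    # Reads rabbit-user/rabbit-vhost from charm config; with relation_prefix
    # set it would read <prefix>-rabbit-user / <prefix>-rabbit-vhost instead.
    amqp = AMQPContext(ssl_dir='/etc/glance')
    ctxt = amqp()   # e.g. {'rabbitmq_host': ..., 'rabbitmq_user': ...} or {}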
523
524class CephContext(OSContextGenerator):
525 """Generates context for /etc/ceph/ceph.conf templates."""
526 interfaces = ['ceph']
527
528 def __call__(self):
529 if not relation_ids('ceph'):
530 return {}
531
532 log('Generating template context for ceph', level=DEBUG)
533 mon_hosts = []
534 ctxt = {
535 'use_syslog': str(config('use-syslog')).lower()
536 }
537 for rid in relation_ids('ceph'):
538 for unit in related_units(rid):
539 if not ctxt.get('auth'):
540 ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
541 if not ctxt.get('key'):
542 ctxt['key'] = relation_get('key', rid=rid, unit=unit)
543 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
544 unit=unit)
545 unit_priv_addr = relation_get('private-address', rid=rid,
546 unit=unit)
547 ceph_addr = ceph_pub_addr or unit_priv_addr
548 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
549 mon_hosts.append(ceph_addr)
550
551 ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
552
553 if not os.path.isdir('/etc/ceph'):
554 os.mkdir('/etc/ceph')
555
556 if not self.context_complete(ctxt):
557 return {}
558
559 ensure_packages(['ceph-common'])
560 return ctxt
561
562
563class HAProxyContext(OSContextGenerator):
564 """Provides half a context for the haproxy template, which describes
565 all peers to be included in the cluster. Each charm needs to include
566 its own context generator that describes the port mapping.
567 """
568 interfaces = ['cluster']
569
570 def __init__(self, singlenode_mode=False):
571 self.singlenode_mode = singlenode_mode
572
573 def __call__(self):
574 if not relation_ids('cluster') and not self.singlenode_mode:
575 return {}
576
577 if config('prefer-ipv6'):
578 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
579 else:
580 addr = get_host_ip(unit_get('private-address'))
581
582 l_unit = local_unit().replace('/', '-')
583 cluster_hosts = {}
584
585 # NOTE(jamespage): build out map of configured network endpoints
586 # and associated backends
587 for addr_type in ADDRESS_TYPES:
588 cfg_opt = 'os-{}-network'.format(addr_type)
589 laddr = get_address_in_network(config(cfg_opt))
590 if laddr:
591 netmask = get_netmask_for_address(laddr)
592 cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
593 netmask),
594 'backends': {l_unit: laddr}}
595 for rid in relation_ids('cluster'):
596 for unit in related_units(rid):
597 _laddr = relation_get('{}-address'.format(addr_type),
598 rid=rid, unit=unit)
599 if _laddr:
600 _unit = unit.replace('/', '-')
601 cluster_hosts[laddr]['backends'][_unit] = _laddr
602
603 # NOTE(jamespage) add backend based on private address - this
604 # will either be the only backend or the fallback if no acls
605 # match in the frontend
606 cluster_hosts[addr] = {}
607 netmask = get_netmask_for_address(addr)
608 cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
609 'backends': {l_unit: addr}}
610 for rid in relation_ids('cluster'):
611 for unit in related_units(rid):
612 _laddr = relation_get('private-address',
613 rid=rid, unit=unit)
614 if _laddr:
615 _unit = unit.replace('/', '-')
616 cluster_hosts[addr]['backends'][_unit] = _laddr
617
618 ctxt = {
619 'frontends': cluster_hosts,
620 'default_backend': addr
621 }
622
623 if config('haproxy-server-timeout'):
624 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
625
626 if config('haproxy-client-timeout'):
627 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
628
629 if config('prefer-ipv6'):
630 ctxt['ipv6'] = True
631 ctxt['local_host'] = 'ip6-localhost'
632 ctxt['haproxy_host'] = '::'
633 ctxt['stat_port'] = ':::8888'
634 else:
635 ctxt['local_host'] = '127.0.0.1'
636 ctxt['haproxy_host'] = '0.0.0.0'
637 ctxt['stat_port'] = ':8888'
638
639 for frontend in cluster_hosts:
640 if (len(cluster_hosts[frontend]['backends']) > 1 or
641 self.singlenode_mode):
642 # Enable haproxy when we have enough peers.
643 log('Ensuring haproxy enabled in /etc/default/haproxy.',
644 level=DEBUG)
645 with open('/etc/default/haproxy', 'w') as out:
646 out.write('ENABLED=1\n')
647
648 return ctxt
649
650 log('HAProxy context is incomplete, this unit has no peers.',
651 level=INFO)
652 return {}
653
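A hedged sketch of the shape of the half-context produced above in single-node mode; the addresses and unit name are placeholders:

    from charmhelpers.contrib.openstack.context import HAProxyContext

    ctxt = HAProxyContext(singlenode_mode=True)()
    # Roughly:
    # {'frontends': {'10.0.0.5': {'network': '10.0.0.5/255.255.255.0',
    #                             'backends': {'glance-0': '10.0.0.5'}}},
    #  'default_backend': '10.0.0.5',
    #  'local_host': '127.0.0.1', 'haproxy_host': '0.0.0.0',
    #  'stat_port': ':8888'}
    # The charm's own context generator must still supply the port-mapping
    # half consumed by the haproxy.cfg template.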
654
655class ImageServiceContext(OSContextGenerator):
656 interfaces = ['image-service']
657
658 def __call__(self):
659 """Obtains the glance API server from the image-service relation.
660 Useful in nova and cinder (currently).
661 """
662 log('Generating template context for image-service.', level=DEBUG)
663 rids = relation_ids('image-service')
664 if not rids:
665 return {}
666
667 for rid in rids:
668 for unit in related_units(rid):
669 api_server = relation_get('glance-api-server',
670 rid=rid, unit=unit)
671 if api_server:
672 return {'glance_api_servers': api_server}
673
674 log("ImageService context is incomplete. Missing required relation "
675 "data.", level=INFO)
676 return {}
677
678
679class ApacheSSLContext(OSContextGenerator):
680 """Generates a context for an apache vhost configuration that configures
681 HTTPS reverse proxying for one or many endpoints. Generated context
682 looks something like::
683
684 {
685 'namespace': 'cinder',
686 'private_address': 'iscsi.mycinderhost.com',
687 'endpoints': [(8776, 8766), (8777, 8767)]
688 }
689
690 The endpoints list consists of tuples mapping external ports
691 to internal ports.
692 """
693 interfaces = ['https']
694
695 # charms should inherit this context and set external ports
696 # and service namespace accordingly.
697 external_ports = []
698 service_namespace = None
699
700 def enable_modules(self):
701 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
702 check_call(cmd)
703
704 def configure_cert(self, cn=None):
705 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
706 mkdir(path=ssl_dir)
707 cert, key = get_cert(cn)
708 if cn:
709 cert_filename = 'cert_{}'.format(cn)
710 key_filename = 'key_{}'.format(cn)
711 else:
712 cert_filename = 'cert'
713 key_filename = 'key'
714
715 write_file(path=os.path.join(ssl_dir, cert_filename),
716 content=b64decode(cert))
717 write_file(path=os.path.join(ssl_dir, key_filename),
718 content=b64decode(key))
719
720 def configure_ca(self):
721 ca_cert = get_ca_cert()
722 if ca_cert:
723 install_ca_cert(b64decode(ca_cert))
724
725 def canonical_names(self):
726 """Figure out which canonical names clients will use to access this service.
727 """
728 cns = []
729 for r_id in relation_ids('identity-service'):
730 for unit in related_units(r_id):
731 rdata = relation_get(rid=r_id, unit=unit)
732 for k in rdata:
733 if k.startswith('ssl_key_'):
734 cns.append(k.lstrip('ssl_key_'))
735
736 return sorted(list(set(cns)))
737
738 def get_network_addresses(self):
739 """For each network configured, return corresponding address and vip
740 (if available).
741
742 Returns a list of tuples of the form:
743
744 [(address_in_net_a, vip_in_net_a),
745 (address_in_net_b, vip_in_net_b),
746 ...]
747
748 or, if no vip(s) available:
749
750 [(address_in_net_a, address_in_net_a),
751 (address_in_net_b, address_in_net_b),
752 ...]
753 """
754 addresses = []
755 if config('vip'):
756 vips = config('vip').split()
757 else:
758 vips = []
759
760 for net_type in ['os-internal-network', 'os-admin-network',
761 'os-public-network']:
762 addr = get_address_in_network(config(net_type),
763 unit_get('private-address'))
764 if len(vips) > 1 and is_clustered():
765 if not config(net_type):
766 log("Multiple networks configured but net_type "
767 "is None (%s)." % net_type, level=WARNING)
768 continue
769
770 for vip in vips:
771 if is_address_in_network(config(net_type), vip):
772 addresses.append((addr, vip))
773 break
774
775 elif is_clustered() and config('vip'):
776 addresses.append((addr, config('vip')))
777 else:
778 addresses.append((addr, addr))
779
780 return sorted(addresses)
781
782 def __call__(self):
783 if isinstance(self.external_ports, six.string_types):
784 self.external_ports = [self.external_ports]
785
786 if not self.external_ports or not https():
787 return {}
788
789 self.configure_ca()
790 self.enable_modules()
791
792 ctxt = {'namespace': self.service_namespace,
793 'endpoints': [],
794 'ext_ports': []}
795
796 cns = self.canonical_names()
797 if cns:
798 for cn in cns:
799 self.configure_cert(cn)
800 else:
801 # Expect cert/key provided in config (currently assumed that ca
802 # uses ip for cn)
803 cn = resolve_address(endpoint_type=INTERNAL)
804 self.configure_cert(cn)
805
806 addresses = self.get_network_addresses()
807 for address, endpoint in sorted(set(addresses)):
808 for api_port in self.external_ports:
809 ext_port = determine_apache_port(api_port,
810 singlenode_mode=True)
811 int_port = determine_api_port(api_port, singlenode_mode=True)
812 portmap = (address, endpoint, int(ext_port), int(int_port))
813 ctxt['endpoints'].append(portmap)
814 ctxt['ext_ports'].append(int(ext_port))
815
816 ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
817 return ctxt
818
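A hedged sketch of the inheritance pattern the comments above describe; the port and namespace are examples only:

    from charmhelpers.contrib.openstack.context import ApacheSSLContext

    class CinderSSLContext(ApacheSSLContext):
        # external (client-facing) ports this service exposes over https
        external_ports = [8776]
        service_namespace = 'cinder'

    # When https() is enabled, the resulting ctxt carries 'namespace',
    # 'endpoints' as (address, endpoint, ext_port, int_port) tuples and
    # the de-duplicated 'ext_ports' list.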
819
820class NeutronContext(OSContextGenerator):
821 interfaces = []
822
823 @property
824 def plugin(self):
825 return None
826
827 @property
828 def network_manager(self):
829 return None
830
831 @property
832 def packages(self):
833 return neutron_plugin_attribute(self.plugin, 'packages',
834 self.network_manager)
835
836 @property
837 def neutron_security_groups(self):
838 return None
839
840 def _ensure_packages(self):
841 for pkgs in self.packages:
842 ensure_packages(pkgs)
843
844 def _save_flag_file(self):
845 if self.network_manager == 'quantum':
846 _file = '/etc/nova/quantum_plugin.conf'
847 else:
848 _file = '/etc/nova/neutron_plugin.conf'
849
850 with open(_file, 'wb') as out:
851 out.write(self.plugin + '\n')
852
853 def ovs_ctxt(self):
854 driver = neutron_plugin_attribute(self.plugin, 'driver',
855 self.network_manager)
856 config = neutron_plugin_attribute(self.plugin, 'config',
857 self.network_manager)
858 ovs_ctxt = {'core_plugin': driver,
859 'neutron_plugin': 'ovs',
860 'neutron_security_groups': self.neutron_security_groups,
861 'local_ip': unit_private_ip(),
862 'config': config}
863
864 return ovs_ctxt
865
866 def nuage_ctxt(self):
867 driver = neutron_plugin_attribute(self.plugin, 'driver',
868 self.network_manager)
869 config = neutron_plugin_attribute(self.plugin, 'config',
870 self.network_manager)
871 nuage_ctxt = {'core_plugin': driver,
872 'neutron_plugin': 'vsp',
873 'neutron_security_groups': self.neutron_security_groups,
874 'local_ip': unit_private_ip(),
875 'config': config}
876
877 return nuage_ctxt
878
879 def nvp_ctxt(self):
880 driver = neutron_plugin_attribute(self.plugin, 'driver',
881 self.network_manager)
882 config = neutron_plugin_attribute(self.plugin, 'config',
883 self.network_manager)
884 nvp_ctxt = {'core_plugin': driver,
885 'neutron_plugin': 'nvp',
886 'neutron_security_groups': self.neutron_security_groups,
887 'local_ip': unit_private_ip(),
888 'config': config}
889
890 return nvp_ctxt
891
892 def n1kv_ctxt(self):
893 driver = neutron_plugin_attribute(self.plugin, 'driver',
894 self.network_manager)
895 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
896 self.network_manager)
897 n1kv_user_config_flags = config('n1kv-config-flags')
898 restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
899 n1kv_ctxt = {'core_plugin': driver,
900 'neutron_plugin': 'n1kv',
901 'neutron_security_groups': self.neutron_security_groups,
902 'local_ip': unit_private_ip(),
903 'config': n1kv_config,
904 'vsm_ip': config('n1kv-vsm-ip'),
905 'vsm_username': config('n1kv-vsm-username'),
906 'vsm_password': config('n1kv-vsm-password'),
907 'restrict_policy_profiles': restrict_policy_profiles}
908
909 if n1kv_user_config_flags:
910 flags = config_flags_parser(n1kv_user_config_flags)
911 n1kv_ctxt['user_config_flags'] = flags
912
913 return n1kv_ctxt
914
915 def calico_ctxt(self):
916 driver = neutron_plugin_attribute(self.plugin, 'driver',
917 self.network_manager)
918 config = neutron_plugin_attribute(self.plugin, 'config',
919 self.network_manager)
920 calico_ctxt = {'core_plugin': driver,
921 'neutron_plugin': 'Calico',
922 'neutron_security_groups': self.neutron_security_groups,
923 'local_ip': unit_private_ip(),
924 'config': config}
925
926 return calico_ctxt
927
928 def neutron_ctxt(self):
929 if https():
930 proto = 'https'
931 else:
932 proto = 'http'
933
934 if is_clustered():
935 host = config('vip')
936 else:
937 host = unit_get('private-address')
938
939 ctxt = {'network_manager': self.network_manager,
940 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
941 return ctxt
942
943 def pg_ctxt(self):
944 driver = neutron_plugin_attribute(self.plugin, 'driver',
945 self.network_manager)
946 config = neutron_plugin_attribute(self.plugin, 'config',
947 self.network_manager)
948 ovs_ctxt = {'core_plugin': driver,
949 'neutron_plugin': 'plumgrid',
950 'neutron_security_groups': self.neutron_security_groups,
951 'local_ip': unit_private_ip(),
952 'config': config}
953 return ovs_ctxt
954
955 def __call__(self):
956 if self.network_manager not in ['quantum', 'neutron']:
957 return {}
958
959 if not self.plugin:
960 return {}
961
962 ctxt = self.neutron_ctxt()
963
964 if self.plugin == 'ovs':
965 ctxt.update(self.ovs_ctxt())
966 elif self.plugin in ['nvp', 'nsx']:
967 ctxt.update(self.nvp_ctxt())
968 elif self.plugin == 'n1kv':
969 ctxt.update(self.n1kv_ctxt())
970 elif self.plugin == 'Calico':
971 ctxt.update(self.calico_ctxt())
972 elif self.plugin == 'vsp':
973 ctxt.update(self.nuage_ctxt())
974 elif self.plugin == 'plumgrid':
975 ctxt.update(self.pg_ctxt())
976
977 alchemy_flags = config('neutron-alchemy-flags')
978 if alchemy_flags:
979 flags = config_flags_parser(alchemy_flags)
980 ctxt['neutron_alchemy_flags'] = flags
981
982 self._save_flag_file()
983 return ctxt
984
985
986class NeutronPortContext(OSContextGenerator):
987
988 def resolve_ports(self, ports):
989 """Resolve NICs not yet bound to bridge(s)
990
991 If hwaddress provided then returns resolved hwaddress otherwise NIC.
992 """
993 if not ports:
994 return None
995
996 hwaddr_to_nic = {}
997 hwaddr_to_ip = {}
998 for nic in list_nics():
999 # Ignore virtual interfaces (bond masters will be identified from
1000 # their slaves)
1001 if not is_phy_iface(nic):
1002 continue
1003
1004 _nic = get_bond_master(nic)
1005 if _nic:
1006 log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
1007 level=DEBUG)
1008 nic = _nic
1009
1010 hwaddr = get_nic_hwaddr(nic)
1011 hwaddr_to_nic[hwaddr] = nic
1012 addresses = get_ipv4_addr(nic, fatal=False)
1013 addresses += get_ipv6_addr(iface=nic, fatal=False)
1014 hwaddr_to_ip[hwaddr] = addresses
1015
1016 resolved = []
1017 mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
1018 for entry in ports:
1019 if re.match(mac_regex, entry):
1020 # NIC is in known NICs and does NOT have an IP address
1021 if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
1022 # If the nic is part of a bridge then don't use it
1023 if is_bridge_member(hwaddr_to_nic[entry]):
1024 continue
1025
1026 # Entry is a MAC address for a valid interface that doesn't
1027 # have an IP address assigned yet.
1028 resolved.append(hwaddr_to_nic[entry])
1029 else:
1030 # If the passed entry is not a MAC address, assume it's a valid
1031 # interface, and that the user put it there on purpose (we can
1032 # trust it to be the real external network).
1033 resolved.append(entry)
1034
1035 # Ensure no duplicates
1036 return list(set(resolved))
1037
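A hedged illustration of the resolution rules above; the interface names and MAC address are invented for the example:

    from charmhelpers.contrib.openstack.context import NeutronPortContext

    npc = NeutronPortContext()
    # Plain interface names are trusted and passed straight through;
    # MAC addresses only resolve to a NIC if that NIC is known, has no IP
    # address and is not already a bridge member.
    npc.resolve_ports(['eth2'])               # -> ['eth2']
    npc.resolve_ports(['aa:bb:cc:dd:ee:ff'])  # -> ['eth3'] (if it matches)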
1038
1039class OSConfigFlagContext(OSContextGenerator):
1040 """Provides support for user-defined config flags.
1041
1042 Users can define a comma-separated list of key=value pairs
1043 in the charm configuration and apply them at any point in
1044 any file by using a template flag.
1045
1046 Sometimes users might want config flags inserted within a
1047 specific section so this class allows users to specify the
1048 template flag name, allowing for multiple template flags
1049 (sections) within the same context.
1050
1051 NOTE: the value of config-flags may be a comma-separated list of
1052 key=value pairs and some OpenStack config files support
1053 comma-separated lists as values.
1054 """
1055
1056 def __init__(self, charm_flag='config-flags',
1057 template_flag='user_config_flags'):
1058 """
1059 :param charm_flag: config flags in charm configuration.
1060 :param template_flag: insert point for user-defined flags in template
1061 file.
1062 """
1063 super(OSConfigFlagContext, self).__init__()
1064 self._charm_flag = charm_flag
1065 self._template_flag = template_flag
1066
1067 def __call__(self):
1068 config_flags = config(self._charm_flag)
1069 if not config_flags:
1070 return {}
1071
1072 return {self._template_flag:
1073 config_flags_parser(config_flags)}
1074
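A hedged sketch of the two usage modes described in the docstring above; the alternate option and template flag names are examples, not defined by this diff:

    from charmhelpers.contrib.openstack.context import OSConfigFlagContext

    # Default behaviour: expose config('config-flags') as 'user_config_flags'.
    OSConfigFlagContext()()

    # Section-specific flags via a custom charm option and template flag:
    OSConfigFlagContext(charm_flag='api-config-flags',
                        template_flag='api_config_flags')()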
1075
1076class SubordinateConfigContext(OSContextGenerator):
1077
1078 """
1079 Responsible for inspecting relations to subordinates that
1080 may be exporting required config via a json blob.
1081
1082 The subordinate interface allows subordinates to export their
1083 configuration requirements to the principal for multiple config
1084 files and multiple services. For example, a subordinate that has interfaces
1085 to both glance and nova may export the following yaml blob as json::
1086
1087 glance:
1088 /etc/glance/glance-api.conf:
1089 sections:
1090 DEFAULT:
1091 - [key1, value1]
1092 /etc/glance/glance-registry.conf:
1093 MYSECTION:
1094 - [key2, value2]
1095 nova:
1096 /etc/nova/nova.conf:
1097 sections:
1098 DEFAULT:
1099 - [key3, value3]
1100
1101
1102 It is then up to the principal charms to subscribe this context to
1103 the service+config file it is interested in. Configuration data will
1104 be available in the template context, in glance's case, as::
1105
1106 ctxt = {
1107 ... other context ...
1108 'subordinate_config': {
1109 'DEFAULT': {
1110 'key1': 'value1',
1111 },
1112 'MYSECTION': {
1113 'key2': 'value2',
1114 },
1115 }
1116 }
1117 """
1118
1119 def __init__(self, service, config_file, interface):
1120 """
1121 :param service : Service name key to query in any subordinate
1122 data found
1123 :param config_file : Service's config file to query sections
1124 :param interface : Subordinate interface to inspect
1125 """
1126 self.config_file = config_file
1127 if isinstance(service, list):
1128 self.services = service
1129 else:
1130 self.services = [service]
1131 if isinstance(interface, list):
1132 self.interfaces = interface
1133 else:
1134 self.interfaces = [interface]
1135
1136 def __call__(self):
1137 ctxt = {'sections': {}}
1138 rids = []
1139 for interface in self.interfaces:
1140 rids.extend(relation_ids(interface))
1141 for rid in rids:
1142 for unit in related_units(rid):
1143 sub_config = relation_get('subordinate_configuration',
1144 rid=rid, unit=unit)
1145 if sub_config and sub_config != '':
1146 try:
1147 sub_config = json.loads(sub_config)
1148 except:
1149 log('Could not parse JSON from subordinate_config '
1150 'setting from %s' % rid, level=ERROR)
1151 continue
1152
1153 for service in self.services:
1154 if service not in sub_config:
1155 log('Found subordinate_config on %s but it contained'
1156 ' nothing for %s service' % (rid, service),
1157 level=INFO)
1158 continue
1159
1160 sub_config = sub_config[service]
1161 if self.config_file not in sub_config:
1162 log('Found subordinate_config on %s but it contained'
1163 ' nothing for %s' % (rid, self.config_file),
1164 level=INFO)
1165 continue
1166
1167 sub_config = sub_config[self.config_file]
1168 for k, v in six.iteritems(sub_config):
1169 if k == 'sections':
1170 for section, config_list in six.iteritems(v):
1171 log("adding section '%s'" % (section),
1172 level=DEBUG)
1173 if ctxt[k].get(section):
1174 ctxt[k][section].extend(config_list)
1175 else:
1176 ctxt[k][section] = config_list
1177 else:
1178 ctxt[k] = v
1179 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1180 return ctxt
1181
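A hedged instantiation matching the glance example in the docstring above; the 'glance-plugin' interface name is illustrative only:

    from charmhelpers.contrib.openstack.context import SubordinateConfigContext

    sub_ctxt = SubordinateConfigContext(
        service='glance',
        config_file='/etc/glance/glance-api.conf',
        interface='glance-plugin')
    ctxt = sub_ctxt()
    # e.g. {'sections': {'DEFAULT': [['key1', 'value1']]}}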
1182
1183class LogLevelContext(OSContextGenerator):
1184
1185 def __call__(self):
1186 ctxt = {}
1187 ctxt['debug'] = \
1188 False if config('debug') is None else config('debug')
1189 ctxt['verbose'] = \
1190 False if config('verbose') is None else config('verbose')
1191
1192 return ctxt
1193
1194
1195class SyslogContext(OSContextGenerator):
1196
1197 def __call__(self):
1198 ctxt = {'use_syslog': config('use-syslog')}
1199 return ctxt
1200
1201
1202class BindHostContext(OSContextGenerator):
1203
1204 def __call__(self):
1205 if config('prefer-ipv6'):
1206 return {'bind_host': '::'}
1207 else:
1208 return {'bind_host': '0.0.0.0'}
1209
1210
1211class WorkerConfigContext(OSContextGenerator):
1212
1213 @property
1214 def num_cpus(self):
1215 try:
1216 from psutil import NUM_CPUS
1217 except ImportError:
1218 apt_install('python-psutil', fatal=True)
1219 from psutil import NUM_CPUS
1220
1221 return NUM_CPUS
1222
1223 def __call__(self):
1224 multiplier = config('worker-multiplier') or 0
1225 ctxt = {"workers": self.num_cpus * multiplier}
1226 return ctxt
1227
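A short, hedged worked example of the calculation above; the core count is assumed:

    from charmhelpers.contrib.openstack.context import WorkerConfigContext

    # With worker-multiplier=2 on a 4-core unit this yields {'workers': 8};
    # if worker-multiplier is unset the multiplier falls back to 0, so the
    # context reports 0 workers.
    WorkerConfigContext()()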
1228
1229class ZeroMQContext(OSContextGenerator):
1230 interfaces = ['zeromq-configuration']
1231
1232 def __call__(self):
1233 ctxt = {}
1234 if is_relation_made('zeromq-configuration', 'host'):
1235 for rid in relation_ids('zeromq-configuration'):
1236 for unit in related_units(rid):
1237 ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1238 ctxt['zmq_host'] = relation_get('host', unit, rid)
1239 ctxt['zmq_redis_address'] = relation_get(
1240 'zmq_redis_address', unit, rid)
1241
1242 return ctxt
1243
1244
1245class NotificationDriverContext(OSContextGenerator):
1246
1247 def __init__(self, zmq_relation='zeromq-configuration',
1248 amqp_relation='amqp'):
1249 """
1250 :param zmq_relation: Name of Zeromq relation to check
1251 """
1252 self.zmq_relation = zmq_relation
1253 self.amqp_relation = amqp_relation
1254
1255 def __call__(self):
1256 ctxt = {'notifications': 'False'}
1257 if is_relation_made(self.amqp_relation):
1258 ctxt['notifications'] = "True"
1259
1260 return ctxt
1261
1262
1263class SysctlContext(OSContextGenerator):
1264 """This context checks if the 'sysctl' option exists in the configuration
1265 and, if so, creates a file with the loaded contents."""
1266 def __call__(self):
1267 sysctl_dict = config('sysctl')
1268 if sysctl_dict:
1269 sysctl_create(sysctl_dict,
1270 '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
1271 return {'sysctl': sysctl_dict}
1272
1273
1274class NeutronAPIContext(OSContextGenerator):
1275 '''
1276 Inspects current neutron-plugin-api relation for neutron settings. Return
1277 defaults if it is not present.
1278 '''
1279 interfaces = ['neutron-plugin-api']
1280
1281 def __call__(self):
1282 self.neutron_defaults = {
1283 'l2_population': {
1284 'rel_key': 'l2-population',
1285 'default': False,
1286 },
1287 'overlay_network_type': {
1288 'rel_key': 'overlay-network-type',
1289 'default': 'gre',
1290 },
1291 'neutron_security_groups': {
1292 'rel_key': 'neutron-security-groups',
1293 'default': False,
1294 },
1295 'network_device_mtu': {
1296 'rel_key': 'network-device-mtu',
1297 'default': None,
1298 },
1299 'enable_dvr': {
1300 'rel_key': 'enable-dvr',
1301 'default': False,
1302 },
1303 'enable_l3ha': {
1304 'rel_key': 'enable-l3ha',
1305 'default': False,
1306 },
1307 }
1308 ctxt = self.get_neutron_options({})
1309 for rid in relation_ids('neutron-plugin-api'):
1310 for unit in related_units(rid):
1311 rdata = relation_get(rid=rid, unit=unit)
1312 if 'l2-population' in rdata:
1313 ctxt.update(self.get_neutron_options(rdata))
1314
1315 return ctxt
1316
1317 def get_neutron_options(self, rdata):
1318 settings = {}
1319 for nkey in self.neutron_defaults.keys():
1320 defv = self.neutron_defaults[nkey]['default']
1321 rkey = self.neutron_defaults[nkey]['rel_key']
1322 if rkey in rdata.keys():
1323 if type(defv) is bool:
1324 settings[nkey] = bool_from_string(rdata[rkey])
1325 else:
1326 settings[nkey] = rdata[rkey]
1327 else:
1328 settings[nkey] = defv
1329 return settings
1330
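A hedged sketch of how relation data maps onto the defaults above; the relation values shown are assumptions:

    from charmhelpers.contrib.openstack.context import NeutronAPIContext

    napi = NeutronAPIContext()
    napi()   # __call__ populates self.neutron_defaults before use
    # Relation keys override defaults, with booleans parsed from strings:
    napi.get_neutron_options({'l2-population': 'True'})
    # -> {'l2_population': True, 'overlay_network_type': 'gre',
    #     'neutron_security_groups': False, 'network_device_mtu': None,
    #     'enable_dvr': False, 'enable_l3ha': False}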
1331
1332class ExternalPortContext(NeutronPortContext):
1333
1334 def __call__(self):
1335 ctxt = {}
1336 ports = config('ext-port')
1337 if ports:
1338 ports = [p.strip() for p in ports.split()]
1339 ports = self.resolve_ports(ports)
1340 if ports:
1341 ctxt = {"ext_port": ports[0]}
1342 napi_settings = NeutronAPIContext()()
1343 mtu = napi_settings.get('network_device_mtu')
1344 if mtu:
1345 ctxt['ext_port_mtu'] = mtu
1346
1347 return ctxt
1348
1349
1350class DataPortContext(NeutronPortContext):
1351
1352 def __call__(self):
1353 ports = config('data-port')
1354 if ports:
1355 # Map of {port/mac:bridge}
1356 portmap = parse_data_port_mappings(ports)
1357 ports = portmap.keys()
1358 # Resolve provided ports or mac addresses and filter out those
1359 # already attached to a bridge.
1360 resolved = self.resolve_ports(ports)
1361 # FIXME: is this necessary?
1362 normalized = {get_nic_hwaddr(port): port for port in resolved
1363 if port not in ports}
1364 normalized.update({port: port for port in resolved
1365 if port in ports})
1366 if resolved:
1367 return {bridge: normalized[port] for port, bridge in
1368 six.iteritems(portmap) if port in normalized.keys()}
1369
1370 return None
1371
1372
1373class PhyNICMTUContext(DataPortContext):
1374
1375 def __call__(self):
1376 ctxt = {}
1377 mappings = super(PhyNICMTUContext, self).__call__()
1378 if mappings and mappings.values():
1379 ports = mappings.values()
1380 napi_settings = NeutronAPIContext()()
1381 mtu = napi_settings.get('network_device_mtu')
1382 all_ports = set()
1383 # If any of the ports is a vlan device, its underlying device must have
1384 # mtu applied first.
1385 for port in ports:
1386 for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
1387 lport = os.path.basename(lport)
1388 all_ports.add(lport.split('_')[1])
1389
1390 all_ports = list(all_ports)
1391 all_ports.extend(ports)
1392 if mtu:
1393 ctxt["devs"] = '\\n'.join(all_ports)
1394 ctxt['mtu'] = mtu
1395
1396 return ctxt
1397
1398
1399class NetworkServiceContext(OSContextGenerator):
1400
1401 def __init__(self, rel_name='quantum-network-service'):
1402 self.rel_name = rel_name
1403 self.interfaces = [rel_name]
1404
1405 def __call__(self):
1406 for rid in relation_ids(self.rel_name):
1407 for unit in related_units(rid):
1408 rdata = relation_get(rid=rid, unit=unit)
1409 ctxt = {
1410 'keystone_host': rdata.get('keystone_host'),
1411 'service_port': rdata.get('service_port'),
1412 'auth_port': rdata.get('auth_port'),
1413 'service_tenant': rdata.get('service_tenant'),
1414 'service_username': rdata.get('service_username'),
1415 'service_password': rdata.get('service_password'),
1416 'quantum_host': rdata.get('quantum_host'),
1417 'quantum_port': rdata.get('quantum_port'),
1418 'quantum_url': rdata.get('quantum_url'),
1419 'region': rdata.get('region'),
1420 'service_protocol':
1421 rdata.get('service_protocol') or 'http',
1422 'auth_protocol':
1423 rdata.get('auth_protocol') or 'http',
1424 }
1425 if self.context_complete(ctxt):
1426 return ctxt
1427 return {}
01428
=== added directory 'charmhelpers/contrib/openstack/files'
=== added file 'charmhelpers/contrib/openstack/files/__init__.py'
--- charmhelpers/contrib/openstack/files/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/files/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,18 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# dummy __init__.py to fool syncer into thinking this is a syncable python
18# module
019
=== added file 'charmhelpers/contrib/openstack/files/check_haproxy.sh'
--- charmhelpers/contrib/openstack/files/check_haproxy.sh 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-10-08 20:44:37 +0000
@@ -0,0 +1,32 @@
1#!/bin/bash
2#--------------------------------------------
3# This file is managed by Juju
4#--------------------------------------------
5#
6# Copyright 2009,2012 Canonical Ltd.
7# Author: Tom Haddon
8
9CRITICAL=0
10NOTACTIVE=''
11LOGFILE=/var/log/nagios/check_haproxy.log
12AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
13
14for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
15do
16 output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
17 if [ $? != 0 ]; then
18 date >> $LOGFILE
19 echo $output >> $LOGFILE
20 /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
21 CRITICAL=1
22 NOTACTIVE="${NOTACTIVE} $appserver"
23 fi
24done
25
26if [ $CRITICAL = 1 ]; then
27 echo "CRITICAL:${NOTACTIVE}"
28 exit 2
29fi
30
31echo "OK: All haproxy instances looking good"
32exit 0
033
=== added file 'charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh'
--- charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 2015-10-08 20:44:37 +0000
@@ -0,0 +1,30 @@
1#!/bin/bash
2#--------------------------------------------
3# This file is managed by Juju
4#--------------------------------------------
5#
6# Copyright 2009,2012 Canonical Ltd.
7# Author: Tom Haddon
8
9# These should be config options at some stage
10CURRQthrsh=0
11MAXQthrsh=100
12
13AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
14
15HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
16
17for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
18do
19 CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
20 MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
21
22 if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
23 echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
24 exit 2
25 fi
26done
27
28echo "OK: All haproxy queue depths looking good"
29exit 0
30
031
=== added file 'charmhelpers/contrib/openstack/ip.py'
--- charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/ip.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,151 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from charmhelpers.core.hookenv import (
18 config,
19 unit_get,
20 service_name,
21)
22from charmhelpers.contrib.network.ip import (
23 get_address_in_network,
24 is_address_in_network,
25 is_ipv6,
26 get_ipv6_addr,
27)
28from charmhelpers.contrib.hahelpers.cluster import is_clustered
29
30PUBLIC = 'public'
31INTERNAL = 'int'
32ADMIN = 'admin'
33
34ADDRESS_MAP = {
35 PUBLIC: {
36 'config': 'os-public-network',
37 'fallback': 'public-address',
38 'override': 'os-public-hostname',
39 },
40 INTERNAL: {
41 'config': 'os-internal-network',
42 'fallback': 'private-address',
43 'override': 'os-internal-hostname',
44 },
45 ADMIN: {
46 'config': 'os-admin-network',
47 'fallback': 'private-address',
48 'override': 'os-admin-hostname',
49 }
50}
51
52
53def canonical_url(configs, endpoint_type=PUBLIC):
54 """Returns the correct HTTP URL to this host given the state of HTTPS
55 configuration, hacluster and charm configuration.
56
57 :param configs: OSTemplateRenderer config templating object to inspect
58 for a complete https context.
59 :param endpoint_type: str endpoint type to resolve.
60 :param returns: str base URL for services on the current service unit.
61 """
62 scheme = _get_scheme(configs)
63
64 address = resolve_address(endpoint_type)
65 if is_ipv6(address):
66 address = "[{}]".format(address)
67
68 return '%s://%s' % (scheme, address)
69
70
71def _get_scheme(configs):
72 """Returns the scheme to use for the url (either http or https)
73 depending upon whether https is in the configs value.
74
75 :param configs: OSTemplateRenderer config templating object to inspect
76 for a complete https context.
77 :returns: either 'http' or 'https' depending on whether https is
78 configured within the configs context.
79 """
80 scheme = 'http'
81 if configs and 'https' in configs.complete_contexts():
82 scheme = 'https'
83 return scheme
84
85
86def _get_address_override(endpoint_type=PUBLIC):
87 """Returns any address overrides that the user has defined based on the
88 endpoint type.
89
90 Note: this function allows for the service name to be inserted into the
91 address if the user specifies {service_name}.somehost.org.
92
93 :param endpoint_type: the type of endpoint to retrieve the override
94 value for.
95 :returns: any endpoint address or hostname that the user has overridden
96 or None if an override is not present.
97 """
98 override_key = ADDRESS_MAP[endpoint_type]['override']
99 addr_override = config(override_key)
100 if not addr_override:
101 return None
102 else:
103 return addr_override.format(service_name=service_name())
104
105
106def resolve_address(endpoint_type=PUBLIC):
107 """Return unit address depending on net config.
108
109 If unit is clustered with vip(s) and has net splits defined, return vip on
110 correct network. If clustered with no nets defined, return primary vip.
111
112 If not clustered, return unit address ensuring address is on configured net
113 split if one is configured.
114
115 :param endpoint_type: Network endpoint type
116 """
117 resolved_address = _get_address_override(endpoint_type)
118 if resolved_address:
119 return resolved_address
120
121 vips = config('vip')
122 if vips:
123 vips = vips.split()
124
125 net_type = ADDRESS_MAP[endpoint_type]['config']
126 net_addr = config(net_type)
127 net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
128 clustered = is_clustered()
129 if clustered:
130 if not net_addr:
131 # If no net-splits defined, we expect a single vip
132 resolved_address = vips[0]
133 else:
134 for vip in vips:
135 if is_address_in_network(net_addr, vip):
136 resolved_address = vip
137 break
138 else:
139 if config('prefer-ipv6'):
140 fallback_addr = get_ipv6_addr(exc_list=vips)[0]
141 else:
142 fallback_addr = unit_get(net_fallback)
143
144 resolved_address = get_address_in_network(net_addr, fallback_addr)
145
146 if resolved_address is None:
147 raise ValueError("Unable to resolve a suitable IP address based on "
148 "charm state and configuration. (net_type=%s, "
149 "clustered=%s)" % (net_type, clustered))
150
151 return resolved_address
0152
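A hedged illustration of the override and URL behaviour above; the hostname template and service name are assumptions, and `configs` stands for the charm's config templating object described in the canonical_url docstring:

    from charmhelpers.contrib.openstack.ip import (
        PUBLIC,
        canonical_url,
        resolve_address,
    )

    # With os-public-hostname set to '{service_name}.example.com' on a unit
    # of the 'glance' service, the override path returns:
    resolve_address(PUBLIC)           # -> 'glance.example.com'
    canonical_url(configs, PUBLIC)    # -> 'https://glance.example.com' when
                                      #    the 'https' context is complete,
                                      #    otherwise the http URL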
=== added file 'charmhelpers/contrib/openstack/neutron.py'
--- charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/neutron.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,356 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# Various utilities for dealing with Neutron and the renaming from Quantum.
18
19import six
20from subprocess import check_output
21
22from charmhelpers.core.hookenv import (
23 config,
24 log,
25 ERROR,
26)
27
28from charmhelpers.contrib.openstack.utils import os_release
29
30
31def headers_package():
32 """Ensures correct linux-headers for running kernel are installed,
33 for building DKMS package"""
34 kver = check_output(['uname', '-r']).decode('UTF-8').strip()
35 return 'linux-headers-%s' % kver
36
37QUANTUM_CONF_DIR = '/etc/quantum'
38
39
40def kernel_version():
41 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
42 kver = check_output(['uname', '-r']).decode('UTF-8').strip()
43 kver = kver.split('.')
44 return (int(kver[0]), int(kver[1]))
45
46
47def determine_dkms_package():
48 """ Determine which DKMS package should be used based on kernel version """
49 # NOTE: 3.13 kernels have support for GRE and VXLAN native
50 if kernel_version() >= (3, 13):
51 return []
52 else:
53 return ['openvswitch-datapath-dkms']
54
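A hedged illustration of the kernel-version gate above; the running kernel shown is an assumption:

    from charmhelpers.contrib.openstack.neutron import (
        determine_dkms_package,
        kernel_version,
    )

    # On a 3.13-series kernel no DKMS datapath package is required:
    kernel_version()             # -> (3, 13)
    determine_dkms_package()     # -> []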
55
56# legacy
57
58
59def quantum_plugins():
60 from charmhelpers.contrib.openstack import context
61 return {
62 'ovs': {
63 'config': '/etc/quantum/plugins/openvswitch/'
64 'ovs_quantum_plugin.ini',
65 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
66 'OVSQuantumPluginV2',
67 'contexts': [
68 context.SharedDBContext(user=config('neutron-database-user'),
69 database=config('neutron-database'),
70 relation_prefix='neutron',
71 ssl_dir=QUANTUM_CONF_DIR)],
72 'services': ['quantum-plugin-openvswitch-agent'],
73 'packages': [[headers_package()] + determine_dkms_package(),
74 ['quantum-plugin-openvswitch-agent']],
75 'server_packages': ['quantum-server',
76 'quantum-plugin-openvswitch'],
77 'server_services': ['quantum-server']
78 },
79 'nvp': {
80 'config': '/etc/quantum/plugins/nicira/nvp.ini',
81 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
82 'QuantumPlugin.NvpPluginV2',
83 'contexts': [
84 context.SharedDBContext(user=config('neutron-database-user'),
85 database=config('neutron-database'),
86 relation_prefix='neutron',
87 ssl_dir=QUANTUM_CONF_DIR)],
88 'services': [],
89 'packages': [],
90 'server_packages': ['quantum-server',
91 'quantum-plugin-nicira'],
92 'server_services': ['quantum-server']
93 }
94 }
95
96NEUTRON_CONF_DIR = '/etc/neutron'
97
98
99def neutron_plugins():
100 from charmhelpers.contrib.openstack import context
101 release = os_release('nova-common')
102 plugins = {
103 'ovs': {
104 'config': '/etc/neutron/plugins/openvswitch/'
105 'ovs_neutron_plugin.ini',
106 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
107 'OVSNeutronPluginV2',
108 'contexts': [
109 context.SharedDBContext(user=config('neutron-database-user'),
110 database=config('neutron-database'),
111 relation_prefix='neutron',
112 ssl_dir=NEUTRON_CONF_DIR)],
113 'services': ['neutron-plugin-openvswitch-agent'],
114 'packages': [[headers_package()] + determine_dkms_package(),
115 ['neutron-plugin-openvswitch-agent']],
116 'server_packages': ['neutron-server',
117 'neutron-plugin-openvswitch'],
118 'server_services': ['neutron-server']
119 },
120 'nvp': {
121 'config': '/etc/neutron/plugins/nicira/nvp.ini',
122 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
123 'NeutronPlugin.NvpPluginV2',
124 'contexts': [
125 context.SharedDBContext(user=config('neutron-database-user'),
126 database=config('neutron-database'),
127 relation_prefix='neutron',
128 ssl_dir=NEUTRON_CONF_DIR)],
129 'services': [],
130 'packages': [],
131 'server_packages': ['neutron-server',
132 'neutron-plugin-nicira'],
133 'server_services': ['neutron-server']
134 },
135 'nsx': {
136 'config': '/etc/neutron/plugins/vmware/nsx.ini',
137 'driver': 'vmware',
138 'contexts': [
139 context.SharedDBContext(user=config('neutron-database-user'),
140 database=config('neutron-database'),
141 relation_prefix='neutron',
142 ssl_dir=NEUTRON_CONF_DIR)],
143 'services': [],
144 'packages': [],
145 'server_packages': ['neutron-server',
146 'neutron-plugin-vmware'],
147 'server_services': ['neutron-server']
148 },
149 'n1kv': {
150 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
151 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
152 'contexts': [
153 context.SharedDBContext(user=config('neutron-database-user'),
154 database=config('neutron-database'),
155 relation_prefix='neutron',
156 ssl_dir=NEUTRON_CONF_DIR)],
157 'services': [],
158 'packages': [[headers_package()] + determine_dkms_package(),
159 ['neutron-plugin-cisco']],
160 'server_packages': ['neutron-server',
161 'neutron-plugin-cisco'],
162 'server_services': ['neutron-server']
163 },
164 'Calico': {
165 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
166 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
167 'contexts': [
168 context.SharedDBContext(user=config('neutron-database-user'),
169 database=config('neutron-database'),
170 relation_prefix='neutron',
171 ssl_dir=NEUTRON_CONF_DIR)],
172 'services': ['calico-felix',
173 'bird',
174 'neutron-dhcp-agent',
175 'nova-api-metadata',
176 'etcd'],
177 'packages': [[headers_package()] + determine_dkms_package(),
178 ['calico-compute',
179 'bird',
180 'neutron-dhcp-agent',
181 'nova-api-metadata',
182 'etcd']],
183 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
184 'server_services': ['neutron-server', 'etcd']
185 },
186 'vsp': {
187 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
188 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
189 'contexts': [
190 context.SharedDBContext(user=config('neutron-database-user'),
191 database=config('neutron-database'),
192 relation_prefix='neutron',
193 ssl_dir=NEUTRON_CONF_DIR)],
194 'services': [],
195 'packages': [],
196 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
197 'server_services': ['neutron-server']
198 },
199 'plumgrid': {
200 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
201 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
202 'contexts': [
203 context.SharedDBContext(user=config('database-user'),
204 database=config('database'),
205 ssl_dir=NEUTRON_CONF_DIR)],
206 'services': [],
207 'packages': [['plumgrid-lxc'],
208 ['iovisor-dkms']],
209 'server_packages': ['neutron-server',
210 'neutron-plugin-plumgrid'],
211 'server_services': ['neutron-server']
212 }
213 }
214 if release >= 'icehouse':
215 # NOTE: patch in ml2 plugin for icehouse onwards
216 plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
217 plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
218 plugins['ovs']['server_packages'] = ['neutron-server',
219 'neutron-plugin-ml2']
220 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
221 plugins['nvp'] = plugins['nsx']
222 return plugins
223
224
225def neutron_plugin_attribute(plugin, attr, net_manager=None):
226 manager = net_manager or network_manager()
227 if manager == 'quantum':
228 plugins = quantum_plugins()
229 elif manager == 'neutron':
230 plugins = neutron_plugins()
231 else:
232 log("Network manager '%s' does not support plugins." % (manager),
233 level=ERROR)
234 raise Exception
235
236 try:
237 _plugin = plugins[plugin]
238 except KeyError:
239 log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
240 raise Exception
241
242 try:
243 return _plugin[attr]
244 except KeyError:
245 return None
246
247
248def network_manager():
249 '''
250 Deals with the renaming of Quantum to Neutron in H and any situations
251 that require compatibility (e.g. deploying H with network-manager=quantum,
252 upgrading from G).
253 '''
254 release = os_release('nova-common')
255 manager = config('network-manager').lower()
256
257 if manager not in ['quantum', 'neutron']:
258 return manager
259
260 if release in ['essex']:
261 # E does not support neutron
262 log('Neutron networking not supported in Essex.', level=ERROR)
263 raise Exception
264 elif release in ['folsom', 'grizzly']:
265 # neutron is named quantum in F and G
266 return 'quantum'
267 else:
268 # ensure accurate naming for all releases post-H
269 return 'neutron'
270
271
272def parse_mappings(mappings, key_rvalue=False):
273 """By default mappings are lvalue keyed.
274
275 If key_rvalue is True, the mapping will be reversed to allow multiple
276 configs for the same lvalue.
277 """
278 parsed = {}
279 if mappings:
280 mappings = mappings.split()
281 for m in mappings:
282 p = m.partition(':')
283
284 if key_rvalue:
285 key_index = 2
286 val_index = 0
287 # if there is no rvalue skip to next
288 if not p[1]:
289 continue
290 else:
291 key_index = 0
292 val_index = 2
293
294 key = p[key_index].strip()
295 parsed[key] = p[val_index].strip()
296
297 return parsed
298
299
300def parse_bridge_mappings(mappings):
301 """Parse bridge mappings.
302
303 Mappings must be a space-delimited list of provider:bridge mappings.
304
305 Returns dict of the form {provider:bridge}.
306 """
307 return parse_mappings(mappings)
308
309
310def parse_data_port_mappings(mappings, default_bridge='br-data'):
311 """Parse data port mappings.
312
313 Mappings must be a space-delimited list of port:bridge mappings.
314
315 Returns dict of the form {port:bridge} where port may be a MAC address or
316 interface name.
317 """
318
319 # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
320 # proposed for <port> since it may be a mac address which will differ
321 # across units, thus allowing first-known-good to be chosen.
322 _mappings = parse_mappings(mappings, key_rvalue=True)
323 if not _mappings or list(_mappings.values()) == ['']:
324 if not mappings:
325 return {}
326
327 # For backwards-compatibility we need to support port-only provided in
328 # config.
329 _mappings = {mappings.split()[0]: default_bridge}
330
331 ports = _mappings.keys()
332 if len(set(ports)) != len(ports):
333 raise Exception("It is not allowed to have the same port configured "
334 "on more than one bridge")
335
336 return _mappings
337
338
339def parse_vlan_range_mappings(mappings):
340 """Parse vlan range mappings.
341
342 Mappings must be a space-delimited list of provider:start:end mappings.
343
344 The start:end range is optional and may be omitted.
345
346 Returns dict of the form {provider: (start, end)}.
347 """
348 _mappings = parse_mappings(mappings)
349 if not _mappings:
350 return {}
351
352 mappings = {}
353 for p, r in six.iteritems(_mappings):
354 mappings[p] = tuple(r.split(':'))
355
356 return mappings
0357
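A hedged sketch of the simpler parsing helpers above; the provider, bridge and VLAN-range strings are purely illustrative:

    from charmhelpers.contrib.openstack.neutron import (
        parse_bridge_mappings,
        parse_vlan_range_mappings,
    )

    parse_bridge_mappings('physnet1:br-ex')
    # -> {'physnet1': 'br-ex'}

    parse_vlan_range_mappings('physnet1:1000:2000 physnet2')
    # -> {'physnet1': ('1000', '2000'), 'physnet2': ('',)}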
=== added directory 'charmhelpers/contrib/openstack/templates'
=== added file 'charmhelpers/contrib/openstack/templates/__init__.py'
--- charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/templates/__init__.py 2015-10-08 20:44:37 +0000
@@ -0,0 +1,18 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17# dummy __init__.py to fool syncer into thinking this is a syncable python
18# module
019
=== added file 'charmhelpers/contrib/openstack/templates/ceph.conf'
--- charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/templates/ceph.conf 2015-10-08 20:44:37 +0000
@@ -0,0 +1,21 @@
1###############################################################################
2# [ WARNING ]
3# cinder configuration file maintained by Juju
4# local changes may be overwritten.
5###############################################################################
6[global]
7{% if auth -%}
8auth_supported = {{ auth }}
9keyring = /etc/ceph/$cluster.$name.keyring
10mon host = {{ mon_hosts }}
11{% endif -%}
12log to syslog = {{ use_syslog }}
13err to syslog = {{ use_syslog }}
14clog to syslog = {{ use_syslog }}
15
16[client]
17{% if rbd_client_cache_settings -%}
18{% for key, value in rbd_client_cache_settings.iteritems() -%}
19{{ key }} = {{ value }}
20{% endfor -%}
21{%- endif %}
0\ No newline at end of file22\ No newline at end of file
123
=== added file 'charmhelpers/contrib/openstack/templates/git.upstart'
--- charmhelpers/contrib/openstack/templates/git.upstart 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/templates/git.upstart 2015-10-08 20:44:37 +0000
@@ -0,0 +1,17 @@
1description "{{ service_description }}"
2author "Juju {{ service_name }} Charm <juju@localhost>"
3
4start on runlevel [2345]
5stop on runlevel [!2345]
6
7respawn
8
9exec start-stop-daemon --start --chuid {{ user_name }} \
10 --chdir {{ start_dir }} --name {{ process_name }} \
11 --exec {{ executable_name }} -- \
12 {% for config_file in config_files -%}
13 --config-file={{ config_file }} \
14 {% endfor -%}
15 {% if log_file -%}
16 --log-file={{ log_file }}
17 {% endif -%}
018
=== added file 'charmhelpers/contrib/openstack/templates/haproxy.cfg'
--- charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000
+++ charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-10-08 20:44:37 +0000
@@ -0,0 +1,58 @@
1global
2 log {{ local_host }} local0
3 log {{ local_host }} local1 notice
4 maxconn 20000
5 user haproxy
6 group haproxy
The diff has been truncated for viewing.
