Merge lp:~freyes/charms/trusty/glance-simplestreams-sync/lp1434356 into lp:~openstack-charmers/charms/trusty/glance-simplestreams-sync/next

Proposed by Felipe Reyes
Status: Merged
Merged at revision: 56
Proposed branch: lp:~freyes/charms/trusty/glance-simplestreams-sync/lp1434356
Merge into: lp:~openstack-charmers/charms/trusty/glance-simplestreams-sync/next
Diff against target: 25126 lines (+11877/-12381)
152 files modified
.bzrignore (+4/-0)
.testr.conf (+8/-0)
Makefile (+21/-5)
charm-helpers-sync.yaml (+4/-1)
charmhelpers/__init__.py (+38/-0)
charmhelpers/contrib/__init__.py (+15/-0)
charmhelpers/contrib/charmsupport/__init__.py (+15/-0)
charmhelpers/contrib/charmsupport/nrpe.py (+360/-0)
charmhelpers/contrib/charmsupport/volumes.py (+175/-0)
charmhelpers/contrib/hahelpers/__init__.py (+15/-0)
charmhelpers/contrib/hahelpers/apache.py (+82/-0)
charmhelpers/contrib/hahelpers/cluster.py (+316/-0)
charmhelpers/contrib/network/__init__.py (+15/-0)
charmhelpers/contrib/network/ip.py (+456/-0)
charmhelpers/contrib/openstack/__init__.py (+15/-0)
charmhelpers/contrib/openstack/alternatives.py (+33/-0)
charmhelpers/contrib/openstack/amulet/__init__.py (+15/-0)
charmhelpers/contrib/openstack/amulet/deployment.py (+197/-0)
charmhelpers/contrib/openstack/amulet/utils.py (+963/-0)
charmhelpers/contrib/openstack/context.py (+1427/-0)
charmhelpers/contrib/openstack/files/__init__.py (+18/-0)
charmhelpers/contrib/openstack/files/check_haproxy.sh (+32/-0)
charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+30/-0)
charmhelpers/contrib/openstack/ip.py (+151/-0)
charmhelpers/contrib/openstack/neutron.py (+356/-0)
charmhelpers/contrib/openstack/templates/__init__.py (+18/-0)
charmhelpers/contrib/openstack/templates/ceph.conf (+21/-0)
charmhelpers/contrib/openstack/templates/git.upstart (+17/-0)
charmhelpers/contrib/openstack/templates/haproxy.cfg (+58/-0)
charmhelpers/contrib/openstack/templates/openstack_https_frontend (+24/-0)
charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+24/-0)
charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+9/-0)
charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+22/-0)
charmhelpers/contrib/openstack/templates/section-zeromq (+14/-0)
charmhelpers/contrib/openstack/templating.py (+323/-0)
charmhelpers/contrib/openstack/utils.py (+977/-0)
charmhelpers/contrib/python/__init__.py (+15/-0)
charmhelpers/contrib/python/packages.py (+121/-0)
charmhelpers/contrib/storage/__init__.py (+15/-0)
charmhelpers/contrib/storage/linux/__init__.py (+15/-0)
charmhelpers/contrib/storage/linux/ceph.py (+657/-0)
charmhelpers/contrib/storage/linux/loopback.py (+78/-0)
charmhelpers/contrib/storage/linux/lvm.py (+105/-0)
charmhelpers/contrib/storage/linux/utils.py (+71/-0)
charmhelpers/core/__init__.py (+15/-0)
charmhelpers/core/decorators.py (+57/-0)
charmhelpers/core/files.py (+45/-0)
charmhelpers/core/fstab.py (+134/-0)
charmhelpers/core/hookenv.py (+930/-0)
charmhelpers/core/host.py (+586/-0)
charmhelpers/core/hugepage.py (+69/-0)
charmhelpers/core/kernel.py (+68/-0)
charmhelpers/core/services/__init__.py (+18/-0)
charmhelpers/core/services/base.py (+353/-0)
charmhelpers/core/services/helpers.py (+283/-0)
charmhelpers/core/strutils.py (+72/-0)
charmhelpers/core/sysctl.py (+56/-0)
charmhelpers/core/templating.py (+68/-0)
charmhelpers/core/unitdata.py (+521/-0)
charmhelpers/fetch/__init__.py (+456/-0)
charmhelpers/fetch/archiveurl.py (+167/-0)
charmhelpers/fetch/bzrurl.py (+78/-0)
charmhelpers/fetch/giturl.py (+73/-0)
charmhelpers/payload/__init__.py (+17/-0)
charmhelpers/payload/archive.py (+73/-0)
charmhelpers/payload/execd.py (+66/-0)
hooks/charmhelpers/__init__.py (+0/-38)
hooks/charmhelpers/cli/README.rst (+0/-57)
hooks/charmhelpers/cli/__init__.py (+0/-147)
hooks/charmhelpers/cli/commands.py (+0/-2)
hooks/charmhelpers/cli/host.py (+0/-15)
hooks/charmhelpers/contrib/__init__.py (+0/-15)
hooks/charmhelpers/contrib/ansible/__init__.py (+0/-165)
hooks/charmhelpers/contrib/charmhelpers/IMPORT (+0/-4)
hooks/charmhelpers/contrib/charmhelpers/__init__.py (+0/-184)
hooks/charmhelpers/contrib/charmsupport/__init__.py (+0/-15)
hooks/charmhelpers/contrib/charmsupport/nrpe.py (+0/-360)
hooks/charmhelpers/contrib/charmsupport/volumes.py (+0/-175)
hooks/charmhelpers/contrib/hahelpers/apache.py (+0/-59)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+0/-183)
hooks/charmhelpers/contrib/jujugui/IMPORT (+0/-4)
hooks/charmhelpers/contrib/jujugui/utils.py (+0/-602)
hooks/charmhelpers/contrib/network/__init__.py (+0/-15)
hooks/charmhelpers/contrib/network/ip.py (+0/-454)
hooks/charmhelpers/contrib/network/ovs/__init__.py (+0/-75)
hooks/charmhelpers/contrib/openstack/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/alternatives.py (+0/-33)
hooks/charmhelpers/contrib/openstack/amulet/__init__.py (+0/-15)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+0/-183)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+0/-604)
hooks/charmhelpers/contrib/openstack/context.py (+0/-1372)
hooks/charmhelpers/contrib/openstack/files/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+0/-32)
hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+0/-30)
hooks/charmhelpers/contrib/openstack/ip.py (+0/-151)
hooks/charmhelpers/contrib/openstack/neutron.py (+0/-356)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+0/-18)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+0/-15)
hooks/charmhelpers/contrib/openstack/templates/git.upstart (+0/-17)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+0/-58)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+0/-24)
hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+0/-9)
hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+0/-22)
hooks/charmhelpers/contrib/openstack/templates/section-zeromq (+0/-14)
hooks/charmhelpers/contrib/openstack/templating.py (+0/-295)
hooks/charmhelpers/contrib/openstack/utils.py (+0/-751)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+0/-83)
hooks/charmhelpers/contrib/python/__init__.py (+0/-15)
hooks/charmhelpers/contrib/python/packages.py (+0/-121)
hooks/charmhelpers/contrib/python/version.py (+0/-18)
hooks/charmhelpers/contrib/saltstack/__init__.py (+0/-102)
hooks/charmhelpers/contrib/ssl/__init__.py (+0/-78)
hooks/charmhelpers/contrib/ssl/service.py (+0/-267)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+0/-387)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+0/-62)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+0/-88)
hooks/charmhelpers/contrib/storage/linux/utils.py (+0/-49)
hooks/charmhelpers/contrib/templating/contexts.py (+0/-104)
hooks/charmhelpers/contrib/templating/pyformat.py (+0/-13)
hooks/charmhelpers/contrib/unison/__init__.py (+0/-257)
hooks/charmhelpers/core/__init__.py (+0/-15)
hooks/charmhelpers/core/decorators.py (+0/-57)
hooks/charmhelpers/core/files.py (+0/-45)
hooks/charmhelpers/core/fstab.py (+0/-134)
hooks/charmhelpers/core/hookenv.py (+0/-898)
hooks/charmhelpers/core/host.py (+0/-586)
hooks/charmhelpers/core/hugepage.py (+0/-62)
hooks/charmhelpers/core/kernel.py (+0/-68)
hooks/charmhelpers/core/services/__init__.py (+0/-18)
hooks/charmhelpers/core/services/base.py (+0/-353)
hooks/charmhelpers/core/services/helpers.py (+0/-283)
hooks/charmhelpers/core/strutils.py (+0/-42)
hooks/charmhelpers/core/sysctl.py (+0/-56)
hooks/charmhelpers/core/templating.py (+0/-68)
hooks/charmhelpers/core/unitdata.py (+0/-521)
hooks/charmhelpers/fetch/__init__.py (+0/-456)
hooks/charmhelpers/fetch/archiveurl.py (+0/-167)
hooks/charmhelpers/fetch/bzrurl.py (+0/-78)
hooks/charmhelpers/fetch/giturl.py (+0/-73)
hooks/charmhelpers/payload/__init__.py (+0/-1)
hooks/charmhelpers/payload/archive.py (+0/-57)
hooks/charmhelpers/payload/execd.py (+0/-50)
hooks/hooks.py (+23/-17)
metadata.yaml (+2/-1)
requirements/requirements-precise.txt (+8/-0)
requirements/requirements-trusty.txt (+8/-0)
requirements/test-requirements.txt (+6/-0)
setup.cfg (+6/-0)
tox.ini (+36/-0)
unit_tests/test_hooks.py (+105/-0)
unit_tests/test_utils.py (+133/-0)
To merge this branch: bzr merge lp:~freyes/charms/trusty/glance-simplestreams-sync/lp1434356
Reviewer: Billy Olsen (Approve)
Review via email: mp+271384@code.launchpad.net

Description of the change

Dear OpenStack Charmers,

This MP refactors the config-changed hook handler to fix LP: #1434356 and adds unit tests.
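
For readers less familiar with charm internals: the config-changed handler lives in hooks/hooks.py (modified in this MP, +23/-17) and is typically registered through charmhelpers' Hooks dispatcher. The snippet below is only a minimal sketch of that general dispatch pattern, not the actual handler from this branch; the handler body and the log message are invented placeholders.

    import sys

    from charmhelpers.core.hookenv import (
        Hooks,
        UnregisteredHookError,
        config,
        log,
    )

    hooks = Hooks()


    @hooks.hook('config-changed')
    def config_changed():
        # Placeholder body only: the real handler (the one refactored by
        # this MP) re-applies the charm configuration; see hooks/hooks.py
        # in the file list above for the actual change.
        cfg = config()
        log('config-changed ran with %d config keys' % len(cfg.keys()))


    if __name__ == '__main__':
        try:
            hooks.execute(sys.argv)
        except UnregisteredHookError as e:
            log('Unknown hook {} - skipping.'.format(e))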

Best Regards,

Billy Olsen (billy-olsen) wrote :

Felipe, thanks for the submission! A few comments inline below, thanks!

review: Needs Fixing
Felipe Reyes (freyes) wrote :

Billy, thanks for taking the time to review it; I'll integrate tox.

Felipe Reyes (freyes) wrote :

Billy, I included tox support in this MP and it's ready for review.

Best,

67. By Felipe Reyes

Use ostestr when running 'make unit_tests'

This change keeps consistency between Makefile and tox

68. By Felipe Reyes

Adjust mock patches from 'charmhelpers' to 'hooks.charmhelpers'
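
In practice this means the test decorators now name the patched helper as it is reached from the hooks module instead of as a top-level package. The example below is purely illustrative and is not code from unit_tests/test_hooks.py; it assumes the MP's test environment (mock available, PYTHONPATH including hooks/ as set by the Makefile, and hooks.py importing charmhelpers as a package so the attribute chain resolves).

    import unittest

    import mock


    class PatchTargetExample(unittest.TestCase):
        """Sketch of the patch-target change described in this revision."""

        # Before this revision the target string named the package directly:
        @mock.patch('charmhelpers.core.hookenv.config')
        def test_old_style_target(self, config):
            config.return_value = {}
            from charmhelpers.core import hookenv
            self.assertEqual(hookenv.config(), {})

        # Afterwards the helper is patched where the hook module sees it,
        # i.e. resolved through the 'hooks' namespace:
        @mock.patch('hooks.charmhelpers.core.hookenv.config')
        def test_new_style_target(self, config):
            config.return_value = {}
            import hooks
            self.assertEqual(hooks.charmhelpers.core.hookenv.config(), {})

Either way, the target string is only resolved when the test runs, so it has to match how hooks.py actually imports the helper.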

69. By Felipe Reyes

Add sitepackages=True to tox.ini

Billy Olsen (billy-olsen) wrote :

LGTM. Approved - thanks Felipe!

review: Approve

Preview Diff

1=== modified file '.bzrignore'
2--- .bzrignore 2015-09-08 16:25:57 +0000
3+++ .bzrignore 2015-10-08 20:44:37 +0000
4@@ -1,1 +1,5 @@
5 bin
6+.coverage
7+.venv
8+.testrepository/
9+.tox/
10
11=== added file '.testr.conf'
12--- .testr.conf 1970-01-01 00:00:00 +0000
13+++ .testr.conf 2015-10-08 20:44:37 +0000
14@@ -0,0 +1,8 @@
15+[DEFAULT]
16+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
17+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
18+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
19+ ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
20+
21+test_id_option=--load-list $IDFILE
22+test_list_option=--list
23
24=== modified file 'Makefile'
25--- Makefile 2014-06-18 17:32:48 +0000
26+++ Makefile 2015-10-08 20:44:37 +0000
27@@ -1,13 +1,29 @@
28 #!/usr/bin/make
29 PYTHON := /usr/bin/env python
30-
31-lint:
32- @pyflakes hooks/*.py scripts/*.py
33+CHARM_DIR := $(PWD)
34+HOOKS_DIR := $(PWD)/hooks
35+TEST_PREFIX := PYTHONPATH=$(HOOKS_DIR)
36+
37+clean:
38+ rm -f .coverage
39+ find . -name '*.pyc' -delete
40+ rm -rf .venv
41+ (which dh_clean && dh_clean) || true
42+
43+.venv:
44+ dpkg -s gcc python-dev python-virtualenv python-apt > /dev/null || sudo apt-get install -y gcc python-dev python-virtualenv python-apt
45+ virtualenv .venv --system-site-packages
46+ .venv/bin/pip install -I \
47+ -r requirements/requirements-trusty.txt \
48+ -r requirements/test-requirements.txt
49+
50+lint: clean .venv
51+ .venv/bin/flake8 hooks/*.py scripts/*.py unit_tests/*.py
52 @charm proof
53
54-test:
55+unit_tests: clean .venv
56 @echo Starting tests...
57- @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage -v unit_tests
58+ env CHARM_DIR=$(CHARM_DIR) $(TEST_PREFIX) .venv/bin/ostestr
59
60
61 bin/charm_helpers_sync.py:
62
63=== modified file 'charm-helpers-sync.yaml'
64--- charm-helpers-sync.yaml 2015-09-08 16:39:40 +0000
65+++ charm-helpers-sync.yaml 2015-10-08 20:44:37 +0000
66@@ -1,9 +1,12 @@
67 branch: lp:charm-helpers
68-destination: hooks/charmhelpers
69+destination: charmhelpers
70 include:
71 - core
72 - fetch
73+ - payload
74 - contrib.openstack|inc=*
75 - contrib.charmsupport
76 - contrib.network.ip
77 - contrib.python.packages
78+ - contrib.hahelpers
79+ - contrib.storage
80
81=== added directory 'charmhelpers'
82=== added file 'charmhelpers/__init__.py'
83--- charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
84+++ charmhelpers/__init__.py 2015-10-08 20:44:37 +0000
85@@ -0,0 +1,38 @@
86+# Copyright 2014-2015 Canonical Limited.
87+#
88+# This file is part of charm-helpers.
89+#
90+# charm-helpers is free software: you can redistribute it and/or modify
91+# it under the terms of the GNU Lesser General Public License version 3 as
92+# published by the Free Software Foundation.
93+#
94+# charm-helpers is distributed in the hope that it will be useful,
95+# but WITHOUT ANY WARRANTY; without even the implied warranty of
96+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
97+# GNU Lesser General Public License for more details.
98+#
99+# You should have received a copy of the GNU Lesser General Public License
100+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
101+
102+# Bootstrap charm-helpers, installing its dependencies if necessary using
103+# only standard libraries.
104+import subprocess
105+import sys
106+
107+try:
108+ import six # flake8: noqa
109+except ImportError:
110+ if sys.version_info.major == 2:
111+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
112+ else:
113+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
114+ import six # flake8: noqa
115+
116+try:
117+ import yaml # flake8: noqa
118+except ImportError:
119+ if sys.version_info.major == 2:
120+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
121+ else:
122+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
123+ import yaml # flake8: noqa
124
125=== added directory 'charmhelpers/contrib'
126=== added file 'charmhelpers/contrib/__init__.py'
127--- charmhelpers/contrib/__init__.py 1970-01-01 00:00:00 +0000
128+++ charmhelpers/contrib/__init__.py 2015-10-08 20:44:37 +0000
129@@ -0,0 +1,15 @@
130+# Copyright 2014-2015 Canonical Limited.
131+#
132+# This file is part of charm-helpers.
133+#
134+# charm-helpers is free software: you can redistribute it and/or modify
135+# it under the terms of the GNU Lesser General Public License version 3 as
136+# published by the Free Software Foundation.
137+#
138+# charm-helpers is distributed in the hope that it will be useful,
139+# but WITHOUT ANY WARRANTY; without even the implied warranty of
140+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
141+# GNU Lesser General Public License for more details.
142+#
143+# You should have received a copy of the GNU Lesser General Public License
144+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
145
146=== added directory 'charmhelpers/contrib/charmsupport'
147=== added file 'charmhelpers/contrib/charmsupport/__init__.py'
148--- charmhelpers/contrib/charmsupport/__init__.py 1970-01-01 00:00:00 +0000
149+++ charmhelpers/contrib/charmsupport/__init__.py 2015-10-08 20:44:37 +0000
150@@ -0,0 +1,15 @@
151+# Copyright 2014-2015 Canonical Limited.
152+#
153+# This file is part of charm-helpers.
154+#
155+# charm-helpers is free software: you can redistribute it and/or modify
156+# it under the terms of the GNU Lesser General Public License version 3 as
157+# published by the Free Software Foundation.
158+#
159+# charm-helpers is distributed in the hope that it will be useful,
160+# but WITHOUT ANY WARRANTY; without even the implied warranty of
161+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
162+# GNU Lesser General Public License for more details.
163+#
164+# You should have received a copy of the GNU Lesser General Public License
165+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
166
167=== added file 'charmhelpers/contrib/charmsupport/nrpe.py'
168--- charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
169+++ charmhelpers/contrib/charmsupport/nrpe.py 2015-10-08 20:44:37 +0000
170@@ -0,0 +1,360 @@
171+# Copyright 2014-2015 Canonical Limited.
172+#
173+# This file is part of charm-helpers.
174+#
175+# charm-helpers is free software: you can redistribute it and/or modify
176+# it under the terms of the GNU Lesser General Public License version 3 as
177+# published by the Free Software Foundation.
178+#
179+# charm-helpers is distributed in the hope that it will be useful,
180+# but WITHOUT ANY WARRANTY; without even the implied warranty of
181+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
182+# GNU Lesser General Public License for more details.
183+#
184+# You should have received a copy of the GNU Lesser General Public License
185+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
186+
187+"""Compatibility with the nrpe-external-master charm"""
188+# Copyright 2012 Canonical Ltd.
189+#
190+# Authors:
191+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
192+
193+import subprocess
194+import pwd
195+import grp
196+import os
197+import glob
198+import shutil
199+import re
200+import shlex
201+import yaml
202+
203+from charmhelpers.core.hookenv import (
204+ config,
205+ local_unit,
206+ log,
207+ relation_ids,
208+ relation_set,
209+ relations_of_type,
210+)
211+
212+from charmhelpers.core.host import service
213+
214+# This module adds compatibility with the nrpe-external-master and plain nrpe
215+# subordinate charms. To use it in your charm:
216+#
217+# 1. Update metadata.yaml
218+#
219+# provides:
220+# (...)
221+# nrpe-external-master:
222+# interface: nrpe-external-master
223+# scope: container
224+#
225+# and/or
226+#
227+# provides:
228+# (...)
229+# local-monitors:
230+# interface: local-monitors
231+# scope: container
232+
233+#
234+# 2. Add the following to config.yaml
235+#
236+# nagios_context:
237+# default: "juju"
238+# type: string
239+# description: |
240+# Used by the nrpe subordinate charms.
241+# A string that will be prepended to instance name to set the host name
242+# in nagios. So for instance the hostname would be something like:
243+# juju-myservice-0
244+# If you're running multiple environments with the same services in them
245+# this allows you to differentiate between them.
246+# nagios_servicegroups:
247+# default: ""
248+# type: string
249+# description: |
250+# A comma-separated list of nagios servicegroups.
251+# If left empty, the nagios_context will be used as the servicegroup
252+#
253+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
254+#
255+# 4. Update your hooks.py with something like this:
256+#
257+# from charmsupport.nrpe import NRPE
258+# (...)
259+# def update_nrpe_config():
260+# nrpe_compat = NRPE()
261+# nrpe_compat.add_check(
262+# shortname = "myservice",
263+# description = "Check MyService",
264+# check_cmd = "check_http -w 2 -c 10 http://localhost"
265+# )
266+# nrpe_compat.add_check(
267+# "myservice_other",
268+# "Check for widget failures",
269+# check_cmd = "/srv/myapp/scripts/widget_check"
270+# )
271+# nrpe_compat.write()
272+#
273+# def config_changed():
274+# (...)
275+# update_nrpe_config()
276+#
277+# def nrpe_external_master_relation_changed():
278+# update_nrpe_config()
279+#
280+# def local_monitors_relation_changed():
281+# update_nrpe_config()
282+#
283+# 5. ln -s hooks.py nrpe-external-master-relation-changed
284+# ln -s hooks.py local-monitors-relation-changed
285+
286+
287+class CheckException(Exception):
288+ pass
289+
290+
291+class Check(object):
292+ shortname_re = '[A-Za-z0-9-_]+$'
293+ service_template = ("""
294+#---------------------------------------------------
295+# This file is Juju managed
296+#---------------------------------------------------
297+define service {{
298+ use active-service
299+ host_name {nagios_hostname}
300+ service_description {nagios_hostname}[{shortname}] """
301+ """{description}
302+ check_command check_nrpe!{command}
303+ servicegroups {nagios_servicegroup}
304+}}
305+""")
306+
307+ def __init__(self, shortname, description, check_cmd):
308+ super(Check, self).__init__()
309+ # XXX: could be better to calculate this from the service name
310+ if not re.match(self.shortname_re, shortname):
311+ raise CheckException("shortname must match {}".format(
312+ Check.shortname_re))
313+ self.shortname = shortname
314+ self.command = "check_{}".format(shortname)
315+ # Note: a set of invalid characters is defined by the
316+ # Nagios server config
317+ # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
318+ self.description = description
319+ self.check_cmd = self._locate_cmd(check_cmd)
320+
321+ def _locate_cmd(self, check_cmd):
322+ search_path = (
323+ '/usr/lib/nagios/plugins',
324+ '/usr/local/lib/nagios/plugins',
325+ )
326+ parts = shlex.split(check_cmd)
327+ for path in search_path:
328+ if os.path.exists(os.path.join(path, parts[0])):
329+ command = os.path.join(path, parts[0])
330+ if len(parts) > 1:
331+ command += " " + " ".join(parts[1:])
332+ return command
333+ log('Check command not found: {}'.format(parts[0]))
334+ return ''
335+
336+ def write(self, nagios_context, hostname, nagios_servicegroups):
337+ nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
338+ self.command)
339+ with open(nrpe_check_file, 'w') as nrpe_check_config:
340+ nrpe_check_config.write("# check {}\n".format(self.shortname))
341+ nrpe_check_config.write("command[{}]={}\n".format(
342+ self.command, self.check_cmd))
343+
344+ if not os.path.exists(NRPE.nagios_exportdir):
345+ log('Not writing service config as {} is not accessible'.format(
346+ NRPE.nagios_exportdir))
347+ else:
348+ self.write_service_config(nagios_context, hostname,
349+ nagios_servicegroups)
350+
351+ def write_service_config(self, nagios_context, hostname,
352+ nagios_servicegroups):
353+ for f in os.listdir(NRPE.nagios_exportdir):
354+ if re.search('.*{}.cfg'.format(self.command), f):
355+ os.remove(os.path.join(NRPE.nagios_exportdir, f))
356+
357+ templ_vars = {
358+ 'nagios_hostname': hostname,
359+ 'nagios_servicegroup': nagios_servicegroups,
360+ 'description': self.description,
361+ 'shortname': self.shortname,
362+ 'command': self.command,
363+ }
364+ nrpe_service_text = Check.service_template.format(**templ_vars)
365+ nrpe_service_file = '{}/service__{}_{}.cfg'.format(
366+ NRPE.nagios_exportdir, hostname, self.command)
367+ with open(nrpe_service_file, 'w') as nrpe_service_config:
368+ nrpe_service_config.write(str(nrpe_service_text))
369+
370+ def run(self):
371+ subprocess.call(self.check_cmd)
372+
373+
374+class NRPE(object):
375+ nagios_logdir = '/var/log/nagios'
376+ nagios_exportdir = '/var/lib/nagios/export'
377+ nrpe_confdir = '/etc/nagios/nrpe.d'
378+
379+ def __init__(self, hostname=None):
380+ super(NRPE, self).__init__()
381+ self.config = config()
382+ self.nagios_context = self.config['nagios_context']
383+ if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
384+ self.nagios_servicegroups = self.config['nagios_servicegroups']
385+ else:
386+ self.nagios_servicegroups = self.nagios_context
387+ self.unit_name = local_unit().replace('/', '-')
388+ if hostname:
389+ self.hostname = hostname
390+ else:
391+ self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
392+ self.checks = []
393+
394+ def add_check(self, *args, **kwargs):
395+ self.checks.append(Check(*args, **kwargs))
396+
397+ def write(self):
398+ try:
399+ nagios_uid = pwd.getpwnam('nagios').pw_uid
400+ nagios_gid = grp.getgrnam('nagios').gr_gid
401+ except:
402+ log("Nagios user not set up, nrpe checks not updated")
403+ return
404+
405+ if not os.path.exists(NRPE.nagios_logdir):
406+ os.mkdir(NRPE.nagios_logdir)
407+ os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
408+
409+ nrpe_monitors = {}
410+ monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
411+ for nrpecheck in self.checks:
412+ nrpecheck.write(self.nagios_context, self.hostname,
413+ self.nagios_servicegroups)
414+ nrpe_monitors[nrpecheck.shortname] = {
415+ "command": nrpecheck.command,
416+ }
417+
418+ service('restart', 'nagios-nrpe-server')
419+
420+ monitor_ids = relation_ids("local-monitors") + \
421+ relation_ids("nrpe-external-master")
422+ for rid in monitor_ids:
423+ relation_set(relation_id=rid, monitors=yaml.dump(monitors))
424+
425+
426+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
427+ """
428+ Query relation with nrpe subordinate, return the nagios_host_context
429+
430+ :param str relation_name: Name of relation nrpe sub joined to
431+ """
432+ for rel in relations_of_type(relation_name):
433+ if 'nagios_hostname' in rel:
434+ return rel['nagios_host_context']
435+
436+
437+def get_nagios_hostname(relation_name='nrpe-external-master'):
438+ """
439+ Query relation with nrpe subordinate, return the nagios_hostname
440+
441+ :param str relation_name: Name of relation nrpe sub joined to
442+ """
443+ for rel in relations_of_type(relation_name):
444+ if 'nagios_hostname' in rel:
445+ return rel['nagios_hostname']
446+
447+
448+def get_nagios_unit_name(relation_name='nrpe-external-master'):
449+ """
450+ Return the nagios unit name prepended with host_context if needed
451+
452+ :param str relation_name: Name of relation nrpe sub joined to
453+ """
454+ host_context = get_nagios_hostcontext(relation_name)
455+ if host_context:
456+ unit = "%s:%s" % (host_context, local_unit())
457+ else:
458+ unit = local_unit()
459+ return unit
460+
461+
462+def add_init_service_checks(nrpe, services, unit_name):
463+ """
464+ Add checks for each service in list
465+
466+ :param NRPE nrpe: NRPE object to add check to
467+ :param list services: List of services to check
468+ :param str unit_name: Unit name to use in check description
469+ """
470+ for svc in services:
471+ upstart_init = '/etc/init/%s.conf' % svc
472+ sysv_init = '/etc/init.d/%s' % svc
473+ if os.path.exists(upstart_init):
474+ nrpe.add_check(
475+ shortname=svc,
476+ description='process check {%s}' % unit_name,
477+ check_cmd='check_upstart_job %s' % svc
478+ )
479+ elif os.path.exists(sysv_init):
480+ cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
481+ cron_file = ('*/5 * * * * root '
482+ '/usr/local/lib/nagios/plugins/check_exit_status.pl '
483+ '-s /etc/init.d/%s status > '
484+ '/var/lib/nagios/service-check-%s.txt\n' % (svc,
485+ svc)
486+ )
487+ f = open(cronpath, 'w')
488+ f.write(cron_file)
489+ f.close()
490+ nrpe.add_check(
491+ shortname=svc,
492+ description='process check {%s}' % unit_name,
493+ check_cmd='check_status_file.py -f '
494+ '/var/lib/nagios/service-check-%s.txt' % svc,
495+ )
496+
497+
498+def copy_nrpe_checks():
499+ """
500+ Copy the nrpe checks into place
501+
502+ """
503+ NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
504+ nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
505+ 'charmhelpers', 'contrib', 'openstack',
506+ 'files')
507+
508+ if not os.path.exists(NAGIOS_PLUGINS):
509+ os.makedirs(NAGIOS_PLUGINS)
510+ for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
511+ if os.path.isfile(fname):
512+ shutil.copy2(fname,
513+ os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
514+
515+
516+def add_haproxy_checks(nrpe, unit_name):
517+ """
518+ Add checks for each service in list
519+
520+ :param NRPE nrpe: NRPE object to add check to
521+ :param str unit_name: Unit name to use in check description
522+ """
523+ nrpe.add_check(
524+ shortname='haproxy_servers',
525+ description='Check HAProxy {%s}' % unit_name,
526+ check_cmd='check_haproxy.sh')
527+ nrpe.add_check(
528+ shortname='haproxy_queue',
529+ description='Check HAProxy queue depth {%s}' % unit_name,
530+ check_cmd='check_haproxy_queue_depth.sh')
531
532=== added file 'charmhelpers/contrib/charmsupport/volumes.py'
533--- charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000
534+++ charmhelpers/contrib/charmsupport/volumes.py 2015-10-08 20:44:37 +0000
535@@ -0,0 +1,175 @@
536+# Copyright 2014-2015 Canonical Limited.
537+#
538+# This file is part of charm-helpers.
539+#
540+# charm-helpers is free software: you can redistribute it and/or modify
541+# it under the terms of the GNU Lesser General Public License version 3 as
542+# published by the Free Software Foundation.
543+#
544+# charm-helpers is distributed in the hope that it will be useful,
545+# but WITHOUT ANY WARRANTY; without even the implied warranty of
546+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
547+# GNU Lesser General Public License for more details.
548+#
549+# You should have received a copy of the GNU Lesser General Public License
550+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
551+
552+'''
553+Functions for managing volumes in juju units. One volume is supported per unit.
554+Subordinates may have their own storage, provided it is on its own partition.
555+
556+Configuration stanzas::
557+
558+ volume-ephemeral:
559+ type: boolean
560+ default: true
561+ description: >
562+ If false, a volume is mounted as specified in "volume-map"
563+ If true, ephemeral storage will be used, meaning that log data
564+ will only exist as long as the machine. YOU HAVE BEEN WARNED.
565+ volume-map:
566+ type: string
567+ default: {}
568+ description: >
569+ YAML map of units to device names, e.g:
570+ "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
571+ Service units will raise a configure-error if volume-ephemeral
572+ is 'true' and no volume-map value is set. Use 'juju set' to set a
573+ value and 'juju resolved' to complete configuration.
574+
575+Usage::
576+
577+ from charmsupport.volumes import configure_volume, VolumeConfigurationError
578+ from charmsupport.hookenv import log, ERROR
579+ def post_mount_hook():
580+ stop_service('myservice')
581+ def post_mount_hook():
582+ start_service('myservice')
583+
584+ if __name__ == '__main__':
585+ try:
586+ configure_volume(before_change=pre_mount_hook,
587+ after_change=post_mount_hook)
588+ except VolumeConfigurationError:
589+ log('Storage could not be configured', ERROR)
590+
591+'''
592+
593+# XXX: Known limitations
594+# - fstab is neither consulted nor updated
595+
596+import os
597+from charmhelpers.core import hookenv
598+from charmhelpers.core import host
599+import yaml
600+
601+
602+MOUNT_BASE = '/srv/juju/volumes'
603+
604+
605+class VolumeConfigurationError(Exception):
606+ '''Volume configuration data is missing or invalid'''
607+ pass
608+
609+
610+def get_config():
611+ '''Gather and sanity-check volume configuration data'''
612+ volume_config = {}
613+ config = hookenv.config()
614+
615+ errors = False
616+
617+ if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
618+ volume_config['ephemeral'] = True
619+ else:
620+ volume_config['ephemeral'] = False
621+
622+ try:
623+ volume_map = yaml.safe_load(config.get('volume-map', '{}'))
624+ except yaml.YAMLError as e:
625+ hookenv.log("Error parsing YAML volume-map: {}".format(e),
626+ hookenv.ERROR)
627+ errors = True
628+ if volume_map is None:
629+ # probably an empty string
630+ volume_map = {}
631+ elif not isinstance(volume_map, dict):
632+ hookenv.log("Volume-map should be a dictionary, not {}".format(
633+ type(volume_map)))
634+ errors = True
635+
636+ volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
637+ if volume_config['device'] and volume_config['ephemeral']:
638+ # asked for ephemeral storage but also defined a volume ID
639+ hookenv.log('A volume is defined for this unit, but ephemeral '
640+ 'storage was requested', hookenv.ERROR)
641+ errors = True
642+ elif not volume_config['device'] and not volume_config['ephemeral']:
643+ # asked for permanent storage but did not define volume ID
644+ hookenv.log('Ephemeral storage was requested, but there is no volume '
645+ 'defined for this unit.', hookenv.ERROR)
646+ errors = True
647+
648+ unit_mount_name = hookenv.local_unit().replace('/', '-')
649+ volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
650+
651+ if errors:
652+ return None
653+ return volume_config
654+
655+
656+def mount_volume(config):
657+ if os.path.exists(config['mountpoint']):
658+ if not os.path.isdir(config['mountpoint']):
659+ hookenv.log('Not a directory: {}'.format(config['mountpoint']))
660+ raise VolumeConfigurationError()
661+ else:
662+ host.mkdir(config['mountpoint'])
663+ if os.path.ismount(config['mountpoint']):
664+ unmount_volume(config)
665+ if not host.mount(config['device'], config['mountpoint'], persist=True):
666+ raise VolumeConfigurationError()
667+
668+
669+def unmount_volume(config):
670+ if os.path.ismount(config['mountpoint']):
671+ if not host.umount(config['mountpoint'], persist=True):
672+ raise VolumeConfigurationError()
673+
674+
675+def managed_mounts():
676+ '''List of all mounted managed volumes'''
677+ return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
678+
679+
680+def configure_volume(before_change=lambda: None, after_change=lambda: None):
681+ '''Set up storage (or don't) according to the charm's volume configuration.
682+ Returns the mount point or "ephemeral". before_change and after_change
683+ are optional functions to be called if the volume configuration changes.
684+ '''
685+
686+ config = get_config()
687+ if not config:
688+ hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
689+ raise VolumeConfigurationError()
690+
691+ if config['ephemeral']:
692+ if os.path.ismount(config['mountpoint']):
693+ before_change()
694+ unmount_volume(config)
695+ after_change()
696+ return 'ephemeral'
697+ else:
698+ # persistent storage
699+ if os.path.ismount(config['mountpoint']):
700+ mounts = dict(managed_mounts())
701+ if mounts.get(config['mountpoint']) != config['device']:
702+ before_change()
703+ unmount_volume(config)
704+ mount_volume(config)
705+ after_change()
706+ else:
707+ before_change()
708+ mount_volume(config)
709+ after_change()
710+ return config['mountpoint']
711
712=== added directory 'charmhelpers/contrib/hahelpers'
713=== added file 'charmhelpers/contrib/hahelpers/__init__.py'
714--- charmhelpers/contrib/hahelpers/__init__.py 1970-01-01 00:00:00 +0000
715+++ charmhelpers/contrib/hahelpers/__init__.py 2015-10-08 20:44:37 +0000
716@@ -0,0 +1,15 @@
717+# Copyright 2014-2015 Canonical Limited.
718+#
719+# This file is part of charm-helpers.
720+#
721+# charm-helpers is free software: you can redistribute it and/or modify
722+# it under the terms of the GNU Lesser General Public License version 3 as
723+# published by the Free Software Foundation.
724+#
725+# charm-helpers is distributed in the hope that it will be useful,
726+# but WITHOUT ANY WARRANTY; without even the implied warranty of
727+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
728+# GNU Lesser General Public License for more details.
729+#
730+# You should have received a copy of the GNU Lesser General Public License
731+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
732
733=== added file 'charmhelpers/contrib/hahelpers/apache.py'
734--- charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
735+++ charmhelpers/contrib/hahelpers/apache.py 2015-10-08 20:44:37 +0000
736@@ -0,0 +1,82 @@
737+# Copyright 2014-2015 Canonical Limited.
738+#
739+# This file is part of charm-helpers.
740+#
741+# charm-helpers is free software: you can redistribute it and/or modify
742+# it under the terms of the GNU Lesser General Public License version 3 as
743+# published by the Free Software Foundation.
744+#
745+# charm-helpers is distributed in the hope that it will be useful,
746+# but WITHOUT ANY WARRANTY; without even the implied warranty of
747+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
748+# GNU Lesser General Public License for more details.
749+#
750+# You should have received a copy of the GNU Lesser General Public License
751+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
752+
753+#
754+# Copyright 2012 Canonical Ltd.
755+#
756+# This file is sourced from lp:openstack-charm-helpers
757+#
758+# Authors:
759+# James Page <james.page@ubuntu.com>
760+# Adam Gandelman <adamg@ubuntu.com>
761+#
762+
763+import subprocess
764+
765+from charmhelpers.core.hookenv import (
766+ config as config_get,
767+ relation_get,
768+ relation_ids,
769+ related_units as relation_list,
770+ log,
771+ INFO,
772+)
773+
774+
775+def get_cert(cn=None):
776+ # TODO: deal with multiple https endpoints via charm config
777+ cert = config_get('ssl_cert')
778+ key = config_get('ssl_key')
779+ if not (cert and key):
780+ log("Inspecting identity-service relations for SSL certificate.",
781+ level=INFO)
782+ cert = key = None
783+ if cn:
784+ ssl_cert_attr = 'ssl_cert_{}'.format(cn)
785+ ssl_key_attr = 'ssl_key_{}'.format(cn)
786+ else:
787+ ssl_cert_attr = 'ssl_cert'
788+ ssl_key_attr = 'ssl_key'
789+ for r_id in relation_ids('identity-service'):
790+ for unit in relation_list(r_id):
791+ if not cert:
792+ cert = relation_get(ssl_cert_attr,
793+ rid=r_id, unit=unit)
794+ if not key:
795+ key = relation_get(ssl_key_attr,
796+ rid=r_id, unit=unit)
797+ return (cert, key)
798+
799+
800+def get_ca_cert():
801+ ca_cert = config_get('ssl_ca')
802+ if ca_cert is None:
803+ log("Inspecting identity-service relations for CA SSL certificate.",
804+ level=INFO)
805+ for r_id in relation_ids('identity-service'):
806+ for unit in relation_list(r_id):
807+ if ca_cert is None:
808+ ca_cert = relation_get('ca_cert',
809+ rid=r_id, unit=unit)
810+ return ca_cert
811+
812+
813+def install_ca_cert(ca_cert):
814+ if ca_cert:
815+ with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
816+ 'w') as crt:
817+ crt.write(ca_cert)
818+ subprocess.check_call(['update-ca-certificates', '--fresh'])
819
820=== added file 'charmhelpers/contrib/hahelpers/cluster.py'
821--- charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
822+++ charmhelpers/contrib/hahelpers/cluster.py 2015-10-08 20:44:37 +0000
823@@ -0,0 +1,316 @@
824+# Copyright 2014-2015 Canonical Limited.
825+#
826+# This file is part of charm-helpers.
827+#
828+# charm-helpers is free software: you can redistribute it and/or modify
829+# it under the terms of the GNU Lesser General Public License version 3 as
830+# published by the Free Software Foundation.
831+#
832+# charm-helpers is distributed in the hope that it will be useful,
833+# but WITHOUT ANY WARRANTY; without even the implied warranty of
834+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
835+# GNU Lesser General Public License for more details.
836+#
837+# You should have received a copy of the GNU Lesser General Public License
838+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
839+
840+#
841+# Copyright 2012 Canonical Ltd.
842+#
843+# Authors:
844+# James Page <james.page@ubuntu.com>
845+# Adam Gandelman <adamg@ubuntu.com>
846+#
847+
848+"""
849+Helpers for clustering and determining "cluster leadership" and other
850+clustering-related helpers.
851+"""
852+
853+import subprocess
854+import os
855+
856+from socket import gethostname as get_unit_hostname
857+
858+import six
859+
860+from charmhelpers.core.hookenv import (
861+ log,
862+ relation_ids,
863+ related_units as relation_list,
864+ relation_get,
865+ config as config_get,
866+ INFO,
867+ ERROR,
868+ WARNING,
869+ unit_get,
870+ is_leader as juju_is_leader
871+)
872+from charmhelpers.core.decorators import (
873+ retry_on_exception,
874+)
875+from charmhelpers.core.strutils import (
876+ bool_from_string,
877+)
878+
879+DC_RESOURCE_NAME = 'DC'
880+
881+
882+class HAIncompleteConfig(Exception):
883+ pass
884+
885+
886+class CRMResourceNotFound(Exception):
887+ pass
888+
889+
890+class CRMDCNotFound(Exception):
891+ pass
892+
893+
894+def is_elected_leader(resource):
895+ """
896+ Returns True if the charm executing this is the elected cluster leader.
897+
898+ It relies on three mechanisms to determine leadership:
899+ 1. If juju is sufficiently new and leadership election is supported,
900+ the is_leader command will be used.
901+ 2. If the charm is part of a corosync cluster, call corosync to
902+ determine leadership.
903+ 3. If the charm is not part of a corosync cluster, the leader is
904+ determined as being "the alive unit with the lowest unit number". In
905+ other words, the oldest surviving unit.
906+ """
907+ try:
908+ return juju_is_leader()
909+ except NotImplementedError:
910+ log('Juju leadership election feature not enabled'
911+ ', using fallback support',
912+ level=WARNING)
913+
914+ if is_clustered():
915+ if not is_crm_leader(resource):
916+ log('Deferring action to CRM leader.', level=INFO)
917+ return False
918+ else:
919+ peers = peer_units()
920+ if peers and not oldest_peer(peers):
921+ log('Deferring action to oldest service unit.', level=INFO)
922+ return False
923+ return True
924+
925+
926+def is_clustered():
927+ for r_id in (relation_ids('ha') or []):
928+ for unit in (relation_list(r_id) or []):
929+ clustered = relation_get('clustered',
930+ rid=r_id,
931+ unit=unit)
932+ if clustered:
933+ return True
934+ return False
935+
936+
937+def is_crm_dc():
938+ """
939+ Determine leadership by querying the pacemaker Designated Controller
940+ """
941+ cmd = ['crm', 'status']
942+ try:
943+ status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
944+ if not isinstance(status, six.text_type):
945+ status = six.text_type(status, "utf-8")
946+ except subprocess.CalledProcessError as ex:
947+ raise CRMDCNotFound(str(ex))
948+
949+ current_dc = ''
950+ for line in status.split('\n'):
951+ if line.startswith('Current DC'):
952+ # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
953+ current_dc = line.split(':')[1].split()[0]
954+ if current_dc == get_unit_hostname():
955+ return True
956+ elif current_dc == 'NONE':
957+ raise CRMDCNotFound('Current DC: NONE')
958+
959+ return False
960+
961+
962+@retry_on_exception(5, base_delay=2,
963+ exc_type=(CRMResourceNotFound, CRMDCNotFound))
964+def is_crm_leader(resource, retry=False):
965+ """
966+ Returns True if the charm calling this is the elected corosync leader,
967+ as returned by calling the external "crm" command.
968+
969+ We allow this operation to be retried to avoid the possibility of getting a
970+ false negative. See LP #1396246 for more info.
971+ """
972+ if resource == DC_RESOURCE_NAME:
973+ return is_crm_dc()
974+ cmd = ['crm', 'resource', 'show', resource]
975+ try:
976+ status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
977+ if not isinstance(status, six.text_type):
978+ status = six.text_type(status, "utf-8")
979+ except subprocess.CalledProcessError:
980+ status = None
981+
982+ if status and get_unit_hostname() in status:
983+ return True
984+
985+ if status and "resource %s is NOT running" % (resource) in status:
986+ raise CRMResourceNotFound("CRM resource %s not found" % (resource))
987+
988+ return False
989+
990+
991+def is_leader(resource):
992+ log("is_leader is deprecated. Please consider using is_crm_leader "
993+ "instead.", level=WARNING)
994+ return is_crm_leader(resource)
995+
996+
997+def peer_units(peer_relation="cluster"):
998+ peers = []
999+ for r_id in (relation_ids(peer_relation) or []):
1000+ for unit in (relation_list(r_id) or []):
1001+ peers.append(unit)
1002+ return peers
1003+
1004+
1005+def peer_ips(peer_relation='cluster', addr_key='private-address'):
1006+ '''Return a dict of peers and their private-address'''
1007+ peers = {}
1008+ for r_id in relation_ids(peer_relation):
1009+ for unit in relation_list(r_id):
1010+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
1011+ return peers
1012+
1013+
1014+def oldest_peer(peers):
1015+ """Determines who the oldest peer is by comparing unit numbers."""
1016+ local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
1017+ for peer in peers:
1018+ remote_unit_no = int(peer.split('/')[1])
1019+ if remote_unit_no < local_unit_no:
1020+ return False
1021+ return True
1022+
1023+
1024+def eligible_leader(resource):
1025+ log("eligible_leader is deprecated. Please consider using "
1026+ "is_elected_leader instead.", level=WARNING)
1027+ return is_elected_leader(resource)
1028+
1029+
1030+def https():
1031+ '''
1032+ Determines whether enough data has been provided in configuration
1033+ or relation data to configure HTTPS
1034+ .
1035+ returns: boolean
1036+ '''
1037+ use_https = config_get('use-https')
1038+ if use_https and bool_from_string(use_https):
1039+ return True
1040+ if config_get('ssl_cert') and config_get('ssl_key'):
1041+ return True
1042+ for r_id in relation_ids('identity-service'):
1043+ for unit in relation_list(r_id):
1044+ # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
1045+ rel_state = [
1046+ relation_get('https_keystone', rid=r_id, unit=unit),
1047+ relation_get('ca_cert', rid=r_id, unit=unit),
1048+ ]
1049+ # NOTE: works around (LP: #1203241)
1050+ if (None not in rel_state) and ('' not in rel_state):
1051+ return True
1052+ return False
1053+
1054+
1055+def determine_api_port(public_port, singlenode_mode=False):
1056+ '''
1057+ Determine correct API server listening port based on
1058+ existence of HTTPS reverse proxy and/or haproxy.
1059+
1060+ public_port: int: standard public port for given service
1061+
1062+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
1063+
1064+ returns: int: the correct listening port for the API service
1065+ '''
1066+ i = 0
1067+ if singlenode_mode:
1068+ i += 1
1069+ elif len(peer_units()) > 0 or is_clustered():
1070+ i += 1
1071+ if https():
1072+ i += 1
1073+ return public_port - (i * 10)
1074+
1075+
1076+def determine_apache_port(public_port, singlenode_mode=False):
1077+ '''
1078+ Description: Determine correct apache listening port based on public IP +
1079+ state of the cluster.
1080+
1081+ public_port: int: standard public port for given service
1082+
1083+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
1084+
1085+ returns: int: the correct listening port for the HAProxy service
1086+ '''
1087+ i = 0
1088+ if singlenode_mode:
1089+ i += 1
1090+ elif len(peer_units()) > 0 or is_clustered():
1091+ i += 1
1092+ return public_port - (i * 10)
1093+
1094+
1095+def get_hacluster_config(exclude_keys=None):
1096+ '''
1097+ Obtains all relevant configuration from charm configuration required
1098+ for initiating a relation to hacluster:
1099+
1100+ ha-bindiface, ha-mcastport, vip
1101+
1102+ param: exclude_keys: list of setting key(s) to be excluded.
1103+ returns: dict: A dict containing settings keyed by setting name.
1104+ raises: HAIncompleteConfig if settings are missing.
1105+ '''
1106+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
1107+ conf = {}
1108+ for setting in settings:
1109+ if exclude_keys and setting in exclude_keys:
1110+ continue
1111+
1112+ conf[setting] = config_get(setting)
1113+ missing = []
1114+ [missing.append(s) for s, v in six.iteritems(conf) if v is None]
1115+ if missing:
1116+ log('Insufficient config data to configure hacluster.', level=ERROR)
1117+ raise HAIncompleteConfig
1118+ return conf
1119+
1120+
1121+def canonical_url(configs, vip_setting='vip'):
1122+ '''
1123+ Returns the correct HTTP URL to this host given the state of HTTPS
1124+ configuration and hacluster.
1125+
1126+ :configs : OSTemplateRenderer: A config tempating object to inspect for
1127+ a complete https context.
1128+
1129+ :vip_setting: str: Setting in charm config that specifies
1130+ VIP address.
1131+ '''
1132+ scheme = 'http'
1133+ if 'https' in configs.complete_contexts():
1134+ scheme = 'https'
1135+ if is_clustered():
1136+ addr = config_get(vip_setting)
1137+ else:
1138+ addr = unit_get('private-address')
1139+ return '%s://%s' % (scheme, addr)
1140
1141=== added directory 'charmhelpers/contrib/network'
1142=== added file 'charmhelpers/contrib/network/__init__.py'
1143--- charmhelpers/contrib/network/__init__.py 1970-01-01 00:00:00 +0000
1144+++ charmhelpers/contrib/network/__init__.py 2015-10-08 20:44:37 +0000
1145@@ -0,0 +1,15 @@
1146+# Copyright 2014-2015 Canonical Limited.
1147+#
1148+# This file is part of charm-helpers.
1149+#
1150+# charm-helpers is free software: you can redistribute it and/or modify
1151+# it under the terms of the GNU Lesser General Public License version 3 as
1152+# published by the Free Software Foundation.
1153+#
1154+# charm-helpers is distributed in the hope that it will be useful,
1155+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1156+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1157+# GNU Lesser General Public License for more details.
1158+#
1159+# You should have received a copy of the GNU Lesser General Public License
1160+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1161
1162=== added file 'charmhelpers/contrib/network/ip.py'
1163--- charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
1164+++ charmhelpers/contrib/network/ip.py 2015-10-08 20:44:37 +0000
1165@@ -0,0 +1,456 @@
1166+# Copyright 2014-2015 Canonical Limited.
1167+#
1168+# This file is part of charm-helpers.
1169+#
1170+# charm-helpers is free software: you can redistribute it and/or modify
1171+# it under the terms of the GNU Lesser General Public License version 3 as
1172+# published by the Free Software Foundation.
1173+#
1174+# charm-helpers is distributed in the hope that it will be useful,
1175+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1176+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1177+# GNU Lesser General Public License for more details.
1178+#
1179+# You should have received a copy of the GNU Lesser General Public License
1180+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1181+
1182+import glob
1183+import re
1184+import subprocess
1185+import six
1186+import socket
1187+
1188+from functools import partial
1189+
1190+from charmhelpers.core.hookenv import unit_get
1191+from charmhelpers.fetch import apt_install, apt_update
1192+from charmhelpers.core.hookenv import (
1193+ log,
1194+ WARNING,
1195+)
1196+
1197+try:
1198+ import netifaces
1199+except ImportError:
1200+ apt_update(fatal=True)
1201+ apt_install('python-netifaces', fatal=True)
1202+ import netifaces
1203+
1204+try:
1205+ import netaddr
1206+except ImportError:
1207+ apt_update(fatal=True)
1208+ apt_install('python-netaddr', fatal=True)
1209+ import netaddr
1210+
1211+
1212+def _validate_cidr(network):
1213+ try:
1214+ netaddr.IPNetwork(network)
1215+ except (netaddr.core.AddrFormatError, ValueError):
1216+ raise ValueError("Network (%s) is not in CIDR presentation format" %
1217+ network)
1218+
1219+
1220+def no_ip_found_error_out(network):
1221+ errmsg = ("No IP address found in network: %s" % network)
1222+ raise ValueError(errmsg)
1223+
1224+
1225+def get_address_in_network(network, fallback=None, fatal=False):
1226+ """Get an IPv4 or IPv6 address within the network from the host.
1227+
1228+ :param network (str): CIDR presentation format. For example,
1229+ '192.168.1.0/24'.
1230+ :param fallback (str): If no address is found, return fallback.
1231+ :param fatal (boolean): If no address is found, fallback is not
1232+ set and fatal is True then exit(1).
1233+ """
1234+ if network is None:
1235+ if fallback is not None:
1236+ return fallback
1237+
1238+ if fatal:
1239+ no_ip_found_error_out(network)
1240+ else:
1241+ return None
1242+
1243+ _validate_cidr(network)
1244+ network = netaddr.IPNetwork(network)
1245+ for iface in netifaces.interfaces():
1246+ addresses = netifaces.ifaddresses(iface)
1247+ if network.version == 4 and netifaces.AF_INET in addresses:
1248+ addr = addresses[netifaces.AF_INET][0]['addr']
1249+ netmask = addresses[netifaces.AF_INET][0]['netmask']
1250+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
1251+ if cidr in network:
1252+ return str(cidr.ip)
1253+
1254+ if network.version == 6 and netifaces.AF_INET6 in addresses:
1255+ for addr in addresses[netifaces.AF_INET6]:
1256+ if not addr['addr'].startswith('fe80'):
1257+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
1258+ addr['netmask']))
1259+ if cidr in network:
1260+ return str(cidr.ip)
1261+
1262+ if fallback is not None:
1263+ return fallback
1264+
1265+ if fatal:
1266+ no_ip_found_error_out(network)
1267+
1268+ return None
1269+
1270+
1271+def is_ipv6(address):
1272+ """Determine whether provided address is IPv6 or not."""
1273+ try:
1274+ address = netaddr.IPAddress(address)
1275+ except netaddr.AddrFormatError:
1276+ # probably a hostname - so not an address at all!
1277+ return False
1278+
1279+ return address.version == 6
1280+
1281+
1282+def is_address_in_network(network, address):
1283+ """
1284+ Determine whether the provided address is within a network range.
1285+
1286+ :param network (str): CIDR presentation format. For example,
1287+ '192.168.1.0/24'.
1288+ :param address: An individual IPv4 or IPv6 address without a net
1289+ mask or subnet prefix. For example, '192.168.1.1'.
1290+ :returns boolean: Flag indicating whether address is in network.
1291+ """
1292+ try:
1293+ network = netaddr.IPNetwork(network)
1294+ except (netaddr.core.AddrFormatError, ValueError):
1295+ raise ValueError("Network (%s) is not in CIDR presentation format" %
1296+ network)
1297+
1298+ try:
1299+ address = netaddr.IPAddress(address)
1300+ except (netaddr.core.AddrFormatError, ValueError):
1301+ raise ValueError("Address (%s) is not in correct presentation format" %
1302+ address)
1303+
1304+ if address in network:
1305+ return True
1306+ else:
1307+ return False
1308+
1309+
1310+def _get_for_address(address, key):
1311+ """Retrieve an attribute of or the physical interface that
1312+ the IP address provided could be bound to.
1313+
1314+ :param address (str): An individual IPv4 or IPv6 address without a net
1315+ mask or subnet prefix. For example, '192.168.1.1'.
1316+ :param key: 'iface' for the physical interface name or an attribute
1317+ of the configured interface, for example 'netmask'.
1318+ :returns str: Requested attribute or None if address is not bindable.
1319+ """
1320+ address = netaddr.IPAddress(address)
1321+ for iface in netifaces.interfaces():
1322+ addresses = netifaces.ifaddresses(iface)
1323+ if address.version == 4 and netifaces.AF_INET in addresses:
1324+ addr = addresses[netifaces.AF_INET][0]['addr']
1325+ netmask = addresses[netifaces.AF_INET][0]['netmask']
1326+ network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
1327+ cidr = network.cidr
1328+ if address in cidr:
1329+ if key == 'iface':
1330+ return iface
1331+ else:
1332+ return addresses[netifaces.AF_INET][0][key]
1333+
1334+ if address.version == 6 and netifaces.AF_INET6 in addresses:
1335+ for addr in addresses[netifaces.AF_INET6]:
1336+ if not addr['addr'].startswith('fe80'):
1337+ network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
1338+ addr['netmask']))
1339+ cidr = network.cidr
1340+ if address in cidr:
1341+ if key == 'iface':
1342+ return iface
1343+ elif key == 'netmask' and cidr:
1344+ return str(cidr).split('/')[1]
1345+ else:
1346+ return addr[key]
1347+
1348+ return None
1349+
1350+
1351+get_iface_for_address = partial(_get_for_address, key='iface')
1352+
1353+
1354+get_netmask_for_address = partial(_get_for_address, key='netmask')
1355+
1356+
1357+def format_ipv6_addr(address):
1358+ """If address is IPv6, wrap it in '[]' otherwise return None.
1359+
1360+ This is required by most configuration files when specifying IPv6
1361+ addresses.
1362+ """
1363+ if is_ipv6(address):
1364+ return "[%s]" % address
1365+
1366+ return None
1367+
1368+
1369+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
1370+ fatal=True, exc_list=None):
1371+ """Return the assigned IP address for a given interface, if any."""
1372+ # Extract nic if passed /dev/ethX
1373+ if '/' in iface:
1374+ iface = iface.split('/')[-1]
1375+
1376+ if not exc_list:
1377+ exc_list = []
1378+
1379+ try:
1380+ inet_num = getattr(netifaces, inet_type)
1381+ except AttributeError:
1382+ raise Exception("Unknown inet type '%s'" % str(inet_type))
1383+
1384+ interfaces = netifaces.interfaces()
1385+ if inc_aliases:
1386+ ifaces = []
1387+ for _iface in interfaces:
1388+ if iface == _iface or _iface.split(':')[0] == iface:
1389+ ifaces.append(_iface)
1390+
1391+ if fatal and not ifaces:
1392+ raise Exception("Invalid interface '%s'" % iface)
1393+
1394+ ifaces.sort()
1395+ else:
1396+ if iface not in interfaces:
1397+ if fatal:
1398+ raise Exception("Interface '%s' not found " % (iface))
1399+ else:
1400+ return []
1401+
1402+ else:
1403+ ifaces = [iface]
1404+
1405+ addresses = []
1406+ for netiface in ifaces:
1407+ net_info = netifaces.ifaddresses(netiface)
1408+ if inet_num in net_info:
1409+ for entry in net_info[inet_num]:
1410+ if 'addr' in entry and entry['addr'] not in exc_list:
1411+ addresses.append(entry['addr'])
1412+
1413+ if fatal and not addresses:
1414+ raise Exception("Interface '%s' doesn't have any %s addresses." %
1415+ (iface, inet_type))
1416+
1417+ return sorted(addresses)
1418+
1419+
1420+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
1421+
1422+
1423+def get_iface_from_addr(addr):
1424+ """Work out on which interface the provided address is configured."""
1425+ for iface in netifaces.interfaces():
1426+ addresses = netifaces.ifaddresses(iface)
1427+ for inet_type in addresses:
1428+ for _addr in addresses[inet_type]:
1429+ _addr = _addr['addr']
1430+ # link local
1431+ ll_key = re.compile("(.+)%.*")
1432+ raw = re.match(ll_key, _addr)
1433+ if raw:
1434+ _addr = raw.group(1)
1435+
1436+ if _addr == addr:
1437+ log("Address '%s' is configured on iface '%s'" %
1438+ (addr, iface))
1439+ return iface
1440+
1441+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
1442+ raise Exception(msg)
1443+
1444+
1445+def sniff_iface(f):
1446+ """Ensure decorated function is called with a value for iface.
1447+
1448+ If no iface provided, inject net iface inferred from unit private address.
1449+ """
1450+ def iface_sniffer(*args, **kwargs):
1451+ if not kwargs.get('iface', None):
1452+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
1453+
1454+ return f(*args, **kwargs)
1455+
1456+ return iface_sniffer
1457+
1458+
1459+@sniff_iface
1460+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
1461+ dynamic_only=True):
1462+ """Get assigned IPv6 address for a given interface.
1463+
1464+ Returns list of addresses found. If no address found, returns empty list.
1465+
1466+ If iface is None, we infer the current primary interface by doing a reverse
1467+ lookup on the unit private-address.
1468+
1469+    We currently only support scope global IPv6 addresses, i.e. non-temporary
1470+    addresses. If no global IPv6 address is found, an exception is raised,
1471+    or an empty list is returned when fatal=False.
1472+ """
1473+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
1474+ inc_aliases=inc_aliases, fatal=fatal,
1475+ exc_list=exc_list)
1476+
1477+ if addresses:
1478+ global_addrs = []
1479+ for addr in addresses:
1480+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
1481+ m = re.match(key_scope_link_local, addr)
1482+ if m:
1483+ eui_64_mac = m.group(1)
1484+ iface = m.group(2)
1485+ else:
1486+ global_addrs.append(addr)
1487+
1488+ if global_addrs:
1489+ # Make sure any found global addresses are not temporary
1490+ cmd = ['ip', 'addr', 'show', iface]
1491+ out = subprocess.check_output(cmd).decode('UTF-8')
1492+ if dynamic_only:
1493+ key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
1494+ else:
1495+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
1496+
1497+ addrs = []
1498+ for line in out.split('\n'):
1499+ line = line.strip()
1500+ m = re.match(key, line)
1501+ if m and 'temporary' not in line:
1502+ # Return the first valid address we find
1503+ for addr in global_addrs:
1504+ if m.group(1) == addr:
1505+ if not dynamic_only or \
1506+ m.group(1).endswith(eui_64_mac):
1507+ addrs.append(addr)
1508+
1509+ if addrs:
1510+ return addrs
1511+
1512+ if fatal:
1513+ raise Exception("Interface '%s' does not have a scope global "
1514+ "non-temporary ipv6 address." % iface)
1515+
1516+ return []
1517+
1518+
1519+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
1520+ """Return a list of bridges on the system."""
1521+ b_regex = "%s/*/bridge" % vnic_dir
1522+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
1523+
1524+
1525+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
1526+ """Return a list of nics comprising a given bridge on the system."""
1527+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
1528+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
1529+
1530+
1531+def is_bridge_member(nic):
1532+ """Check if a given nic is a member of a bridge."""
1533+ for bridge in get_bridges():
1534+ if nic in get_bridge_nics(bridge):
1535+ return True
1536+
1537+ return False
1538+
1539+
1540+def is_ip(address):
1541+ """
1542+    Returns True if address is a valid IPv4 address.
1543+ """
1544+ try:
1545+ # Test to see if already an IPv4 address
1546+ socket.inet_aton(address)
1547+ return True
1548+ except socket.error:
1549+ return False
1550+
1551+
1552+def ns_query(address):
1553+ try:
1554+ import dns.resolver
1555+ except ImportError:
1556+ apt_install('python-dnspython')
1557+ import dns.resolver
1558+
1559+ if isinstance(address, dns.name.Name):
1560+ rtype = 'PTR'
1561+ elif isinstance(address, six.string_types):
1562+ rtype = 'A'
1563+ else:
1564+ return None
1565+
1566+ answers = dns.resolver.query(address, rtype)
1567+ if answers:
1568+ return str(answers[0])
1569+ return None
1570+
1571+
1572+def get_host_ip(hostname, fallback=None):
1573+ """
1574+ Resolves the IP for a given hostname, or returns
1575+ the input if it is already an IP.
1576+ """
1577+ if is_ip(hostname):
1578+ return hostname
1579+
1580+ ip_addr = ns_query(hostname)
1581+ if not ip_addr:
1582+ try:
1583+ ip_addr = socket.gethostbyname(hostname)
1584+ except:
1585+ log("Failed to resolve hostname '%s'" % (hostname),
1586+ level=WARNING)
1587+ return fallback
1588+ return ip_addr
1589+
1590+
1591+def get_hostname(address, fqdn=True):
1592+ """
1593+ Resolves hostname for given IP, or returns the input
1594+ if it is already a hostname.
1595+ """
1596+ if is_ip(address):
1597+ try:
1598+ import dns.reversename
1599+ except ImportError:
1600+ apt_install("python-dnspython")
1601+ import dns.reversename
1602+
1603+ rev = dns.reversename.from_address(address)
1604+ result = ns_query(rev)
1605+
1606+ if not result:
1607+ try:
1608+ result = socket.gethostbyaddr(address)[0]
1609+ except:
1610+ return None
1611+ else:
1612+ result = address
1613+
1614+ if fqdn:
1615+ # strip trailing .
1616+ if result.endswith('.'):
1617+ return result[:-1]
1618+ else:
1619+ return result
1620+ else:
1621+ return result.split('.')[0]
1622
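
For orientation, a minimal sketch of how hook code typically consumes the
address helpers added above; the interface, addresses and hostname below are
illustrative placeholders, not values used by this charm:

    from charmhelpers.contrib.network.ip import (
        format_ipv6_addr,
        get_host_ip,
        get_iface_for_address,
        get_netmask_for_address,
    )

    # Which local interface could bind a (hypothetical) VIP, and with
    # what netmask?
    vip = '192.168.1.10'
    iface = get_iface_for_address(vip)        # e.g. 'eth0', or None
    netmask = get_netmask_for_address(vip)    # e.g. '255.255.255.0'

    # Most config files want IPv6 literals wrapped in brackets; for IPv4
    # input format_ipv6_addr() returns None, so fall back to the address.
    listen_addr = format_ipv6_addr(vip) or vip

    # Resolve a hostname via DNS, returning the input untouched if it is
    # already an IP and the fallback if resolution fails.
    db_host = get_host_ip('db.example.com', fallback='127.0.0.1')
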
1623=== added directory 'charmhelpers/contrib/openstack'
1624=== added file 'charmhelpers/contrib/openstack/__init__.py'
1625--- charmhelpers/contrib/openstack/__init__.py 1970-01-01 00:00:00 +0000
1626+++ charmhelpers/contrib/openstack/__init__.py 2015-10-08 20:44:37 +0000
1627@@ -0,0 +1,15 @@
1628+# Copyright 2014-2015 Canonical Limited.
1629+#
1630+# This file is part of charm-helpers.
1631+#
1632+# charm-helpers is free software: you can redistribute it and/or modify
1633+# it under the terms of the GNU Lesser General Public License version 3 as
1634+# published by the Free Software Foundation.
1635+#
1636+# charm-helpers is distributed in the hope that it will be useful,
1637+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1638+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1639+# GNU Lesser General Public License for more details.
1640+#
1641+# You should have received a copy of the GNU Lesser General Public License
1642+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1643
1644=== added file 'charmhelpers/contrib/openstack/alternatives.py'
1645--- charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000
1646+++ charmhelpers/contrib/openstack/alternatives.py 2015-10-08 20:44:37 +0000
1647@@ -0,0 +1,33 @@
1648+# Copyright 2014-2015 Canonical Limited.
1649+#
1650+# This file is part of charm-helpers.
1651+#
1652+# charm-helpers is free software: you can redistribute it and/or modify
1653+# it under the terms of the GNU Lesser General Public License version 3 as
1654+# published by the Free Software Foundation.
1655+#
1656+# charm-helpers is distributed in the hope that it will be useful,
1657+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1658+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1659+# GNU Lesser General Public License for more details.
1660+#
1661+# You should have received a copy of the GNU Lesser General Public License
1662+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1663+
1664+''' Helper for managing alternatives for file conflict resolution '''
1665+
1666+import subprocess
1667+import shutil
1668+import os
1669+
1670+
1671+def install_alternative(name, target, source, priority=50):
1672+ ''' Install alternative configuration '''
1673+ if (os.path.exists(target) and not os.path.islink(target)):
1674+ # Move existing file/directory away before installing
1675+ shutil.move(target, '{}.bak'.format(target))
1676+ cmd = [
1677+ 'update-alternatives', '--force', '--install',
1678+ target, name, source, str(priority)
1679+ ]
1680+ subprocess.check_call(cmd)
1681
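
A hedged usage sketch for install_alternative(); the alternative name, paths
and priority are hypothetical and only illustrate the calling convention:

    from charmhelpers.contrib.openstack.alternatives import install_alternative

    # Point the shared path /etc/foo/foo.conf at a charm-managed copy via
    # update-alternatives; if a plain file already exists at the target it
    # is first moved aside to /etc/foo/foo.conf.bak.
    install_alternative(name='foo.conf',
                        target='/etc/foo/foo.conf',
                        source='/var/lib/charm/my-service/foo.conf',
                        priority=100)
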
1682=== added directory 'charmhelpers/contrib/openstack/amulet'
1683=== added file 'charmhelpers/contrib/openstack/amulet/__init__.py'
1684--- charmhelpers/contrib/openstack/amulet/__init__.py 1970-01-01 00:00:00 +0000
1685+++ charmhelpers/contrib/openstack/amulet/__init__.py 2015-10-08 20:44:37 +0000
1686@@ -0,0 +1,15 @@
1687+# Copyright 2014-2015 Canonical Limited.
1688+#
1689+# This file is part of charm-helpers.
1690+#
1691+# charm-helpers is free software: you can redistribute it and/or modify
1692+# it under the terms of the GNU Lesser General Public License version 3 as
1693+# published by the Free Software Foundation.
1694+#
1695+# charm-helpers is distributed in the hope that it will be useful,
1696+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1697+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1698+# GNU Lesser General Public License for more details.
1699+#
1700+# You should have received a copy of the GNU Lesser General Public License
1701+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1702
1703=== added file 'charmhelpers/contrib/openstack/amulet/deployment.py'
1704--- charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
1705+++ charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-08 20:44:37 +0000
1706@@ -0,0 +1,197 @@
1707+# Copyright 2014-2015 Canonical Limited.
1708+#
1709+# This file is part of charm-helpers.
1710+#
1711+# charm-helpers is free software: you can redistribute it and/or modify
1712+# it under the terms of the GNU Lesser General Public License version 3 as
1713+# published by the Free Software Foundation.
1714+#
1715+# charm-helpers is distributed in the hope that it will be useful,
1716+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1717+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1718+# GNU Lesser General Public License for more details.
1719+#
1720+# You should have received a copy of the GNU Lesser General Public License
1721+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1722+
1723+import six
1724+from collections import OrderedDict
1725+from charmhelpers.contrib.amulet.deployment import (
1726+ AmuletDeployment
1727+)
1728+
1729+
1730+class OpenStackAmuletDeployment(AmuletDeployment):
1731+ """OpenStack amulet deployment.
1732+
1733+ This class inherits from AmuletDeployment and has additional support
1734+ that is specifically for use by OpenStack charms.
1735+ """
1736+
1737+ def __init__(self, series=None, openstack=None, source=None, stable=True):
1738+ """Initialize the deployment environment."""
1739+ super(OpenStackAmuletDeployment, self).__init__(series)
1740+ self.openstack = openstack
1741+ self.source = source
1742+ self.stable = stable
1743+ # Note(coreycb): this needs to be changed when new next branches come
1744+ # out.
1745+ self.current_next = "trusty"
1746+
1747+ def _determine_branch_locations(self, other_services):
1748+ """Determine the branch locations for the other services.
1749+
1750+ Determine if the local branch being tested is derived from its
1751+        stable or next (dev) branch, and based on this, use the corresponding
1752+ stable or next branches for the other_services."""
1753+
1754+ # Charms outside the lp:~openstack-charmers namespace
1755+ base_charms = ['mysql', 'mongodb', 'nrpe']
1756+
1757+ # Force these charms to current series even when using an older series.
1758+        # i.e. use trusty/nrpe even when the series is precise, as the precise
1759+        # charm does not possess the necessary external master config and hooks.
1760+ force_series_current = ['nrpe']
1761+
1762+ if self.series in ['precise', 'trusty']:
1763+ base_series = self.series
1764+ else:
1765+ base_series = self.current_next
1766+
1767+ for svc in other_services:
1768+ if svc['name'] in force_series_current:
1769+ base_series = self.current_next
1770+ # If a location has been explicitly set, use it
1771+ if svc.get('location'):
1772+ continue
1773+ if self.stable:
1774+ temp = 'lp:charms/{}/{}'
1775+ svc['location'] = temp.format(base_series,
1776+ svc['name'])
1777+ else:
1778+ if svc['name'] in base_charms:
1779+ temp = 'lp:charms/{}/{}'
1780+ svc['location'] = temp.format(base_series,
1781+ svc['name'])
1782+ else:
1783+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
1784+ svc['location'] = temp.format(self.current_next,
1785+ svc['name'])
1786+
1787+ return other_services
1788+
1789+ def _add_services(self, this_service, other_services):
1790+ """Add services to the deployment and set openstack-origin/source."""
1791+ other_services = self._determine_branch_locations(other_services)
1792+
1793+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
1794+ other_services)
1795+
1796+ services = other_services
1797+ services.append(this_service)
1798+
1799+ # Charms which should use the source config option
1800+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
1801+ 'ceph-osd', 'ceph-radosgw']
1802+
1803+        # Charms which cannot use openstack-origin, i.e. many subordinates
1804+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
1805+
1806+ if self.openstack:
1807+ for svc in services:
1808+ if svc['name'] not in use_source + no_origin:
1809+ config = {'openstack-origin': self.openstack}
1810+ self.d.configure(svc['name'], config)
1811+
1812+ if self.source:
1813+ for svc in services:
1814+ if svc['name'] in use_source and svc['name'] not in no_origin:
1815+ config = {'source': self.source}
1816+ self.d.configure(svc['name'], config)
1817+
1818+ def _configure_services(self, configs):
1819+ """Configure all of the services."""
1820+ for service, config in six.iteritems(configs):
1821+ self.d.configure(service, config)
1822+
1823+ def _get_openstack_release(self):
1824+ """Get openstack release.
1825+
1826+ Return an integer representing the enum value of the openstack
1827+ release.
1828+ """
1829+ # Must be ordered by OpenStack release (not by Ubuntu release):
1830+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
1831+ self.precise_havana, self.precise_icehouse,
1832+ self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
1833+ self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
1834+ self.wily_liberty) = range(12)
1835+
1836+ releases = {
1837+ ('precise', None): self.precise_essex,
1838+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
1839+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
1840+ ('precise', 'cloud:precise-havana'): self.precise_havana,
1841+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
1842+ ('trusty', None): self.trusty_icehouse,
1843+ ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
1844+ ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
1845+ ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
1846+ ('utopic', None): self.utopic_juno,
1847+ ('vivid', None): self.vivid_kilo,
1848+ ('wily', None): self.wily_liberty}
1849+ return releases[(self.series, self.openstack)]
1850+
1851+ def _get_openstack_release_string(self):
1852+ """Get openstack release string.
1853+
1854+ Return a string representing the openstack release.
1855+ """
1856+ releases = OrderedDict([
1857+ ('precise', 'essex'),
1858+ ('quantal', 'folsom'),
1859+ ('raring', 'grizzly'),
1860+ ('saucy', 'havana'),
1861+ ('trusty', 'icehouse'),
1862+ ('utopic', 'juno'),
1863+ ('vivid', 'kilo'),
1864+ ('wily', 'liberty'),
1865+ ])
1866+ if self.openstack:
1867+ os_origin = self.openstack.split(':')[1]
1868+ return os_origin.split('%s-' % self.series)[1].split('/')[0]
1869+ else:
1870+ return releases[self.series]
1871+
1872+ def get_ceph_expected_pools(self, radosgw=False):
1873+ """Return a list of expected ceph pools in a ceph + cinder + glance
1874+ test scenario, based on OpenStack release and whether ceph radosgw
1875+ is flagged as present or not."""
1876+
1877+ if self._get_openstack_release() >= self.trusty_kilo:
1878+ # Kilo or later
1879+ pools = [
1880+ 'rbd',
1881+ 'cinder',
1882+ 'glance'
1883+ ]
1884+ else:
1885+ # Juno or earlier
1886+ pools = [
1887+ 'data',
1888+ 'metadata',
1889+ 'rbd',
1890+ 'cinder',
1891+ 'glance'
1892+ ]
1893+
1894+ if radosgw:
1895+ pools.extend([
1896+ '.rgw.root',
1897+ '.rgw.control',
1898+ '.rgw',
1899+ '.rgw.gc',
1900+ '.users.uid'
1901+ ])
1902+
1903+ return pools
1904
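
As a rough illustration (not part of this merge), a charm's amulet tests
usually wrap OpenStackAmuletDeployment along the following lines; the charm,
service names and config values are placeholders:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )


    class MyCharmBasicDeployment(OpenStackAmuletDeployment):
        """Amulet deployment for a hypothetical 'my-charm' charm."""

        def __init__(self, series, openstack=None, source=None, stable=False):
            super(MyCharmBasicDeployment, self).__init__(series, openstack,
                                                         source, stable)
            this_service = {'name': 'my-charm'}
            other_services = [{'name': 'mysql'},
                              {'name': 'keystone'},
                              {'name': 'rabbitmq-server'}]
            # Branch locations and openstack-origin/source settings are
            # resolved by the helpers shown above.
            self._add_services(this_service, other_services)
            self._configure_services({'keystone': {'admin-password': 'secret'}})

A real test would then go on to deploy and obtain sentry units via the
helpers provided by the base AmuletDeployment class.
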
1905=== added file 'charmhelpers/contrib/openstack/amulet/utils.py'
1906--- charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
1907+++ charmhelpers/contrib/openstack/amulet/utils.py 2015-10-08 20:44:37 +0000
1908@@ -0,0 +1,963 @@
1909+# Copyright 2014-2015 Canonical Limited.
1910+#
1911+# This file is part of charm-helpers.
1912+#
1913+# charm-helpers is free software: you can redistribute it and/or modify
1914+# it under the terms of the GNU Lesser General Public License version 3 as
1915+# published by the Free Software Foundation.
1916+#
1917+# charm-helpers is distributed in the hope that it will be useful,
1918+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1919+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1920+# GNU Lesser General Public License for more details.
1921+#
1922+# You should have received a copy of the GNU Lesser General Public License
1923+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1924+
1925+import amulet
1926+import json
1927+import logging
1928+import os
1929+import six
1930+import time
1931+import urllib
1932+
1933+import cinderclient.v1.client as cinder_client
1934+import glanceclient.v1.client as glance_client
1935+import heatclient.v1.client as heat_client
1936+import keystoneclient.v2_0 as keystone_client
1937+import novaclient.v1_1.client as nova_client
1938+import pika
1939+import swiftclient
1940+
1941+from charmhelpers.contrib.amulet.utils import (
1942+ AmuletUtils
1943+)
1944+
1945+DEBUG = logging.DEBUG
1946+ERROR = logging.ERROR
1947+
1948+
1949+class OpenStackAmuletUtils(AmuletUtils):
1950+ """OpenStack amulet utilities.
1951+
1952+ This class inherits from AmuletUtils and has additional support
1953+ that is specifically for use by OpenStack charm tests.
1954+ """
1955+
1956+ def __init__(self, log_level=ERROR):
1957+ """Initialize the deployment environment."""
1958+ super(OpenStackAmuletUtils, self).__init__(log_level)
1959+
1960+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
1961+ public_port, expected):
1962+ """Validate endpoint data.
1963+
1964+ Validate actual endpoint data vs expected endpoint data. The ports
1965+ are used to find the matching endpoint.
1966+ """
1967+ self.log.debug('Validating endpoint data...')
1968+ self.log.debug('actual: {}'.format(repr(endpoints)))
1969+ found = False
1970+ for ep in endpoints:
1971+ self.log.debug('endpoint: {}'.format(repr(ep)))
1972+ if (admin_port in ep.adminurl and
1973+ internal_port in ep.internalurl and
1974+ public_port in ep.publicurl):
1975+ found = True
1976+ actual = {'id': ep.id,
1977+ 'region': ep.region,
1978+ 'adminurl': ep.adminurl,
1979+ 'internalurl': ep.internalurl,
1980+ 'publicurl': ep.publicurl,
1981+ 'service_id': ep.service_id}
1982+ ret = self._validate_dict_data(expected, actual)
1983+ if ret:
1984+ return 'unexpected endpoint data - {}'.format(ret)
1985+
1986+ if not found:
1987+ return 'endpoint not found'
1988+
1989+ def validate_svc_catalog_endpoint_data(self, expected, actual):
1990+ """Validate service catalog endpoint data.
1991+
1992+ Validate a list of actual service catalog endpoints vs a list of
1993+ expected service catalog endpoints.
1994+ """
1995+ self.log.debug('Validating service catalog endpoint data...')
1996+ self.log.debug('actual: {}'.format(repr(actual)))
1997+ for k, v in six.iteritems(expected):
1998+ if k in actual:
1999+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
2000+ if ret:
2001+ return self.endpoint_error(k, ret)
2002+ else:
2003+ return "endpoint {} does not exist".format(k)
2004+ return ret
2005+
2006+ def validate_tenant_data(self, expected, actual):
2007+ """Validate tenant data.
2008+
2009+ Validate a list of actual tenant data vs list of expected tenant
2010+ data.
2011+ """
2012+ self.log.debug('Validating tenant data...')
2013+ self.log.debug('actual: {}'.format(repr(actual)))
2014+ for e in expected:
2015+ found = False
2016+ for act in actual:
2017+ a = {'enabled': act.enabled, 'description': act.description,
2018+ 'name': act.name, 'id': act.id}
2019+ if e['name'] == a['name']:
2020+ found = True
2021+ ret = self._validate_dict_data(e, a)
2022+ if ret:
2023+ return "unexpected tenant data - {}".format(ret)
2024+ if not found:
2025+ return "tenant {} does not exist".format(e['name'])
2026+ return ret
2027+
2028+ def validate_role_data(self, expected, actual):
2029+ """Validate role data.
2030+
2031+ Validate a list of actual role data vs a list of expected role
2032+ data.
2033+ """
2034+ self.log.debug('Validating role data...')
2035+ self.log.debug('actual: {}'.format(repr(actual)))
2036+ for e in expected:
2037+ found = False
2038+ for act in actual:
2039+ a = {'name': act.name, 'id': act.id}
2040+ if e['name'] == a['name']:
2041+ found = True
2042+ ret = self._validate_dict_data(e, a)
2043+ if ret:
2044+ return "unexpected role data - {}".format(ret)
2045+ if not found:
2046+ return "role {} does not exist".format(e['name'])
2047+ return ret
2048+
2049+ def validate_user_data(self, expected, actual):
2050+ """Validate user data.
2051+
2052+ Validate a list of actual user data vs a list of expected user
2053+ data.
2054+ """
2055+ self.log.debug('Validating user data...')
2056+ self.log.debug('actual: {}'.format(repr(actual)))
2057+ for e in expected:
2058+ found = False
2059+ for act in actual:
2060+ a = {'enabled': act.enabled, 'name': act.name,
2061+ 'email': act.email, 'tenantId': act.tenantId,
2062+ 'id': act.id}
2063+ if e['name'] == a['name']:
2064+ found = True
2065+ ret = self._validate_dict_data(e, a)
2066+ if ret:
2067+ return "unexpected user data - {}".format(ret)
2068+ if not found:
2069+ return "user {} does not exist".format(e['name'])
2070+ return ret
2071+
2072+ def validate_flavor_data(self, expected, actual):
2073+ """Validate flavor data.
2074+
2075+ Validate a list of actual flavors vs a list of expected flavors.
2076+ """
2077+ self.log.debug('Validating flavor data...')
2078+ self.log.debug('actual: {}'.format(repr(actual)))
2079+ act = [a.name for a in actual]
2080+ return self._validate_list_data(expected, act)
2081+
2082+ def tenant_exists(self, keystone, tenant):
2083+ """Return True if tenant exists."""
2084+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
2085+ return tenant in [t.name for t in keystone.tenants.list()]
2086+
2087+ def authenticate_cinder_admin(self, keystone_sentry, username,
2088+ password, tenant):
2089+ """Authenticates admin user with cinder."""
2090+ # NOTE(beisner): cinder python client doesn't accept tokens.
2091+ service_ip = \
2092+ keystone_sentry.relation('shared-db',
2093+ 'mysql:shared-db')['private-address']
2094+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
2095+ return cinder_client.Client(username, password, tenant, ept)
2096+
2097+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
2098+ tenant):
2099+ """Authenticates admin user with the keystone admin endpoint."""
2100+ self.log.debug('Authenticating keystone admin...')
2101+ unit = keystone_sentry
2102+ service_ip = unit.relation('shared-db',
2103+ 'mysql:shared-db')['private-address']
2104+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
2105+ return keystone_client.Client(username=user, password=password,
2106+ tenant_name=tenant, auth_url=ep)
2107+
2108+ def authenticate_keystone_user(self, keystone, user, password, tenant):
2109+ """Authenticates a regular user with the keystone public endpoint."""
2110+ self.log.debug('Authenticating keystone user ({})...'.format(user))
2111+ ep = keystone.service_catalog.url_for(service_type='identity',
2112+ endpoint_type='publicURL')
2113+ return keystone_client.Client(username=user, password=password,
2114+ tenant_name=tenant, auth_url=ep)
2115+
2116+ def authenticate_glance_admin(self, keystone):
2117+ """Authenticates admin user with glance."""
2118+ self.log.debug('Authenticating glance admin...')
2119+ ep = keystone.service_catalog.url_for(service_type='image',
2120+ endpoint_type='adminURL')
2121+ return glance_client.Client(ep, token=keystone.auth_token)
2122+
2123+ def authenticate_heat_admin(self, keystone):
2124+ """Authenticates the admin user with heat."""
2125+ self.log.debug('Authenticating heat admin...')
2126+ ep = keystone.service_catalog.url_for(service_type='orchestration',
2127+ endpoint_type='publicURL')
2128+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
2129+
2130+ def authenticate_nova_user(self, keystone, user, password, tenant):
2131+ """Authenticates a regular user with nova-api."""
2132+ self.log.debug('Authenticating nova user ({})...'.format(user))
2133+ ep = keystone.service_catalog.url_for(service_type='identity',
2134+ endpoint_type='publicURL')
2135+ return nova_client.Client(username=user, api_key=password,
2136+ project_id=tenant, auth_url=ep)
2137+
2138+ def authenticate_swift_user(self, keystone, user, password, tenant):
2139+ """Authenticates a regular user with swift api."""
2140+ self.log.debug('Authenticating swift user ({})...'.format(user))
2141+ ep = keystone.service_catalog.url_for(service_type='identity',
2142+ endpoint_type='publicURL')
2143+ return swiftclient.Connection(authurl=ep,
2144+ user=user,
2145+ key=password,
2146+ tenant_name=tenant,
2147+ auth_version='2.0')
2148+
2149+ def create_cirros_image(self, glance, image_name):
2150+ """Download the latest cirros image and upload it to glance,
2151+ validate and return a resource pointer.
2152+
2153+ :param glance: pointer to authenticated glance connection
2154+ :param image_name: display name for new image
2155+ :returns: glance image pointer
2156+ """
2157+ self.log.debug('Creating glance cirros image '
2158+ '({})...'.format(image_name))
2159+
2160+ # Download cirros image
2161+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
2162+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
2163+ if http_proxy:
2164+ proxies = {'http': http_proxy}
2165+ opener = urllib.FancyURLopener(proxies)
2166+ else:
2167+ opener = urllib.FancyURLopener()
2168+
2169+ f = opener.open('http://download.cirros-cloud.net/version/released')
2170+ version = f.read().strip()
2171+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
2172+ local_path = os.path.join('tests', cirros_img)
2173+
2174+ if not os.path.exists(local_path):
2175+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
2176+ version, cirros_img)
2177+ opener.retrieve(cirros_url, local_path)
2178+ f.close()
2179+
2180+ # Create glance image
2181+ with open(local_path) as f:
2182+ image = glance.images.create(name=image_name, is_public=True,
2183+ disk_format='qcow2',
2184+ container_format='bare', data=f)
2185+
2186+ # Wait for image to reach active status
2187+ img_id = image.id
2188+ ret = self.resource_reaches_status(glance.images, img_id,
2189+ expected_stat='active',
2190+ msg='Image status wait')
2191+ if not ret:
2192+ msg = 'Glance image failed to reach expected state.'
2193+ amulet.raise_status(amulet.FAIL, msg=msg)
2194+
2195+ # Re-validate new image
2196+ self.log.debug('Validating image attributes...')
2197+ val_img_name = glance.images.get(img_id).name
2198+ val_img_stat = glance.images.get(img_id).status
2199+ val_img_pub = glance.images.get(img_id).is_public
2200+ val_img_cfmt = glance.images.get(img_id).container_format
2201+ val_img_dfmt = glance.images.get(img_id).disk_format
2202+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
2203+ 'container fmt:{} disk fmt:{}'.format(
2204+ val_img_name, val_img_pub, img_id,
2205+ val_img_stat, val_img_cfmt, val_img_dfmt))
2206+
2207+ if val_img_name == image_name and val_img_stat == 'active' \
2208+ and val_img_pub is True and val_img_cfmt == 'bare' \
2209+ and val_img_dfmt == 'qcow2':
2210+ self.log.debug(msg_attr)
2211+ else:
2212+            msg = ('Image validation failed, {}'.format(msg_attr))
2213+ amulet.raise_status(amulet.FAIL, msg=msg)
2214+
2215+ return image
2216+
2217+ def delete_image(self, glance, image):
2218+ """Delete the specified image."""
2219+
2220+ # /!\ DEPRECATION WARNING
2221+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2222+ 'delete_resource instead of delete_image.')
2223+ self.log.debug('Deleting glance image ({})...'.format(image))
2224+ return self.delete_resource(glance.images, image, msg='glance image')
2225+
2226+ def create_instance(self, nova, image_name, instance_name, flavor):
2227+ """Create the specified instance."""
2228+ self.log.debug('Creating instance '
2229+ '({}|{}|{})'.format(instance_name, image_name, flavor))
2230+ image = nova.images.find(name=image_name)
2231+ flavor = nova.flavors.find(name=flavor)
2232+ instance = nova.servers.create(name=instance_name, image=image,
2233+ flavor=flavor)
2234+
2235+ count = 1
2236+ status = instance.status
2237+ while status != 'ACTIVE' and count < 60:
2238+ time.sleep(3)
2239+ instance = nova.servers.get(instance.id)
2240+ status = instance.status
2241+ self.log.debug('instance status: {}'.format(status))
2242+ count += 1
2243+
2244+ if status != 'ACTIVE':
2245+ self.log.error('instance creation timed out')
2246+ return None
2247+
2248+ return instance
2249+
2250+ def delete_instance(self, nova, instance):
2251+ """Delete the specified instance."""
2252+
2253+ # /!\ DEPRECATION WARNING
2254+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2255+ 'delete_resource instead of delete_instance.')
2256+ self.log.debug('Deleting instance ({})...'.format(instance))
2257+ return self.delete_resource(nova.servers, instance,
2258+ msg='nova instance')
2259+
2260+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
2261+ """Create a new keypair, or return pointer if it already exists."""
2262+ try:
2263+ _keypair = nova.keypairs.get(keypair_name)
2264+ self.log.debug('Keypair ({}) already exists, '
2265+ 'using it.'.format(keypair_name))
2266+ return _keypair
2267+ except:
2268+ self.log.debug('Keypair ({}) does not exist, '
2269+ 'creating it.'.format(keypair_name))
2270+
2271+ _keypair = nova.keypairs.create(name=keypair_name)
2272+ return _keypair
2273+
2274+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
2275+ img_id=None, src_vol_id=None, snap_id=None):
2276+ """Create cinder volume, optionally from a glance image, OR
2277+ optionally as a clone of an existing volume, OR optionally
2278+        """Create a cinder volume, optionally from a glance image, as a clone
2279+        of an existing volume, or from a snapshot. Wait for the new volume's
2280+        status to reach the expected status, then validate and return a
2281+        resource pointer.
2282+ :param vol_size: size in gigabytes
2283+ :param img_id: optional glance image id
2284+ :param src_vol_id: optional source volume id to clone
2285+ :param snap_id: optional snapshot id to use
2286+ :returns: cinder volume pointer
2287+ """
2288+ # Handle parameter input and avoid impossible combinations
2289+ if img_id and not src_vol_id and not snap_id:
2290+ # Create volume from image
2291+ self.log.debug('Creating cinder volume from glance image...')
2292+ bootable = 'true'
2293+ elif src_vol_id and not img_id and not snap_id:
2294+ # Clone an existing volume
2295+ self.log.debug('Cloning cinder volume...')
2296+ bootable = cinder.volumes.get(src_vol_id).bootable
2297+ elif snap_id and not src_vol_id and not img_id:
2298+ # Create volume from snapshot
2299+ self.log.debug('Creating cinder volume from snapshot...')
2300+ snap = cinder.volume_snapshots.find(id=snap_id)
2301+ vol_size = snap.size
2302+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
2303+ bootable = cinder.volumes.get(snap_vol_id).bootable
2304+ elif not img_id and not src_vol_id and not snap_id:
2305+ # Create volume
2306+ self.log.debug('Creating cinder volume...')
2307+ bootable = 'false'
2308+ else:
2309+ # Impossible combination of parameters
2310+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
2311+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
2312+ img_id, src_vol_id,
2313+ snap_id))
2314+ amulet.raise_status(amulet.FAIL, msg=msg)
2315+
2316+ # Create new volume
2317+ try:
2318+ vol_new = cinder.volumes.create(display_name=vol_name,
2319+ imageRef=img_id,
2320+ size=vol_size,
2321+ source_volid=src_vol_id,
2322+ snapshot_id=snap_id)
2323+ vol_id = vol_new.id
2324+ except Exception as e:
2325+ msg = 'Failed to create volume: {}'.format(e)
2326+ amulet.raise_status(amulet.FAIL, msg=msg)
2327+
2328+ # Wait for volume to reach available status
2329+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
2330+ expected_stat="available",
2331+ msg="Volume status wait")
2332+ if not ret:
2333+ msg = 'Cinder volume failed to reach expected state.'
2334+ amulet.raise_status(amulet.FAIL, msg=msg)
2335+
2336+ # Re-validate new volume
2337+ self.log.debug('Validating volume attributes...')
2338+ val_vol_name = cinder.volumes.get(vol_id).display_name
2339+ val_vol_boot = cinder.volumes.get(vol_id).bootable
2340+ val_vol_stat = cinder.volumes.get(vol_id).status
2341+ val_vol_size = cinder.volumes.get(vol_id).size
2342+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
2343+ '{} size:{}'.format(val_vol_name, vol_id,
2344+ val_vol_stat, val_vol_boot,
2345+ val_vol_size))
2346+
2347+ if val_vol_boot == bootable and val_vol_stat == 'available' \
2348+ and val_vol_name == vol_name and val_vol_size == vol_size:
2349+ self.log.debug(msg_attr)
2350+ else:
2351+ msg = ('Volume validation failed, {}'.format(msg_attr))
2352+ amulet.raise_status(amulet.FAIL, msg=msg)
2353+
2354+ return vol_new
2355+
2356+ def delete_resource(self, resource, resource_id,
2357+ msg="resource", max_wait=120):
2358+ """Delete one openstack resource, such as one instance, keypair,
2359+ image, volume, stack, etc., and confirm deletion within max wait time.
2360+
2361+ :param resource: pointer to os resource type, ex:glance_client.images
2362+ :param resource_id: unique name or id for the openstack resource
2363+ :param msg: text to identify purpose in logging
2364+ :param max_wait: maximum wait time in seconds
2365+ :returns: True if successful, otherwise False
2366+ """
2367+ self.log.debug('Deleting OpenStack resource '
2368+ '{} ({})'.format(resource_id, msg))
2369+ num_before = len(list(resource.list()))
2370+ resource.delete(resource_id)
2371+
2372+ tries = 0
2373+ num_after = len(list(resource.list()))
2374+ while num_after != (num_before - 1) and tries < (max_wait / 4):
2375+ self.log.debug('{} delete check: '
2376+ '{} [{}:{}] {}'.format(msg, tries,
2377+ num_before,
2378+ num_after,
2379+ resource_id))
2380+ time.sleep(4)
2381+ num_after = len(list(resource.list()))
2382+ tries += 1
2383+
2384+ self.log.debug('{}: expected, actual count = {}, '
2385+ '{}'.format(msg, num_before - 1, num_after))
2386+
2387+ if num_after == (num_before - 1):
2388+ return True
2389+ else:
2390+ self.log.error('{} delete timed out'.format(msg))
2391+ return False
2392+
2393+ def resource_reaches_status(self, resource, resource_id,
2394+ expected_stat='available',
2395+ msg='resource', max_wait=120):
2396+        """Wait for an openstack resource's status to reach an
2397+ expected status within a specified time. Useful to confirm that
2398+ nova instances, cinder vols, snapshots, glance images, heat stacks
2399+ and other resources eventually reach the expected status.
2400+
2401+ :param resource: pointer to os resource type, ex: heat_client.stacks
2402+ :param resource_id: unique id for the openstack resource
2403+ :param expected_stat: status to expect resource to reach
2404+ :param msg: text to identify purpose in logging
2405+ :param max_wait: maximum wait time in seconds
2406+ :returns: True if successful, False if status is not reached
2407+ """
2408+
2409+ tries = 0
2410+ resource_stat = resource.get(resource_id).status
2411+ while resource_stat != expected_stat and tries < (max_wait / 4):
2412+ self.log.debug('{} status check: '
2413+ '{} [{}:{}] {}'.format(msg, tries,
2414+ resource_stat,
2415+ expected_stat,
2416+ resource_id))
2417+ time.sleep(4)
2418+ resource_stat = resource.get(resource_id).status
2419+ tries += 1
2420+
2421+ self.log.debug('{}: expected, actual status = {}, '
2422+ '{}'.format(msg, resource_stat, expected_stat))
2423+
2424+ if resource_stat == expected_stat:
2425+ return True
2426+ else:
2427+ self.log.debug('{} never reached expected status: '
2428+ '{}'.format(resource_id, expected_stat))
2429+ return False
2430+
2431+ def get_ceph_osd_id_cmd(self, index):
2432+ """Produce a shell command that will return a ceph-osd id."""
2433+ return ("`initctl list | grep 'ceph-osd ' | "
2434+ "awk 'NR=={} {{ print $2 }}' | "
2435+ "grep -o '[0-9]*'`".format(index + 1))
2436+
2437+ def get_ceph_pools(self, sentry_unit):
2438+ """Return a dict of ceph pools from a single ceph unit, with
2439+        pool names as keys and pool ids as values."""
2440+ pools = {}
2441+ cmd = 'sudo ceph osd lspools'
2442+ output, code = sentry_unit.run(cmd)
2443+ if code != 0:
2444+ msg = ('{} `{}` returned {} '
2445+ '{}'.format(sentry_unit.info['unit_name'],
2446+ cmd, code, output))
2447+ amulet.raise_status(amulet.FAIL, msg=msg)
2448+
2449+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
2450+ for pool in str(output).split(','):
2451+ pool_id_name = pool.split(' ')
2452+ if len(pool_id_name) == 2:
2453+ pool_id = pool_id_name[0]
2454+ pool_name = pool_id_name[1]
2455+ pools[pool_name] = int(pool_id)
2456+
2457+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
2458+ pools))
2459+ return pools
2460+
2461+ def get_ceph_df(self, sentry_unit):
2462+ """Return dict of ceph df json output, including ceph pool state.
2463+
2464+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2465+ :returns: Dict of ceph df output
2466+ """
2467+ cmd = 'sudo ceph df --format=json'
2468+ output, code = sentry_unit.run(cmd)
2469+ if code != 0:
2470+ msg = ('{} `{}` returned {} '
2471+ '{}'.format(sentry_unit.info['unit_name'],
2472+ cmd, code, output))
2473+ amulet.raise_status(amulet.FAIL, msg=msg)
2474+ return json.loads(output)
2475+
2476+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
2477+ """Take a sample of attributes of a ceph pool, returning ceph
2478+ pool name, object count and disk space used for the specified
2479+ pool ID number.
2480+
2481+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2482+ :param pool_id: Ceph pool ID
2483+ :returns: List of pool name, object count, kb disk space used
2484+ """
2485+ df = self.get_ceph_df(sentry_unit)
2486+ pool_name = df['pools'][pool_id]['name']
2487+ obj_count = df['pools'][pool_id]['stats']['objects']
2488+ kb_used = df['pools'][pool_id]['stats']['kb_used']
2489+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
2490+ '{} kb used'.format(pool_name, pool_id,
2491+ obj_count, kb_used))
2492+ return pool_name, obj_count, kb_used
2493+
2494+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
2495+ """Validate ceph pool samples taken over time, such as pool
2496+ object counts or pool kb used, before adding, after adding, and
2497+ after deleting items which affect those pool attributes. The
2498+ 2nd element is expected to be greater than the 1st; 3rd is expected
2499+ to be less than the 2nd.
2500+
2501+ :param samples: List containing 3 data samples
2502+ :param sample_type: String for logging and usage context
2503+ :returns: None if successful, Failure message otherwise
2504+ """
2505+ original, created, deleted = range(3)
2506+ if samples[created] <= samples[original] or \
2507+ samples[deleted] >= samples[created]:
2508+ return ('Ceph {} samples ({}) '
2509+ 'unexpected.'.format(sample_type, samples))
2510+ else:
2511+ self.log.debug('Ceph {} samples (OK): '
2512+ '{}'.format(sample_type, samples))
2513+ return None
2514+
2515+# rabbitmq/amqp specific helpers:
2516+ def add_rmq_test_user(self, sentry_units,
2517+ username="testuser1", password="changeme"):
2518+ """Add a test user via the first rmq juju unit, check connection as
2519+ the new user against all sentry units.
2520+
2521+ :param sentry_units: list of sentry unit pointers
2522+ :param username: amqp user name, default to testuser1
2523+ :param password: amqp user password
2524+ :returns: None if successful. Raise on error.
2525+ """
2526+ self.log.debug('Adding rmq user ({})...'.format(username))
2527+
2528+ # Check that user does not already exist
2529+ cmd_user_list = 'rabbitmqctl list_users'
2530+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2531+ if username in output:
2532+ self.log.warning('User ({}) already exists, returning '
2533+ 'gracefully.'.format(username))
2534+ return
2535+
2536+ perms = '".*" ".*" ".*"'
2537+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
2538+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
2539+
2540+ # Add user via first unit
2541+ for cmd in cmds:
2542+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
2543+
2544+ # Check connection against the other sentry_units
2545+ self.log.debug('Checking user connect against units...')
2546+ for sentry_unit in sentry_units:
2547+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
2548+ username=username,
2549+ password=password)
2550+ connection.close()
2551+
2552+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
2553+ """Delete a rabbitmq user via the first rmq juju unit.
2554+
2555+ :param sentry_units: list of sentry unit pointers
2556+        :param username: amqp user name of the user to delete, default to
2557+            testuser1
2558+ :returns: None if successful or no such user.
2559+ """
2560+ self.log.debug('Deleting rmq user ({})...'.format(username))
2561+
2562+ # Check that the user exists
2563+ cmd_user_list = 'rabbitmqctl list_users'
2564+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2565+
2566+ if username not in output:
2567+ self.log.warning('User ({}) does not exist, returning '
2568+ 'gracefully.'.format(username))
2569+ return
2570+
2571+ # Delete the user
2572+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
2573+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
2574+
2575+ def get_rmq_cluster_status(self, sentry_unit):
2576+ """Execute rabbitmq cluster status command on a unit and return
2577+ the full output.
2578+
2579+ :param unit: sentry unit
2580+ :returns: String containing console output of cluster status command
2581+ """
2582+ cmd = 'rabbitmqctl cluster_status'
2583+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
2584+ self.log.debug('{} cluster_status:\n{}'.format(
2585+ sentry_unit.info['unit_name'], output))
2586+ return str(output)
2587+
2588+ def get_rmq_cluster_running_nodes(self, sentry_unit):
2589+ """Parse rabbitmqctl cluster_status output string, return list of
2590+ running rabbitmq cluster nodes.
2591+
2592+ :param unit: sentry unit
2593+ :returns: List containing node names of running nodes
2594+ """
2595+ # NOTE(beisner): rabbitmqctl cluster_status output is not
2596+ # json-parsable, do string chop foo, then json.loads that.
2597+ str_stat = self.get_rmq_cluster_status(sentry_unit)
2598+ if 'running_nodes' in str_stat:
2599+ pos_start = str_stat.find("{running_nodes,") + 15
2600+ pos_end = str_stat.find("]},", pos_start) + 1
2601+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
2602+ run_nodes = json.loads(str_run_nodes)
2603+ return run_nodes
2604+ else:
2605+ return []
2606+
2607+ def validate_rmq_cluster_running_nodes(self, sentry_units):
2608+ """Check that all rmq unit hostnames are represented in the
2609+ cluster_status output of all units.
2610+
2611+        :param sentry_units: list of sentry unit pointers (all rmq units);
2612+            their host names are resolved via self.get_unit_hostnames
2613+ :returns: None if successful, otherwise return error message
2614+ """
2615+ host_names = self.get_unit_hostnames(sentry_units)
2616+ errors = []
2617+
2618+ # Query every unit for cluster_status running nodes
2619+ for query_unit in sentry_units:
2620+ query_unit_name = query_unit.info['unit_name']
2621+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
2622+
2623+ # Confirm that every unit is represented in the queried unit's
2624+ # cluster_status running nodes output.
2625+ for validate_unit in sentry_units:
2626+ val_host_name = host_names[validate_unit.info['unit_name']]
2627+ val_node_name = 'rabbit@{}'.format(val_host_name)
2628+
2629+ if val_node_name not in running_nodes:
2630+ errors.append('Cluster member check failed on {}: {} not '
2631+ 'in {}\n'.format(query_unit_name,
2632+ val_node_name,
2633+ running_nodes))
2634+ if errors:
2635+ return ''.join(errors)
2636+
2637+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
2638+ """Check a single juju rmq unit for ssl and port in the config file."""
2639+ host = sentry_unit.info['public-address']
2640+ unit_name = sentry_unit.info['unit_name']
2641+
2642+ conf_file = '/etc/rabbitmq/rabbitmq.config'
2643+ conf_contents = str(self.file_contents_safe(sentry_unit,
2644+ conf_file, max_wait=16))
2645+ # Checks
2646+ conf_ssl = 'ssl' in conf_contents
2647+ conf_port = str(port) in conf_contents
2648+
2649+ # Port explicitly checked in config
2650+ if port and conf_port and conf_ssl:
2651+ self.log.debug('SSL is enabled @{}:{} '
2652+ '({})'.format(host, port, unit_name))
2653+ return True
2654+ elif port and not conf_port and conf_ssl:
2655+ self.log.debug('SSL is enabled @{} but not on port {} '
2656+ '({})'.format(host, port, unit_name))
2657+ return False
2658+ # Port not checked (useful when checking that ssl is disabled)
2659+ elif not port and conf_ssl:
2660+ self.log.debug('SSL is enabled @{}:{} '
2661+ '({})'.format(host, port, unit_name))
2662+ return True
2663+ elif not conf_ssl:
2664+ self.log.debug('SSL not enabled @{}:{} '
2665+ '({})'.format(host, port, unit_name))
2666+ return False
2667+ else:
2668+ msg = ('Unknown condition when checking SSL status @{}:{} '
2669+ '({})'.format(host, port, unit_name))
2670+ amulet.raise_status(amulet.FAIL, msg)
2671+
2672+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
2673+ """Check that ssl is enabled on rmq juju sentry units.
2674+
2675+ :param sentry_units: list of all rmq sentry units
2676+ :param port: optional ssl port override to validate
2677+ :returns: None if successful, otherwise return error message
2678+ """
2679+ for sentry_unit in sentry_units:
2680+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
2681+ return ('Unexpected condition: ssl is disabled on unit '
2682+ '({})'.format(sentry_unit.info['unit_name']))
2683+ return None
2684+
2685+ def validate_rmq_ssl_disabled_units(self, sentry_units):
2686+        """Check that ssl is disabled on listed rmq juju sentry units.
2687+
2688+        :param sentry_units: list of all rmq sentry units
2689+        :returns: None if successful, otherwise return error message
2690+ """
2691+ for sentry_unit in sentry_units:
2692+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
2693+ return ('Unexpected condition: ssl is enabled on unit '
2694+ '({})'.format(sentry_unit.info['unit_name']))
2695+ return None
2696+
2697+ def configure_rmq_ssl_on(self, sentry_units, deployment,
2698+ port=None, max_wait=60):
2699+ """Turn ssl charm config option on, with optional non-default
2700+ ssl port specification. Confirm that it is enabled on every
2701+ unit.
2702+
2703+ :param sentry_units: list of sentry units
2704+ :param deployment: amulet deployment object pointer
2705+ :param port: amqp port, use defaults if None
2706+ :param max_wait: maximum time to wait in seconds to confirm
2707+ :returns: None if successful. Raise on error.
2708+ """
2709+ self.log.debug('Setting ssl charm config option: on')
2710+
2711+ # Enable RMQ SSL
2712+ config = {'ssl': 'on'}
2713+ if port:
2714+ config['ssl_port'] = port
2715+
2716+ deployment.configure('rabbitmq-server', config)
2717+
2718+ # Confirm
2719+ tries = 0
2720+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2721+ while ret and tries < (max_wait / 4):
2722+ time.sleep(4)
2723+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2724+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2725+ tries += 1
2726+
2727+ if ret:
2728+ amulet.raise_status(amulet.FAIL, ret)
2729+
2730+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
2731+ """Turn ssl charm config option off, confirm that it is disabled
2732+ on every unit.
2733+
2734+ :param sentry_units: list of sentry units
2735+ :param deployment: amulet deployment object pointer
2736+ :param max_wait: maximum time to wait in seconds to confirm
2737+ :returns: None if successful. Raise on error.
2738+ """
2739+ self.log.debug('Setting ssl charm config option: off')
2740+
2741+ # Disable RMQ SSL
2742+ config = {'ssl': 'off'}
2743+ deployment.configure('rabbitmq-server', config)
2744+
2745+ # Confirm
2746+ tries = 0
2747+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2748+ while ret and tries < (max_wait / 4):
2749+ time.sleep(4)
2750+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2751+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2752+ tries += 1
2753+
2754+ if ret:
2755+ amulet.raise_status(amulet.FAIL, ret)
2756+
2757+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
2758+ port=None, fatal=True,
2759+ username="testuser1", password="changeme"):
2760+ """Establish and return a pika amqp connection to the rabbitmq service
2761+ running on a rmq juju unit.
2762+
2763+ :param sentry_unit: sentry unit pointer
2764+ :param ssl: boolean, default to False
2765+ :param port: amqp port, use defaults if None
2766+ :param fatal: boolean, default to True (raises on connect error)
2767+ :param username: amqp user name, default to testuser1
2768+ :param password: amqp user password
2769+ :returns: pika amqp connection pointer or None if failed and non-fatal
2770+ """
2771+ host = sentry_unit.info['public-address']
2772+ unit_name = sentry_unit.info['unit_name']
2773+
2774+ # Default port logic if port is not specified
2775+ if ssl and not port:
2776+ port = 5671
2777+ elif not ssl and not port:
2778+ port = 5672
2779+
2780+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
2781+ '{}...'.format(host, port, unit_name, username))
2782+
2783+ try:
2784+ credentials = pika.PlainCredentials(username, password)
2785+ parameters = pika.ConnectionParameters(host=host, port=port,
2786+ credentials=credentials,
2787+ ssl=ssl,
2788+ connection_attempts=3,
2789+ retry_delay=5,
2790+ socket_timeout=1)
2791+ connection = pika.BlockingConnection(parameters)
2792+ assert connection.server_properties['product'] == 'RabbitMQ'
2793+ self.log.debug('Connect OK')
2794+ return connection
2795+ except Exception as e:
2796+ msg = ('amqp connection failed to {}:{} as '
2797+ '{} ({})'.format(host, port, username, str(e)))
2798+ if fatal:
2799+ amulet.raise_status(amulet.FAIL, msg)
2800+ else:
2801+ self.log.warn(msg)
2802+ return None
2803+
2804+ def publish_amqp_message_by_unit(self, sentry_unit, message,
2805+ queue="test", ssl=False,
2806+ username="testuser1",
2807+ password="changeme",
2808+ port=None):
2809+ """Publish an amqp message to a rmq juju unit.
2810+
2811+ :param sentry_unit: sentry unit pointer
2812+ :param message: amqp message string
2813+ :param queue: message queue, default to test
2814+ :param username: amqp user name, default to testuser1
2815+ :param password: amqp user password
2816+ :param ssl: boolean, default to False
2817+ :param port: amqp port, use defaults if None
2818+ :returns: None. Raises exception if publish failed.
2819+ """
2820+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
2821+ message))
2822+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
2823+ port=port,
2824+ username=username,
2825+ password=password)
2826+
2827+ # NOTE(beisner): extra debug here re: pika hang potential:
2828+ # https://github.com/pika/pika/issues/297
2829+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
2830+ self.log.debug('Defining channel...')
2831+ channel = connection.channel()
2832+ self.log.debug('Declaring queue...')
2833+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
2834+ self.log.debug('Publishing message...')
2835+ channel.basic_publish(exchange='', routing_key=queue, body=message)
2836+ self.log.debug('Closing channel...')
2837+ channel.close()
2838+ self.log.debug('Closing connection...')
2839+ connection.close()
2840+
2841+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
2842+ username="testuser1",
2843+ password="changeme",
2844+ ssl=False, port=None):
2845+ """Get an amqp message from a rmq juju unit.
2846+
2847+ :param sentry_unit: sentry unit pointer
2848+ :param queue: message queue, default to test
2849+ :param username: amqp user name, default to testuser1
2850+ :param password: amqp user password
2851+ :param ssl: boolean, default to False
2852+ :param port: amqp port, use defaults if None
2853+ :returns: amqp message body as string. Raise if get fails.
2854+ """
2855+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
2856+ port=port,
2857+ username=username,
2858+ password=password)
2859+ channel = connection.channel()
2860+ method_frame, _, body = channel.basic_get(queue)
2861+
2862+ if method_frame:
2863+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
2864+ body))
2865+ channel.basic_ack(method_frame.delivery_tag)
2866+ channel.close()
2867+ connection.close()
2868+ return body
2869+ else:
2870+ msg = 'No message retrieved.'
2871+ amulet.raise_status(amulet.FAIL, msg)
2872
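
To show how these utilities hang together, a short hedged sketch of a typical
test flow; the keystone sentry unit and credentials are placeholders and are
not defined by this merge:

    from charmhelpers.contrib.openstack.amulet.utils import (
        DEBUG,
        OpenStackAmuletUtils,
    )

    u = OpenStackAmuletUtils(DEBUG)

    # keystone_sentry would come from the amulet deployment's sentry map.
    keystone = u.authenticate_keystone_admin(keystone_sentry,
                                             user='admin',
                                             password='openstack',
                                             tenant='admin')
    glance = u.authenticate_glance_admin(keystone)

    # Upload a cirros image, wait for it to go active, then delete it and
    # confirm the deletion within the default timeout.
    image = u.create_cirros_image(glance, 'cirros-test-image')
    assert u.delete_resource(glance.images, image.id, msg='glance image')
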
2873=== added file 'charmhelpers/contrib/openstack/context.py'
2874--- charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
2875+++ charmhelpers/contrib/openstack/context.py 2015-10-08 20:44:37 +0000
2876@@ -0,0 +1,1427 @@
2877+# Copyright 2014-2015 Canonical Limited.
2878+#
2879+# This file is part of charm-helpers.
2880+#
2881+# charm-helpers is free software: you can redistribute it and/or modify
2882+# it under the terms of the GNU Lesser General Public License version 3 as
2883+# published by the Free Software Foundation.
2884+#
2885+# charm-helpers is distributed in the hope that it will be useful,
2886+# but WITHOUT ANY WARRANTY; without even the implied warranty of
2887+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2888+# GNU Lesser General Public License for more details.
2889+#
2890+# You should have received a copy of the GNU Lesser General Public License
2891+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2892+
2893+import glob
2894+import json
2895+import os
2896+import re
2897+import time
2898+from base64 import b64decode
2899+from subprocess import check_call
2900+
2901+import six
2902+import yaml
2903+
2904+from charmhelpers.fetch import (
2905+ apt_install,
2906+ filter_installed_packages,
2907+)
2908+from charmhelpers.core.hookenv import (
2909+ config,
2910+ is_relation_made,
2911+ local_unit,
2912+ log,
2913+ relation_get,
2914+ relation_ids,
2915+ related_units,
2916+ relation_set,
2917+ unit_get,
2918+ unit_private_ip,
2919+ charm_name,
2920+ DEBUG,
2921+ INFO,
2922+ WARNING,
2923+ ERROR,
2924+)
2925+
2926+from charmhelpers.core.sysctl import create as sysctl_create
2927+from charmhelpers.core.strutils import bool_from_string
2928+
2929+from charmhelpers.core.host import (
2930+ get_bond_master,
2931+ is_phy_iface,
2932+ list_nics,
2933+ get_nic_hwaddr,
2934+ mkdir,
2935+ write_file,
2936+)
2937+from charmhelpers.contrib.hahelpers.cluster import (
2938+ determine_apache_port,
2939+ determine_api_port,
2940+ https,
2941+ is_clustered,
2942+)
2943+from charmhelpers.contrib.hahelpers.apache import (
2944+ get_cert,
2945+ get_ca_cert,
2946+ install_ca_cert,
2947+)
2948+from charmhelpers.contrib.openstack.neutron import (
2949+ neutron_plugin_attribute,
2950+ parse_data_port_mappings,
2951+)
2952+from charmhelpers.contrib.openstack.ip import (
2953+ resolve_address,
2954+ INTERNAL,
2955+)
2956+from charmhelpers.contrib.network.ip import (
2957+ get_address_in_network,
2958+ get_ipv4_addr,
2959+ get_ipv6_addr,
2960+ get_netmask_for_address,
2961+ format_ipv6_addr,
2962+ is_address_in_network,
2963+ is_bridge_member,
2964+)
2965+from charmhelpers.contrib.openstack.utils import get_host_ip
2966+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
2967+ADDRESS_TYPES = ['admin', 'internal', 'public']
2968+
2969+
2970+class OSContextError(Exception):
2971+ pass
2972+
2973+
2974+def ensure_packages(packages):
2975+ """Install but do not upgrade required plugin packages."""
2976+ required = filter_installed_packages(packages)
2977+ if required:
2978+ apt_install(required, fatal=True)
2979+
2980+
2981+def context_complete(ctxt):
2982+ _missing = []
2983+ for k, v in six.iteritems(ctxt):
2984+ if v is None or v == '':
2985+ _missing.append(k)
2986+
2987+ if _missing:
2988+ log('Missing required data: %s' % ' '.join(_missing), level=INFO)
2989+ return False
2990+
2991+ return True
2992+
2993+
2994+def config_flags_parser(config_flags):
2995+ """Parses config flags string into dict.
2996+
2997+ This parsing method supports a few different formats for the config
2998+ flag values to be parsed:
2999+
3000+ 1. A string in the simple format of key=value pairs, with the possibility
3001+ of specifying multiple key value pairs within the same string. For
3002+ example, a string in the format of 'key1=value1, key2=value2' will
3003+ return a dict of:
3004+
3005+ {'key1': 'value1',
3006+ 'key2': 'value2'}.
3007+
3008+ 2. A string in the above format, but supporting a comma-delimited list
3009+ of values for the same key. For example, a string in the format of
3010+ 'key1=value1, key2=value2,value3,value4' will return a dict of:
3011+
3012+ {'key1': 'value1',
3013+ 'key2': 'value2,value3,value4'}
3014+
3015+ 3. A string containing a colon character (:) prior to an equal
3016+ character (=) will be treated as yaml and parsed as such. This can be
3017+ used to specify more complex key value pairs. For example,
3018+ a string in the format of 'key1: subkey1=value1, subkey2=value2' will
3019+ return a dict of:
3020+
3021+ {'key1': 'subkey1=value1, subkey2=value2'}
3022+
3023+ The provided config_flags string may be a list of comma-separated values
3024+ which themselves may be comma-separated lists of values.
3025+ """
3026+ # If we find a colon before an equals sign then treat it as yaml.
3027+ # Note: limit it to finding the colon first since this indicates assignment
3028+ # for inline yaml.
3029+ colon = config_flags.find(':')
3030+ equals = config_flags.find('=')
3031+ if colon > 0:
3032+ if colon < equals or equals < 0:
3033+ return yaml.safe_load(config_flags)
3034+
3035+ if config_flags.find('==') >= 0:
3036+ log("config_flags is not in expected format (key=value)", level=ERROR)
3037+ raise OSContextError
3038+
3039+ # strip the following from each value.
3040+ post_strippers = ' ,'
3041+ # we strip any leading/trailing '=' or ' ' from the string then
3042+ # split on '='.
3043+ split = config_flags.strip(' =').split('=')
3044+ limit = len(split)
3045+ flags = {}
3046+ for i in range(0, limit - 1):
3047+ current = split[i]
3048+ next = split[i + 1]
3049+ vindex = next.rfind(',')
3050+ if (i == limit - 2) or (vindex < 0):
3051+ value = next
3052+ else:
3053+ value = next[:vindex]
3054+
3055+ if i == 0:
3056+ key = current
3057+ else:
3058+ # if this is not the first entry, expect an embedded key.
3059+ index = current.rfind(',')
3060+ if index < 0:
3061+ log("Invalid config value(s) at index %s" % (i), level=ERROR)
3062+ raise OSContextError
3063+ key = current[index + 1:]
3064+
3065+ # Add to collection.
3066+ flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
3067+
3068+ return flags
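# Editor's illustrative sketch (commented out; not part of the synced
# charm-helpers module), exercising the three formats described in the
# docstring above:
#
#     from charmhelpers.contrib.openstack.context import config_flags_parser
#
#     # 1. simple key=value pairs
#     config_flags_parser('key1=value1, key2=value2')
#     # -> {'key1': 'value1', 'key2': 'value2'}
#
#     # 2. comma-delimited list of values for one key
#     config_flags_parser('key1=value1, key2=value2,value3,value4')
#     # -> {'key1': 'value1', 'key2': 'value2,value3,value4'}
#
#     # 3. a colon before '=' switches to inline yaml
#     config_flags_parser('key1: subkey1=value1, subkey2=value2')
#     # -> {'key1': 'subkey1=value1, subkey2=value2'}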
3069+
3070+
3071+class OSContextGenerator(object):
3072+ """Base class for all context generators."""
3073+ interfaces = []
3074+ related = False
3075+ complete = False
3076+ missing_data = []
3077+
3078+ def __call__(self):
3079+ raise NotImplementedError
3080+
3081+ def context_complete(self, ctxt):
3082+ """Check for missing data for the required context data.
3083+ Set self.missing_data if it exists and return False.
3084+ Set self.complete if no missing data and return True.
3085+ """
3086+ # Fresh start
3087+ self.complete = False
3088+ self.missing_data = []
3089+ for k, v in six.iteritems(ctxt):
3090+ if v is None or v == '':
3091+ if k not in self.missing_data:
3092+ self.missing_data.append(k)
3093+
3094+ if self.missing_data:
3095+ self.complete = False
3096+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
3097+ else:
3098+ self.complete = True
3099+ return self.complete
3100+
3101+ def get_related(self):
3102+ """Check if any of the context interfaces have relation ids.
3103+ Set self.related and return True if one of the interfaces
3104+ has relation ids.
3105+ """
3106+ # Fresh start
3107+ self.related = False
3108+ try:
3109+ for interface in self.interfaces:
3110+ if relation_ids(interface):
3111+ self.related = True
3112+ return self.related
3113+ except AttributeError as e:
3114+ log("{} {}"
3115+ "".format(self, e), 'INFO')
3116+ return self.related
3117+
3118+
3119+class SharedDBContext(OSContextGenerator):
3120+ interfaces = ['shared-db']
3121+
3122+ def __init__(self,
3123+ database=None, user=None, relation_prefix=None, ssl_dir=None):
3124+ """Allows inspecting relation for settings prefixed with
3125+ relation_prefix. This is useful for parsing access for multiple
3126+ databases returned via the shared-db interface (eg, nova_password,
3127+ quantum_password)
3128+ """
3129+ self.relation_prefix = relation_prefix
3130+ self.database = database
3131+ self.user = user
3132+ self.ssl_dir = ssl_dir
3133+ self.rel_name = self.interfaces[0]
3134+
3135+ def __call__(self):
3136+ self.database = self.database or config('database')
3137+ self.user = self.user or config('database-user')
3138+ if None in [self.database, self.user]:
3139+ log("Could not generate shared_db context. Missing required charm "
3140+ "config options. (database name and user)", level=ERROR)
3141+ raise OSContextError
3142+
3143+ ctxt = {}
3144+
3145+ # NOTE(jamespage) if mysql charm provides a network upon which
3146+ # access to the database should be made, reconfigure relation
3147+ # with the service unit's local address and defer execution
3148+ access_network = relation_get('access-network')
3149+ if access_network is not None:
3150+ if self.relation_prefix is not None:
3151+ hostname_key = "{}_hostname".format(self.relation_prefix)
3152+ else:
3153+ hostname_key = "hostname"
3154+ access_hostname = get_address_in_network(access_network,
3155+ unit_get('private-address'))
3156+ set_hostname = relation_get(attribute=hostname_key,
3157+ unit=local_unit())
3158+ if set_hostname != access_hostname:
3159+ relation_set(relation_settings={hostname_key: access_hostname})
3160+ return None # Defer any further hook execution for now....
3161+
3162+ password_setting = 'password'
3163+ if self.relation_prefix:
3164+ password_setting = self.relation_prefix + '_password'
3165+
3166+ for rid in relation_ids(self.interfaces[0]):
3167+ self.related = True
3168+ for unit in related_units(rid):
3169+ rdata = relation_get(rid=rid, unit=unit)
3170+ host = rdata.get('db_host')
3171+ host = format_ipv6_addr(host) or host
3172+ ctxt = {
3173+ 'database_host': host,
3174+ 'database': self.database,
3175+ 'database_user': self.user,
3176+ 'database_password': rdata.get(password_setting),
3177+ 'database_type': 'mysql'
3178+ }
3179+ if self.context_complete(ctxt):
3180+ db_ssl(rdata, ctxt, self.ssl_dir)
3181+ return ctxt
3182+ return {}
3183+
3184+
3185+class PostgresqlDBContext(OSContextGenerator):
3186+ interfaces = ['pgsql-db']
3187+
3188+ def __init__(self, database=None):
3189+ self.database = database
3190+
3191+ def __call__(self):
3192+ self.database = self.database or config('database')
3193+ if self.database is None:
3194+ log('Could not generate postgresql_db context. Missing required '
3195+ 'charm config options. (database name)', level=ERROR)
3196+ raise OSContextError
3197+
3198+ ctxt = {}
3199+ for rid in relation_ids(self.interfaces[0]):
3200+ self.related = True
3201+ for unit in related_units(rid):
3202+ rel_host = relation_get('host', rid=rid, unit=unit)
3203+ rel_user = relation_get('user', rid=rid, unit=unit)
3204+ rel_passwd = relation_get('password', rid=rid, unit=unit)
3205+ ctxt = {'database_host': rel_host,
3206+ 'database': self.database,
3207+ 'database_user': rel_user,
3208+ 'database_password': rel_passwd,
3209+ 'database_type': 'postgresql'}
3210+ if self.context_complete(ctxt):
3211+ return ctxt
3212+
3213+ return {}
3214+
3215+
3216+def db_ssl(rdata, ctxt, ssl_dir):
3217+ if 'ssl_ca' in rdata and ssl_dir:
3218+ ca_path = os.path.join(ssl_dir, 'db-client.ca')
3219+ with open(ca_path, 'w') as fh:
3220+ fh.write(b64decode(rdata['ssl_ca']))
3221+
3222+ ctxt['database_ssl_ca'] = ca_path
3223+ elif 'ssl_ca' in rdata:
3224+ log("Charm not setup for ssl support but ssl ca found", level=INFO)
3225+ return ctxt
3226+
3227+ if 'ssl_cert' in rdata:
3228+ cert_path = os.path.join(
3229+ ssl_dir, 'db-client.cert')
3230+ if not os.path.exists(cert_path):
3231+ log("Waiting 1m for ssl client cert validity", level=INFO)
3232+ time.sleep(60)
3233+
3234+ with open(cert_path, 'w') as fh:
3235+ fh.write(b64decode(rdata['ssl_cert']))
3236+
3237+ ctxt['database_ssl_cert'] = cert_path
3238+ key_path = os.path.join(ssl_dir, 'db-client.key')
3239+ with open(key_path, 'w') as fh:
3240+ fh.write(b64decode(rdata['ssl_key']))
3241+
3242+ ctxt['database_ssl_key'] = key_path
3243+
3244+ return ctxt
3245+
3246+
3247+class IdentityServiceContext(OSContextGenerator):
3248+
3249+ def __init__(self, service=None, service_user=None, rel_name='identity-service'):
3250+ self.service = service
3251+ self.service_user = service_user
3252+ self.rel_name = rel_name
3253+ self.interfaces = [self.rel_name]
3254+
3255+ def __call__(self):
3256+ log('Generating template context for ' + self.rel_name, level=DEBUG)
3257+ ctxt = {}
3258+
3259+ if self.service and self.service_user:
3260+ # This is required for pki token signing if we don't want /tmp to
3261+ # be used.
3262+ cachedir = '/var/cache/%s' % (self.service)
3263+ if not os.path.isdir(cachedir):
3264+ log("Creating service cache dir %s" % (cachedir), level=DEBUG)
3265+ mkdir(path=cachedir, owner=self.service_user,
3266+ group=self.service_user, perms=0o700)
3267+
3268+ ctxt['signing_dir'] = cachedir
3269+
3270+ for rid in relation_ids(self.rel_name):
3271+ self.related = True
3272+ for unit in related_units(rid):
3273+ rdata = relation_get(rid=rid, unit=unit)
3274+ serv_host = rdata.get('service_host')
3275+ serv_host = format_ipv6_addr(serv_host) or serv_host
3276+ auth_host = rdata.get('auth_host')
3277+ auth_host = format_ipv6_addr(auth_host) or auth_host
3278+ svc_protocol = rdata.get('service_protocol') or 'http'
3279+ auth_protocol = rdata.get('auth_protocol') or 'http'
3280+ ctxt.update({'service_port': rdata.get('service_port'),
3281+ 'service_host': serv_host,
3282+ 'auth_host': auth_host,
3283+ 'auth_port': rdata.get('auth_port'),
3284+ 'admin_tenant_name': rdata.get('service_tenant'),
3285+ 'admin_user': rdata.get('service_username'),
3286+ 'admin_password': rdata.get('service_password'),
3287+ 'service_protocol': svc_protocol,
3288+ 'auth_protocol': auth_protocol})
3289+
3290+ if self.context_complete(ctxt):
3291+ # NOTE(jamespage) this is required for >= icehouse
3292+ # so a missing value just indicates keystone needs
3293+ # upgrading
3294+ ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
3295+ return ctxt
3296+
3297+ return {}
3298+
3299+
3300+class AMQPContext(OSContextGenerator):
3301+
3302+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
3303+ self.ssl_dir = ssl_dir
3304+ self.rel_name = rel_name
3305+ self.relation_prefix = relation_prefix
3306+ self.interfaces = [rel_name]
3307+
3308+ def __call__(self):
3309+ log('Generating template context for amqp', level=DEBUG)
3310+ conf = config()
3311+ if self.relation_prefix:
3312+ user_setting = '%s-rabbit-user' % (self.relation_prefix)
3313+ vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
3314+ else:
3315+ user_setting = 'rabbit-user'
3316+ vhost_setting = 'rabbit-vhost'
3317+
3318+ try:
3319+ username = conf[user_setting]
3320+ vhost = conf[vhost_setting]
3321+ except KeyError as e:
3322+ log('Could not generate amqp context. Missing required charm '
3323+ 'config options: %s.' % e, level=ERROR)
3324+ raise OSContextError
3325+
3326+ ctxt = {}
3327+ for rid in relation_ids(self.rel_name):
3328+ ha_vip_only = False
3329+ self.related = True
3330+ for unit in related_units(rid):
3331+ if relation_get('clustered', rid=rid, unit=unit):
3332+ ctxt['clustered'] = True
3333+ vip = relation_get('vip', rid=rid, unit=unit)
3334+ vip = format_ipv6_addr(vip) or vip
3335+ ctxt['rabbitmq_host'] = vip
3336+ else:
3337+ host = relation_get('private-address', rid=rid, unit=unit)
3338+ host = format_ipv6_addr(host) or host
3339+ ctxt['rabbitmq_host'] = host
3340+
3341+ ctxt.update({
3342+ 'rabbitmq_user': username,
3343+ 'rabbitmq_password': relation_get('password', rid=rid,
3344+ unit=unit),
3345+ 'rabbitmq_virtual_host': vhost,
3346+ })
3347+
3348+ ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
3349+ if ssl_port:
3350+ ctxt['rabbit_ssl_port'] = ssl_port
3351+
3352+ ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
3353+ if ssl_ca:
3354+ ctxt['rabbit_ssl_ca'] = ssl_ca
3355+
3356+ if relation_get('ha_queues', rid=rid, unit=unit) is not None:
3357+ ctxt['rabbitmq_ha_queues'] = True
3358+
3359+ ha_vip_only = relation_get('ha-vip-only',
3360+ rid=rid, unit=unit) is not None
3361+
3362+ if self.context_complete(ctxt):
3363+ if 'rabbit_ssl_ca' in ctxt:
3364+ if not self.ssl_dir:
3365+ log("Charm not setup for ssl support but ssl ca "
3366+ "found", level=INFO)
3367+ break
3368+
3369+ ca_path = os.path.join(
3370+ self.ssl_dir, 'rabbit-client-ca.pem')
3371+ with open(ca_path, 'w') as fh:
3372+ fh.write(b64decode(ctxt['rabbit_ssl_ca']))
3373+ ctxt['rabbit_ssl_ca'] = ca_path
3374+
3375+ # Sufficient information found = break out!
3376+ break
3377+
3378+ # Used for active/active rabbitmq >= grizzly
3379+ if (('clustered' not in ctxt or ha_vip_only) and
3380+ len(related_units(rid)) > 1):
3381+ rabbitmq_hosts = []
3382+ for unit in related_units(rid):
3383+ host = relation_get('private-address', rid=rid, unit=unit)
3384+ host = format_ipv6_addr(host) or host
3385+ rabbitmq_hosts.append(host)
3386+
3387+ ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
3388+
3389+ oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
3390+ if oslo_messaging_flags:
3391+ ctxt['oslo_messaging_flags'] = config_flags_parser(
3392+ oslo_messaging_flags)
3393+
3394+ if not self.complete:
3395+ return {}
3396+
3397+ return ctxt
3398+
3399+
3400+class CephContext(OSContextGenerator):
3401+ """Generates context for /etc/ceph/ceph.conf templates."""
3402+ interfaces = ['ceph']
3403+
3404+ def __call__(self):
3405+ if not relation_ids('ceph'):
3406+ return {}
3407+
3408+ log('Generating template context for ceph', level=DEBUG)
3409+ mon_hosts = []
3410+ ctxt = {
3411+ 'use_syslog': str(config('use-syslog')).lower()
3412+ }
3413+ for rid in relation_ids('ceph'):
3414+ for unit in related_units(rid):
3415+ if not ctxt.get('auth'):
3416+ ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
3417+ if not ctxt.get('key'):
3418+ ctxt['key'] = relation_get('key', rid=rid, unit=unit)
3419+ ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
3420+ unit=unit)
3421+ unit_priv_addr = relation_get('private-address', rid=rid,
3422+ unit=unit)
3423+ ceph_addr = ceph_pub_addr or unit_priv_addr
3424+ ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
3425+ mon_hosts.append(ceph_addr)
3426+
3427+ ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
3428+
3429+ if not os.path.isdir('/etc/ceph'):
3430+ os.mkdir('/etc/ceph')
3431+
3432+ if not self.context_complete(ctxt):
3433+ return {}
3434+
3435+ ensure_packages(['ceph-common'])
3436+ return ctxt
3437+
3438+
3439+class HAProxyContext(OSContextGenerator):
3440+ """Provides half a context for the haproxy template, which describes
3441+ all peers to be included in the cluster. Each charm needs to include
3442+ its own context generator that describes the port mapping.
3443+ """
3444+ interfaces = ['cluster']
3445+
3446+ def __init__(self, singlenode_mode=False):
3447+ self.singlenode_mode = singlenode_mode
3448+
3449+ def __call__(self):
3450+ if not relation_ids('cluster') and not self.singlenode_mode:
3451+ return {}
3452+
3453+ if config('prefer-ipv6'):
3454+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
3455+ else:
3456+ addr = get_host_ip(unit_get('private-address'))
3457+
3458+ l_unit = local_unit().replace('/', '-')
3459+ cluster_hosts = {}
3460+
3461+ # NOTE(jamespage): build out map of configured network endpoints
3462+ # and associated backends
3463+ for addr_type in ADDRESS_TYPES:
3464+ cfg_opt = 'os-{}-network'.format(addr_type)
3465+ laddr = get_address_in_network(config(cfg_opt))
3466+ if laddr:
3467+ netmask = get_netmask_for_address(laddr)
3468+ cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
3469+ netmask),
3470+ 'backends': {l_unit: laddr}}
3471+ for rid in relation_ids('cluster'):
3472+ for unit in related_units(rid):
3473+ _laddr = relation_get('{}-address'.format(addr_type),
3474+ rid=rid, unit=unit)
3475+ if _laddr:
3476+ _unit = unit.replace('/', '-')
3477+ cluster_hosts[laddr]['backends'][_unit] = _laddr
3478+
3479+ # NOTE(jamespage) add backend based on private address - this
3480+ # will either be the only backend or the fallback if no acls
3481+ # match in the frontend
3482+ cluster_hosts[addr] = {}
3483+ netmask = get_netmask_for_address(addr)
3484+ cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
3485+ 'backends': {l_unit: addr}}
3486+ for rid in relation_ids('cluster'):
3487+ for unit in related_units(rid):
3488+ _laddr = relation_get('private-address',
3489+ rid=rid, unit=unit)
3490+ if _laddr:
3491+ _unit = unit.replace('/', '-')
3492+ cluster_hosts[addr]['backends'][_unit] = _laddr
3493+
3494+ ctxt = {
3495+ 'frontends': cluster_hosts,
3496+ 'default_backend': addr
3497+ }
3498+
3499+ if config('haproxy-server-timeout'):
3500+ ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
3501+
3502+ if config('haproxy-client-timeout'):
3503+ ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
3504+
3505+ if config('prefer-ipv6'):
3506+ ctxt['ipv6'] = True
3507+ ctxt['local_host'] = 'ip6-localhost'
3508+ ctxt['haproxy_host'] = '::'
3509+ ctxt['stat_port'] = ':::8888'
3510+ else:
3511+ ctxt['local_host'] = '127.0.0.1'
3512+ ctxt['haproxy_host'] = '0.0.0.0'
3513+ ctxt['stat_port'] = ':8888'
3514+
3515+ for frontend in cluster_hosts:
3516+ if (len(cluster_hosts[frontend]['backends']) > 1 or
3517+ self.singlenode_mode):
3518+ # Enable haproxy when we have enough peers.
3519+ log('Ensuring haproxy enabled in /etc/default/haproxy.',
3520+ level=DEBUG)
3521+ with open('/etc/default/haproxy', 'w') as out:
3522+ out.write('ENABLED=1\n')
3523+
3524+ return ctxt
3525+
3526+ log('HAProxy context is incomplete, this unit has no peers.',
3527+ level=INFO)
3528+ return {}
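# Editor's illustrative sketch (hypothetical unit names, addresses and
# netmask; not part of the synced module): with the local unit 'myservice/0'
# on 10.0.0.10, one peer 'myservice/1' on 10.0.0.11 and no os-*-network
# splits configured, the returned context looks roughly like:
#
#     {'frontends': {'10.0.0.10': {'network': '10.0.0.10/255.255.255.0',
#                                  'backends': {'myservice-0': '10.0.0.10',
#                                               'myservice-1': '10.0.0.11'}}},
#      'default_backend': '10.0.0.10',
#      'local_host': '127.0.0.1',
#      'haproxy_host': '0.0.0.0',
#      'stat_port': ':8888'}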
3529+
3530+
3531+class ImageServiceContext(OSContextGenerator):
3532+ interfaces = ['image-service']
3533+
3534+ def __call__(self):
3535+ """Obtains the glance API server from the image-service relation.
3536+ Useful in nova and cinder (currently).
3537+ """
3538+ log('Generating template context for image-service.', level=DEBUG)
3539+ rids = relation_ids('image-service')
3540+ if not rids:
3541+ return {}
3542+
3543+ for rid in rids:
3544+ for unit in related_units(rid):
3545+ api_server = relation_get('glance-api-server',
3546+ rid=rid, unit=unit)
3547+ if api_server:
3548+ return {'glance_api_servers': api_server}
3549+
3550+ log("ImageService context is incomplete. Missing required relation "
3551+ "data.", level=INFO)
3552+ return {}
3553+
3554+
3555+class ApacheSSLContext(OSContextGenerator):
3556+ """Generates a context for an apache vhost configuration that configures
3557+ HTTPS reverse proxying for one or many endpoints. Generated context
3558+ looks something like::
3559+
3560+ {
3561+ 'namespace': 'cinder',
3562+ 'private_address': 'iscsi.mycinderhost.com',
3563+ 'endpoints': [(8776, 8766), (8777, 8767)]
3564+ }
3565+
3566+ The endpoints list consists of tuples mapping external ports
3567+ to internal ports.
3568+ """
3569+ interfaces = ['https']
3570+
3571+ # charms should inherit this context and set external ports
3572+ # and service namespace accordingly.
3573+ external_ports = []
3574+ service_namespace = None
3575+
3576+ def enable_modules(self):
3577+ cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
3578+ check_call(cmd)
3579+
3580+ def configure_cert(self, cn=None):
3581+ ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
3582+ mkdir(path=ssl_dir)
3583+ cert, key = get_cert(cn)
3584+ if cn:
3585+ cert_filename = 'cert_{}'.format(cn)
3586+ key_filename = 'key_{}'.format(cn)
3587+ else:
3588+ cert_filename = 'cert'
3589+ key_filename = 'key'
3590+
3591+ write_file(path=os.path.join(ssl_dir, cert_filename),
3592+ content=b64decode(cert))
3593+ write_file(path=os.path.join(ssl_dir, key_filename),
3594+ content=b64decode(key))
3595+
3596+ def configure_ca(self):
3597+ ca_cert = get_ca_cert()
3598+ if ca_cert:
3599+ install_ca_cert(b64decode(ca_cert))
3600+
3601+ def canonical_names(self):
3602+ """Figure out which canonical names clients will use to access this service.
3603+ """
3604+ cns = []
3605+ for r_id in relation_ids('identity-service'):
3606+ for unit in related_units(r_id):
3607+ rdata = relation_get(rid=r_id, unit=unit)
3608+ for k in rdata:
3609+ if k.startswith('ssl_key_'):
3610+ cns.append(k[len('ssl_key_'):])  # strip the prefix, not a char set
3611+
3612+ return sorted(list(set(cns)))
3613+
3614+ def get_network_addresses(self):
3615+ """For each network configured, return corresponding address and vip
3616+ (if available).
3617+
3618+ Returns a list of tuples of the form:
3619+
3620+ [(address_in_net_a, vip_in_net_a),
3621+ (address_in_net_b, vip_in_net_b),
3622+ ...]
3623+
3624+ or, if no vip(s) available:
3625+
3626+ [(address_in_net_a, address_in_net_a),
3627+ (address_in_net_b, address_in_net_b),
3628+ ...]
3629+ """
3630+ addresses = []
3631+ if config('vip'):
3632+ vips = config('vip').split()
3633+ else:
3634+ vips = []
3635+
3636+ for net_type in ['os-internal-network', 'os-admin-network',
3637+ 'os-public-network']:
3638+ addr = get_address_in_network(config(net_type),
3639+ unit_get('private-address'))
3640+ if len(vips) > 1 and is_clustered():
3641+ if not config(net_type):
3642+ log("Multiple networks configured but net_type "
3643+ "is None (%s)." % net_type, level=WARNING)
3644+ continue
3645+
3646+ for vip in vips:
3647+ if is_address_in_network(config(net_type), vip):
3648+ addresses.append((addr, vip))
3649+ break
3650+
3651+ elif is_clustered() and config('vip'):
3652+ addresses.append((addr, config('vip')))
3653+ else:
3654+ addresses.append((addr, addr))
3655+
3656+ return sorted(addresses)
3657+
3658+ def __call__(self):
3659+ if isinstance(self.external_ports, six.string_types):
3660+ self.external_ports = [self.external_ports]
3661+
3662+ if not self.external_ports or not https():
3663+ return {}
3664+
3665+ self.configure_ca()
3666+ self.enable_modules()
3667+
3668+ ctxt = {'namespace': self.service_namespace,
3669+ 'endpoints': [],
3670+ 'ext_ports': []}
3671+
3672+ cns = self.canonical_names()
3673+ if cns:
3674+ for cn in cns:
3675+ self.configure_cert(cn)
3676+ else:
3677+ # Expect cert/key provided in config (currently assumed that ca
3678+ # uses ip for cn)
3679+ cn = resolve_address(endpoint_type=INTERNAL)
3680+ self.configure_cert(cn)
3681+
3682+ addresses = self.get_network_addresses()
3683+ for address, endpoint in sorted(set(addresses)):
3684+ for api_port in self.external_ports:
3685+ ext_port = determine_apache_port(api_port,
3686+ singlenode_mode=True)
3687+ int_port = determine_api_port(api_port, singlenode_mode=True)
3688+ portmap = (address, endpoint, int(ext_port), int(int_port))
3689+ ctxt['endpoints'].append(portmap)
3690+ ctxt['ext_ports'].append(int(ext_port))
3691+
3692+ ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
3693+ return ctxt
3694+
3695+
3696+class NeutronContext(OSContextGenerator):
3697+ interfaces = []
3698+
3699+ @property
3700+ def plugin(self):
3701+ return None
3702+
3703+ @property
3704+ def network_manager(self):
3705+ return None
3706+
3707+ @property
3708+ def packages(self):
3709+ return neutron_plugin_attribute(self.plugin, 'packages',
3710+ self.network_manager)
3711+
3712+ @property
3713+ def neutron_security_groups(self):
3714+ return None
3715+
3716+ def _ensure_packages(self):
3717+ for pkgs in self.packages:
3718+ ensure_packages(pkgs)
3719+
3720+ def _save_flag_file(self):
3721+ if self.network_manager == 'quantum':
3722+ _file = '/etc/nova/quantum_plugin.conf'
3723+ else:
3724+ _file = '/etc/nova/neutron_plugin.conf'
3725+
3726+ with open(_file, 'wb') as out:
3727+ out.write(self.plugin + '\n')
3728+
3729+ def ovs_ctxt(self):
3730+ driver = neutron_plugin_attribute(self.plugin, 'driver',
3731+ self.network_manager)
3732+ config = neutron_plugin_attribute(self.plugin, 'config',
3733+ self.network_manager)
3734+ ovs_ctxt = {'core_plugin': driver,
3735+ 'neutron_plugin': 'ovs',
3736+ 'neutron_security_groups': self.neutron_security_groups,
3737+ 'local_ip': unit_private_ip(),
3738+ 'config': config}
3739+
3740+ return ovs_ctxt
3741+
3742+ def nuage_ctxt(self):
3743+ driver = neutron_plugin_attribute(self.plugin, 'driver',
3744+ self.network_manager)
3745+ config = neutron_plugin_attribute(self.plugin, 'config',
3746+ self.network_manager)
3747+ nuage_ctxt = {'core_plugin': driver,
3748+ 'neutron_plugin': 'vsp',
3749+ 'neutron_security_groups': self.neutron_security_groups,
3750+ 'local_ip': unit_private_ip(),
3751+ 'config': config}
3752+
3753+ return nuage_ctxt
3754+
3755+ def nvp_ctxt(self):
3756+ driver = neutron_plugin_attribute(self.plugin, 'driver',
3757+ self.network_manager)
3758+ config = neutron_plugin_attribute(self.plugin, 'config',
3759+ self.network_manager)
3760+ nvp_ctxt = {'core_plugin': driver,
3761+ 'neutron_plugin': 'nvp',
3762+ 'neutron_security_groups': self.neutron_security_groups,
3763+ 'local_ip': unit_private_ip(),
3764+ 'config': config}
3765+
3766+ return nvp_ctxt
3767+
3768+ def n1kv_ctxt(self):
3769+ driver = neutron_plugin_attribute(self.plugin, 'driver',
3770+ self.network_manager)
3771+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
3772+ self.network_manager)
3773+ n1kv_user_config_flags = config('n1kv-config-flags')
3774+ restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
3775+ n1kv_ctxt = {'core_plugin': driver,
3776+ 'neutron_plugin': 'n1kv',
3777+ 'neutron_security_groups': self.neutron_security_groups,
3778+ 'local_ip': unit_private_ip(),
3779+ 'config': n1kv_config,
3780+ 'vsm_ip': config('n1kv-vsm-ip'),
3781+ 'vsm_username': config('n1kv-vsm-username'),
3782+ 'vsm_password': config('n1kv-vsm-password'),
3783+ 'restrict_policy_profiles': restrict_policy_profiles}
3784+
3785+ if n1kv_user_config_flags:
3786+ flags = config_flags_parser(n1kv_user_config_flags)
3787+ n1kv_ctxt['user_config_flags'] = flags
3788+
3789+ return n1kv_ctxt
3790+
3791+ def calico_ctxt(self):
3792+ driver = neutron_plugin_attribute(self.plugin, 'driver',
3793+ self.network_manager)
3794+ config = neutron_plugin_attribute(self.plugin, 'config',
3795+ self.network_manager)
3796+ calico_ctxt = {'core_plugin': driver,
3797+ 'neutron_plugin': 'Calico',
3798+ 'neutron_security_groups': self.neutron_security_groups,
3799+ 'local_ip': unit_private_ip(),
3800+ 'config': config}
3801+
3802+ return calico_ctxt
3803+
3804+ def neutron_ctxt(self):
3805+ if https():
3806+ proto = 'https'
3807+ else:
3808+ proto = 'http'
3809+
3810+ if is_clustered():
3811+ host = config('vip')
3812+ else:
3813+ host = unit_get('private-address')
3814+
3815+ ctxt = {'network_manager': self.network_manager,
3816+ 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
3817+ return ctxt
3818+
3819+ def pg_ctxt(self):
3820+ driver = neutron_plugin_attribute(self.plugin, 'driver',
3821+ self.network_manager)
3822+ config = neutron_plugin_attribute(self.plugin, 'config',
3823+ self.network_manager)
3824+ ovs_ctxt = {'core_plugin': driver,
3825+ 'neutron_plugin': 'plumgrid',
3826+ 'neutron_security_groups': self.neutron_security_groups,
3827+ 'local_ip': unit_private_ip(),
3828+ 'config': config}
3829+ return ovs_ctxt
3830+
3831+ def __call__(self):
3832+ if self.network_manager not in ['quantum', 'neutron']:
3833+ return {}
3834+
3835+ if not self.plugin:
3836+ return {}
3837+
3838+ ctxt = self.neutron_ctxt()
3839+
3840+ if self.plugin == 'ovs':
3841+ ctxt.update(self.ovs_ctxt())
3842+ elif self.plugin in ['nvp', 'nsx']:
3843+ ctxt.update(self.nvp_ctxt())
3844+ elif self.plugin == 'n1kv':
3845+ ctxt.update(self.n1kv_ctxt())
3846+ elif self.plugin == 'Calico':
3847+ ctxt.update(self.calico_ctxt())
3848+ elif self.plugin == 'vsp':
3849+ ctxt.update(self.nuage_ctxt())
3850+ elif self.plugin == 'plumgrid':
3851+ ctxt.update(self.pg_ctxt())
3852+
3853+ alchemy_flags = config('neutron-alchemy-flags')
3854+ if alchemy_flags:
3855+ flags = config_flags_parser(alchemy_flags)
3856+ ctxt['neutron_alchemy_flags'] = flags
3857+
3858+ self._save_flag_file()
3859+ return ctxt
3860+
3861+
3862+class NeutronPortContext(OSContextGenerator):
3863+
3864+ def resolve_ports(self, ports):
3865+ """Resolve NICs not yet bound to bridge(s)
3866+
3867+ If a MAC address is provided it is resolved to its NIC name; otherwise the entry is assumed to already be a NIC name.
3868+ """
3869+ if not ports:
3870+ return None
3871+
3872+ hwaddr_to_nic = {}
3873+ hwaddr_to_ip = {}
3874+ for nic in list_nics():
3875+ # Ignore virtual interfaces (bond masters will be identified from
3876+ # their slaves)
3877+ if not is_phy_iface(nic):
3878+ continue
3879+
3880+ _nic = get_bond_master(nic)
3881+ if _nic:
3882+ log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
3883+ level=DEBUG)
3884+ nic = _nic
3885+
3886+ hwaddr = get_nic_hwaddr(nic)
3887+ hwaddr_to_nic[hwaddr] = nic
3888+ addresses = get_ipv4_addr(nic, fatal=False)
3889+ addresses += get_ipv6_addr(iface=nic, fatal=False)
3890+ hwaddr_to_ip[hwaddr] = addresses
3891+
3892+ resolved = []
3893+ mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
3894+ for entry in ports:
3895+ if re.match(mac_regex, entry):
3896+ # NIC is in known NICs and does NOT have an IP address
3897+ if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
3898+ # If the nic is part of a bridge then don't use it
3899+ if is_bridge_member(hwaddr_to_nic[entry]):
3900+ continue
3901+
3902+ # Entry is a MAC address for a valid interface that doesn't
3903+ # have an IP address assigned yet.
3904+ resolved.append(hwaddr_to_nic[entry])
3905+ else:
3906+ # If the passed entry is not a MAC address, assume it's a valid
3907+ # interface, and that the user put it there on purpose (we can
3908+ # trust it to be the real external network).
3909+ resolved.append(entry)
3910+
3911+ # Ensure no duplicates
3912+ return list(set(resolved))
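# Editor's illustrative sketch (hypothetical NICs and MAC; not part of the
# synced module): if eth2 has MAC aa:bb:cc:dd:ee:ff, carries no IP address
# and is not a bridge member, a mixed list resolves to NIC names:
#
#     NeutronPortContext().resolve_ports(['aa:bb:cc:dd:ee:ff', 'eth3'])
#     # -> ['eth2', 'eth3']  (order not guaranteed; duplicates removed)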
3913+
3914+
3915+class OSConfigFlagContext(OSContextGenerator):
3916+ """Provides support for user-defined config flags.
3917+
3918+ Users can define a comma-separated list of key=value pairs
3919+ in the charm configuration and apply them at any point in
3920+ any file by using a template flag.
3921+
3922+ Sometimes users might want config flags inserted within a
3923+ specific section so this class allows users to specify the
3924+ template flag name, allowing for multiple template flags
3925+ (sections) within the same context.
3926+
3927+ NOTE: the value of config-flags may be a comma-separated list of
3928+ key=value pairs and some OpenStack config files support
3929+ comma-separated lists as values.
3930+ """
3931+
3932+ def __init__(self, charm_flag='config-flags',
3933+ template_flag='user_config_flags'):
3934+ """
3935+ :param charm_flag: config flags in charm configuration.
3936+ :param template_flag: insert point for user-defined flags in template
3937+ file.
3938+ """
3939+ super(OSConfigFlagContext, self).__init__()
3940+ self._charm_flag = charm_flag
3941+ self._template_flag = template_flag
3942+
3943+ def __call__(self):
3944+ config_flags = config(self._charm_flag)
3945+ if not config_flags:
3946+ return {}
3947+
3948+ return {self._template_flag:
3949+ config_flags_parser(config_flags)}
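# Editor's illustrative sketch (hypothetical charm config values; not part of
# the synced module): with the charm option 'config-flags' set to
# "debug=true, osapi_compute_workers=4", this context renders as:
#
#     OSConfigFlagContext()()
#     # -> {'user_config_flags': {'debug': 'true',
#     #                           'osapi_compute_workers': '4'}}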
3950+
3951+
3952+class SubordinateConfigContext(OSContextGenerator):
3953+
3954+ """
3955+ Responsible for inspecting relations to subordinates that
3956+ may be exporting required config via a json blob.
3957+
3958+ The subordinate interface allows subordinates to export their
3959+ configuration requirements to the principal for multiple config
3960+ files and multiple services. For example, a subordinate that has interfaces
3961+ to both glance and nova may export the following yaml blob as json::
3962+
3963+ glance:
3964+ /etc/glance/glance-api.conf:
3965+ sections:
3966+ DEFAULT:
3967+ - [key1, value1]
3968+ /etc/glance/glance-registry.conf:
3969+ MYSECTION:
3970+ - [key2, value2]
3971+ nova:
3972+ /etc/nova/nova.conf:
3973+ sections:
3974+ DEFAULT:
3975+ - [key3, value3]
3976+
3977+
3978+ It is then up to the principal charms to subscribe this context to
3979+ the service+config file they are interested in. Configuration data will
3980+ be available in the template context, in glance's case, as::
3981+
3982+ ctxt = {
3983+ ... other context ...
3984+ 'subordinate_config': {
3985+ 'DEFAULT': {
3986+ 'key1': 'value1',
3987+ },
3988+ 'MYSECTION': {
3989+ 'key2': 'value2',
3990+ },
3991+ }
3992+ }
3993+ """
3994+
3995+ def __init__(self, service, config_file, interface):
3996+ """
3997+ :param service : Service name key to query in any subordinate
3998+ data found
3999+ :param config_file : Service's config file to query sections
4000+ :param interface : Subordinate interface to inspect
4001+ """
4002+ self.config_file = config_file
4003+ if isinstance(service, list):
4004+ self.services = service
4005+ else:
4006+ self.services = [service]
4007+ if isinstance(interface, list):
4008+ self.interfaces = interface
4009+ else:
4010+ self.interfaces = [interface]
4011+
4012+ def __call__(self):
4013+ ctxt = {'sections': {}}
4014+ rids = []
4015+ for interface in self.interfaces:
4016+ rids.extend(relation_ids(interface))
4017+ for rid in rids:
4018+ for unit in related_units(rid):
4019+ sub_config = relation_get('subordinate_configuration',
4020+ rid=rid, unit=unit)
4021+ if sub_config and sub_config != '':
4022+ try:
4023+ sub_config = json.loads(sub_config)
4024+ except:
4025+ log('Could not parse JSON from subordinate_config '
4026+ 'setting from %s' % rid, level=ERROR)
4027+ continue
4028+
4029+ for service in self.services:
4030+ if service not in sub_config:
4031+ log('Found subordinate_config on %s but it contained '
4032+ 'nothing for %s service' % (rid, service),
4033+ level=INFO)
4034+ continue
4035+
4036+ sub_config = sub_config[service]
4037+ if self.config_file not in sub_config:
4038+ log('Found subordinate_config on %s but it contained '
4039+ 'nothing for %s' % (rid, self.config_file),
4040+ level=INFO)
4041+ continue
4042+
4043+ sub_config = sub_config[self.config_file]
4044+ for k, v in six.iteritems(sub_config):
4045+ if k == 'sections':
4046+ for section, config_list in six.iteritems(v):
4047+ log("adding section '%s'" % (section),
4048+ level=DEBUG)
4049+ if ctxt[k].get(section):
4050+ ctxt[k][section].extend(config_list)
4051+ else:
4052+ ctxt[k][section] = config_list
4053+ else:
4054+ ctxt[k] = v
4055+ log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
4056+ return ctxt
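# Editor's illustrative sketch (hypothetical relation name; not part of the
# synced module): a principal charm subscribing to the nova portion of the
# docstring example above would do something like:
#
#     nova_sub_ctxt = SubordinateConfigContext(
#         service='nova',
#         config_file='/etc/nova/nova.conf',
#         interface='nova-subordinate')
#     nova_sub_ctxt()
#     # -> {'sections': {'DEFAULT': [['key3', 'value3']]}}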
4057+
4058+
4059+class LogLevelContext(OSContextGenerator):
4060+
4061+ def __call__(self):
4062+ ctxt = {}
4063+ ctxt['debug'] = \
4064+ False if config('debug') is None else config('debug')
4065+ ctxt['verbose'] = \
4066+ False if config('verbose') is None else config('verbose')
4067+
4068+ return ctxt
4069+
4070+
4071+class SyslogContext(OSContextGenerator):
4072+
4073+ def __call__(self):
4074+ ctxt = {'use_syslog': config('use-syslog')}
4075+ return ctxt
4076+
4077+
4078+class BindHostContext(OSContextGenerator):
4079+
4080+ def __call__(self):
4081+ if config('prefer-ipv6'):
4082+ return {'bind_host': '::'}
4083+ else:
4084+ return {'bind_host': '0.0.0.0'}
4085+
4086+
4087+class WorkerConfigContext(OSContextGenerator):
4088+
4089+ @property
4090+ def num_cpus(self):
4091+ try:
4092+ from psutil import NUM_CPUS
4093+ except ImportError:
4094+ apt_install('python-psutil', fatal=True)
4095+ from psutil import NUM_CPUS
4096+
4097+ return NUM_CPUS
4098+
4099+ def __call__(self):
4100+ multiplier = config('worker-multiplier') or 0
4101+ ctxt = {"workers": self.num_cpus * multiplier}
4102+ return ctxt
4103+
4104+
4105+class ZeroMQContext(OSContextGenerator):
4106+ interfaces = ['zeromq-configuration']
4107+
4108+ def __call__(self):
4109+ ctxt = {}
4110+ if is_relation_made('zeromq-configuration', 'host'):
4111+ for rid in relation_ids('zeromq-configuration'):
4112+ for unit in related_units(rid):
4113+ ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
4114+ ctxt['zmq_host'] = relation_get('host', unit, rid)
4115+ ctxt['zmq_redis_address'] = relation_get(
4116+ 'zmq_redis_address', unit, rid)
4117+
4118+ return ctxt
4119+
4120+
4121+class NotificationDriverContext(OSContextGenerator):
4122+
4123+ def __init__(self, zmq_relation='zeromq-configuration',
4124+ amqp_relation='amqp'):
4125+ """
4126+ :param zmq_relation: Name of Zeromq relation to check
4127+ """
4128+ self.zmq_relation = zmq_relation
4129+ self.amqp_relation = amqp_relation
4130+
4131+ def __call__(self):
4132+ ctxt = {'notifications': 'False'}
4133+ if is_relation_made(self.amqp_relation):
4134+ ctxt['notifications'] = "True"
4135+
4136+ return ctxt
4137+
4138+
4139+class SysctlContext(OSContextGenerator):
4140+ """This context checks whether the 'sysctl' option is set in the charm
4141+ configuration and, if so, writes its contents to a sysctl.d file."""
4142+ def __call__(self):
4143+ sysctl_dict = config('sysctl')
4144+ if sysctl_dict:
4145+ sysctl_create(sysctl_dict,
4146+ '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
4147+ return {'sysctl': sysctl_dict}
4148+
4149+
4150+class NeutronAPIContext(OSContextGenerator):
4151+ '''
4152+ Inspects current neutron-plugin-api relation for neutron settings. Returns
4153+ defaults if it is not present.
4154+ '''
4155+ interfaces = ['neutron-plugin-api']
4156+
4157+ def __call__(self):
4158+ self.neutron_defaults = {
4159+ 'l2_population': {
4160+ 'rel_key': 'l2-population',
4161+ 'default': False,
4162+ },
4163+ 'overlay_network_type': {
4164+ 'rel_key': 'overlay-network-type',
4165+ 'default': 'gre',
4166+ },
4167+ 'neutron_security_groups': {
4168+ 'rel_key': 'neutron-security-groups',
4169+ 'default': False,
4170+ },
4171+ 'network_device_mtu': {
4172+ 'rel_key': 'network-device-mtu',
4173+ 'default': None,
4174+ },
4175+ 'enable_dvr': {
4176+ 'rel_key': 'enable-dvr',
4177+ 'default': False,
4178+ },
4179+ 'enable_l3ha': {
4180+ 'rel_key': 'enable-l3ha',
4181+ 'default': False,
4182+ },
4183+ }
4184+ ctxt = self.get_neutron_options({})
4185+ for rid in relation_ids('neutron-plugin-api'):
4186+ for unit in related_units(rid):
4187+ rdata = relation_get(rid=rid, unit=unit)
4188+ if 'l2-population' in rdata:
4189+ ctxt.update(self.get_neutron_options(rdata))
4190+
4191+ return ctxt
4192+
4193+ def get_neutron_options(self, rdata):
4194+ settings = {}
4195+ for nkey in self.neutron_defaults.keys():
4196+ defv = self.neutron_defaults[nkey]['default']
4197+ rkey = self.neutron_defaults[nkey]['rel_key']
4198+ if rkey in rdata.keys():
4199+ if type(defv) is bool:
4200+ settings[nkey] = bool_from_string(rdata[rkey])
4201+ else:
4202+ settings[nkey] = rdata[rkey]
4203+ else:
4204+ settings[nkey] = defv
4205+ return settings
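# Editor's illustrative sketch (hypothetical relation data; not part of the
# synced module): within __call__, relation data such as
# {'l2-population': 'True', 'overlay-network-type': 'vxlan'} maps onto the
# defaults above as:
#
#     {'l2_population': True,            # via bool_from_string()
#      'overlay_network_type': 'vxlan',
#      'neutron_security_groups': False,
#      'network_device_mtu': None,
#      'enable_dvr': False,
#      'enable_l3ha': False}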
4206+
4207+
4208+class ExternalPortContext(NeutronPortContext):
4209+
4210+ def __call__(self):
4211+ ctxt = {}
4212+ ports = config('ext-port')
4213+ if ports:
4214+ ports = [p.strip() for p in ports.split()]
4215+ ports = self.resolve_ports(ports)
4216+ if ports:
4217+ ctxt = {"ext_port": ports[0]}
4218+ napi_settings = NeutronAPIContext()()
4219+ mtu = napi_settings.get('network_device_mtu')
4220+ if mtu:
4221+ ctxt['ext_port_mtu'] = mtu
4222+
4223+ return ctxt
4224+
4225+
4226+class DataPortContext(NeutronPortContext):
4227+
4228+ def __call__(self):
4229+ ports = config('data-port')
4230+ if ports:
4231+ # Map of {port/mac:bridge}
4232+ portmap = parse_data_port_mappings(ports)
4233+ ports = portmap.keys()
4234+ # Resolve provided ports or mac addresses and filter out those
4235+ # already attached to a bridge.
4236+ resolved = self.resolve_ports(ports)
4237+ # FIXME: is this necessary?
4238+ normalized = {get_nic_hwaddr(port): port for port in resolved
4239+ if port not in ports}
4240+ normalized.update({port: port for port in resolved
4241+ if port in ports})
4242+ if resolved:
4243+ return {bridge: normalized[port] for port, bridge in
4244+ six.iteritems(portmap) if port in normalized.keys()}
4245+
4246+ return None
4247+
4248+
4249+class PhyNICMTUContext(DataPortContext):
4250+
4251+ def __call__(self):
4252+ ctxt = {}
4253+ mappings = super(PhyNICMTUContext, self).__call__()
4254+ if mappings and mappings.values():
4255+ ports = mappings.values()
4256+ napi_settings = NeutronAPIContext()()
4257+ mtu = napi_settings.get('network_device_mtu')
4258+ all_ports = set()
4259+ # If any of the ports is a vlan device, its underlying device must have
4260+ # mtu applied first.
4261+ for port in ports:
4262+ for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
4263+ lport = os.path.basename(lport)
4264+ all_ports.add(lport.split('_')[1])
4265+
4266+ all_ports = list(all_ports)
4267+ all_ports.extend(ports)
4268+ if mtu:
4269+ ctxt["devs"] = '\\n'.join(all_ports)
4270+ ctxt['mtu'] = mtu
4271+
4272+ return ctxt
4273+
4274+
4275+class NetworkServiceContext(OSContextGenerator):
4276+
4277+ def __init__(self, rel_name='quantum-network-service'):
4278+ self.rel_name = rel_name
4279+ self.interfaces = [rel_name]
4280+
4281+ def __call__(self):
4282+ for rid in relation_ids(self.rel_name):
4283+ for unit in related_units(rid):
4284+ rdata = relation_get(rid=rid, unit=unit)
4285+ ctxt = {
4286+ 'keystone_host': rdata.get('keystone_host'),
4287+ 'service_port': rdata.get('service_port'),
4288+ 'auth_port': rdata.get('auth_port'),
4289+ 'service_tenant': rdata.get('service_tenant'),
4290+ 'service_username': rdata.get('service_username'),
4291+ 'service_password': rdata.get('service_password'),
4292+ 'quantum_host': rdata.get('quantum_host'),
4293+ 'quantum_port': rdata.get('quantum_port'),
4294+ 'quantum_url': rdata.get('quantum_url'),
4295+ 'region': rdata.get('region'),
4296+ 'service_protocol':
4297+ rdata.get('service_protocol') or 'http',
4298+ 'auth_protocol':
4299+ rdata.get('auth_protocol') or 'http',
4300+ }
4301+ if self.context_complete(ctxt):
4302+ return ctxt
4303+ return {}
4304
4305=== added directory 'charmhelpers/contrib/openstack/files'
4306=== added file 'charmhelpers/contrib/openstack/files/__init__.py'
4307--- charmhelpers/contrib/openstack/files/__init__.py 1970-01-01 00:00:00 +0000
4308+++ charmhelpers/contrib/openstack/files/__init__.py 2015-10-08 20:44:37 +0000
4309@@ -0,0 +1,18 @@
4310+# Copyright 2014-2015 Canonical Limited.
4311+#
4312+# This file is part of charm-helpers.
4313+#
4314+# charm-helpers is free software: you can redistribute it and/or modify
4315+# it under the terms of the GNU Lesser General Public License version 3 as
4316+# published by the Free Software Foundation.
4317+#
4318+# charm-helpers is distributed in the hope that it will be useful,
4319+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4320+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4321+# GNU Lesser General Public License for more details.
4322+#
4323+# You should have received a copy of the GNU Lesser General Public License
4324+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4325+
4326+# dummy __init__.py to fool syncer into thinking this is a syncable python
4327+# module
4328
4329=== added file 'charmhelpers/contrib/openstack/files/check_haproxy.sh'
4330--- charmhelpers/contrib/openstack/files/check_haproxy.sh 1970-01-01 00:00:00 +0000
4331+++ charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-10-08 20:44:37 +0000
4332@@ -0,0 +1,32 @@
4333+#!/bin/bash
4334+#--------------------------------------------
4335+# This file is managed by Juju
4336+#--------------------------------------------
4337+#
4338+# Copyright 2009,2012 Canonical Ltd.
4339+# Author: Tom Haddon
4340+
4341+CRITICAL=0
4342+NOTACTIVE=''
4343+LOGFILE=/var/log/nagios/check_haproxy.log
4344+AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
4345+
4346+for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2}');
4347+do
4348+ output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
4349+ if [ $? != 0 ]; then
4350+ date >> $LOGFILE
4351+ echo $output >> $LOGFILE
4352+ /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
4353+ CRITICAL=1
4354+ NOTACTIVE="${NOTACTIVE} $appserver"
4355+ fi
4356+done
4357+
4358+if [ $CRITICAL = 1 ]; then
4359+ echo "CRITICAL:${NOTACTIVE}"
4360+ exit 2
4361+fi
4362+
4363+echo "OK: All haproxy instances looking good"
4364+exit 0
4365
4366=== added file 'charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh'
4367--- charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 1970-01-01 00:00:00 +0000
4368+++ charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh 2015-10-08 20:44:37 +0000
4369@@ -0,0 +1,30 @@
4370+#!/bin/bash
4371+#--------------------------------------------
4372+# This file is managed by Juju
4373+#--------------------------------------------
4374+#
4375+# Copyright 2009,2012 Canonical Ltd.
4376+# Author: Tom Haddon
4377+
4378+# These should be config options at some stage
4379+CURRQthrsh=0
4380+MAXQthrsh=100
4381+
4382+AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
4383+
4384+HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
4385+
4386+for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
4387+do
4388+ CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
4389+ MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
4390+
4391+ if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
4392+ echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
4393+ exit 2
4394+ fi
4395+done
4396+
4397+echo "OK: All haproxy queue depths looking good"
4398+exit 0
4399+
4400
4401=== added file 'charmhelpers/contrib/openstack/ip.py'
4402--- charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
4403+++ charmhelpers/contrib/openstack/ip.py 2015-10-08 20:44:37 +0000
4404@@ -0,0 +1,151 @@
4405+# Copyright 2014-2015 Canonical Limited.
4406+#
4407+# This file is part of charm-helpers.
4408+#
4409+# charm-helpers is free software: you can redistribute it and/or modify
4410+# it under the terms of the GNU Lesser General Public License version 3 as
4411+# published by the Free Software Foundation.
4412+#
4413+# charm-helpers is distributed in the hope that it will be useful,
4414+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4415+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4416+# GNU Lesser General Public License for more details.
4417+#
4418+# You should have received a copy of the GNU Lesser General Public License
4419+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4420+
4421+from charmhelpers.core.hookenv import (
4422+ config,
4423+ unit_get,
4424+ service_name,
4425+)
4426+from charmhelpers.contrib.network.ip import (
4427+ get_address_in_network,
4428+ is_address_in_network,
4429+ is_ipv6,
4430+ get_ipv6_addr,
4431+)
4432+from charmhelpers.contrib.hahelpers.cluster import is_clustered
4433+
4434+PUBLIC = 'public'
4435+INTERNAL = 'int'
4436+ADMIN = 'admin'
4437+
4438+ADDRESS_MAP = {
4439+ PUBLIC: {
4440+ 'config': 'os-public-network',
4441+ 'fallback': 'public-address',
4442+ 'override': 'os-public-hostname',
4443+ },
4444+ INTERNAL: {
4445+ 'config': 'os-internal-network',
4446+ 'fallback': 'private-address',
4447+ 'override': 'os-internal-hostname',
4448+ },
4449+ ADMIN: {
4450+ 'config': 'os-admin-network',
4451+ 'fallback': 'private-address',
4452+ 'override': 'os-admin-hostname',
4453+ }
4454+}
4455+
4456+
4457+def canonical_url(configs, endpoint_type=PUBLIC):
4458+ """Returns the correct HTTP URL to this host given the state of HTTPS
4459+ configuration, hacluster and charm configuration.
4460+
4461+ :param configs: OSTemplateRenderer config templating object to inspect
4462+ for a complete https context.
4463+ :param endpoint_type: str endpoint type to resolve.
4464+ :returns: str base URL for services on the current service unit.
4465+ """
4466+ scheme = _get_scheme(configs)
4467+
4468+ address = resolve_address(endpoint_type)
4469+ if is_ipv6(address):
4470+ address = "[{}]".format(address)
4471+
4472+ return '%s://%s' % (scheme, address)
4473+
4474+
4475+def _get_scheme(configs):
4476+ """Returns the scheme to use for the url (either http or https)
4477+ depending upon whether https is in the configs value.
4478+
4479+ :param configs: OSTemplateRenderer config templating object to inspect
4480+ for a complete https context.
4481+ :returns: either 'http' or 'https' depending on whether https is
4482+ configured within the configs context.
4483+ """
4484+ scheme = 'http'
4485+ if configs and 'https' in configs.complete_contexts():
4486+ scheme = 'https'
4487+ return scheme
4488+
4489+
4490+def _get_address_override(endpoint_type=PUBLIC):
4491+ """Returns any address overrides that the user has defined based on the
4492+ endpoint type.
4493+
4494+ Note: this function allows for the service name to be inserted into the
4495+ address if the user specifies {service_name}.somehost.org.
4496+
4497+ :param endpoint_type: the type of endpoint to retrieve the override
4498+ value for.
4499+ :returns: any endpoint address or hostname that the user has overridden
4500+ or None if an override is not present.
4501+ """
4502+ override_key = ADDRESS_MAP[endpoint_type]['override']
4503+ addr_override = config(override_key)
4504+ if not addr_override:
4505+ return None
4506+ else:
4507+ return addr_override.format(service_name=service_name())
4508+
4509+
4510+def resolve_address(endpoint_type=PUBLIC):
4511+ """Return unit address depending on net config.
4512+
4513+ If unit is clustered with vip(s) and has net splits defined, return vip on
4514+ correct network. If clustered with no nets defined, return primary vip.
4515+
4516+ If not clustered, return unit address ensuring address is on configured net
4517+ split if one is configured.
4518+
4519+ :param endpoint_type: Network endpoint type
4520+ """
4521+ resolved_address = _get_address_override(endpoint_type)
4522+ if resolved_address:
4523+ return resolved_address
4524+
4525+ vips = config('vip')
4526+ if vips:
4527+ vips = vips.split()
4528+
4529+ net_type = ADDRESS_MAP[endpoint_type]['config']
4530+ net_addr = config(net_type)
4531+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
4532+ clustered = is_clustered()
4533+ if clustered:
4534+ if not net_addr:
4535+ # If no net-splits defined, we expect a single vip
4536+ resolved_address = vips[0]
4537+ else:
4538+ for vip in vips:
4539+ if is_address_in_network(net_addr, vip):
4540+ resolved_address = vip
4541+ break
4542+ else:
4543+ if config('prefer-ipv6'):
4544+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
4545+ else:
4546+ fallback_addr = unit_get(net_fallback)
4547+
4548+ resolved_address = get_address_in_network(net_addr, fallback_addr)
4549+
4550+ if resolved_address is None:
4551+ raise ValueError("Unable to resolve a suitable IP address based on "
4552+ "charm state and configuration. (net_type=%s, "
4553+ "clustered=%s)" % (net_type, clustered))
4554+
4555+ return resolved_address
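# Editor's illustrative sketch (hypothetical config values; not part of the
# synced module): with 'os-public-hostname' set to
# '{service_name}.example.com' for a service named
# 'glance-simplestreams-sync', the override short-circuits all other
# resolution:
#
#     resolve_address(PUBLIC)
#     # -> 'glance-simplestreams-sync.example.com'
#     canonical_url(configs)
#     # -> 'https://glance-simplestreams-sync.example.com'
#     #    (or 'http://...' if the 'https' context is not complete)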
4556
4557=== added file 'charmhelpers/contrib/openstack/neutron.py'
4558--- charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
4559+++ charmhelpers/contrib/openstack/neutron.py 2015-10-08 20:44:37 +0000
4560@@ -0,0 +1,356 @@
4561+# Copyright 2014-2015 Canonical Limited.
4562+#
4563+# This file is part of charm-helpers.
4564+#
4565+# charm-helpers is free software: you can redistribute it and/or modify
4566+# it under the terms of the GNU Lesser General Public License version 3 as
4567+# published by the Free Software Foundation.
4568+#
4569+# charm-helpers is distributed in the hope that it will be useful,
4570+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4571+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4572+# GNU Lesser General Public License for more details.
4573+#
4574+# You should have received a copy of the GNU Lesser General Public License
4575+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4576+
4577+# Various utilities for dealing with Neutron and the renaming from Quantum.
4578+
4579+import six
4580+from subprocess import check_output
4581+
4582+from charmhelpers.core.hookenv import (
4583+ config,
4584+ log,
4585+ ERROR,
4586+)
4587+
4588+from charmhelpers.contrib.openstack.utils import os_release
4589+
4590+
4591+def headers_package():
4592+ """Ensures correct linux-headers for running kernel are installed,
4593+ for building DKMS package"""
4594+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
4595+ return 'linux-headers-%s' % kver
4596+
4597+QUANTUM_CONF_DIR = '/etc/quantum'
4598+
4599+
4600+def kernel_version():
4601+ """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
4602+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
4603+ kver = kver.split('.')
4604+ return (int(kver[0]), int(kver[1]))
4605+
4606+
4607+def determine_dkms_package():
4608+ """ Determine which DKMS package should be used based on kernel version """
4609+ # NOTE: 3.13 kernels have native support for GRE and VXLAN
4610+ if kernel_version() >= (3, 13):
4611+ return []
4612+ else:
4613+ return ['openvswitch-datapath-dkms']
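# Editor's illustrative note (not part of the synced module): on a host where
# `uname -r` reports e.g. '3.13.0-65-generic', kernel_version() returns
# (3, 13) and determine_dkms_package() returns [], since GRE/VXLAN support is
# in-tree; on an older 3.11 kernel it returns ['openvswitch-datapath-dkms'].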
4614+
4615+
4616+# legacy
4617+
4618+
4619+def quantum_plugins():
4620+ from charmhelpers.contrib.openstack import context
4621+ return {
4622+ 'ovs': {
4623+ 'config': '/etc/quantum/plugins/openvswitch/'
4624+ 'ovs_quantum_plugin.ini',
4625+ 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
4626+ 'OVSQuantumPluginV2',
4627+ 'contexts': [
4628+ context.SharedDBContext(user=config('neutron-database-user'),
4629+ database=config('neutron-database'),
4630+ relation_prefix='neutron',
4631+ ssl_dir=QUANTUM_CONF_DIR)],
4632+ 'services': ['quantum-plugin-openvswitch-agent'],
4633+ 'packages': [[headers_package()] + determine_dkms_package(),
4634+ ['quantum-plugin-openvswitch-agent']],
4635+ 'server_packages': ['quantum-server',
4636+ 'quantum-plugin-openvswitch'],
4637+ 'server_services': ['quantum-server']
4638+ },
4639+ 'nvp': {
4640+ 'config': '/etc/quantum/plugins/nicira/nvp.ini',
4641+ 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
4642+ 'QuantumPlugin.NvpPluginV2',
4643+ 'contexts': [
4644+ context.SharedDBContext(user=config('neutron-database-user'),
4645+ database=config('neutron-database'),
4646+ relation_prefix='neutron',
4647+ ssl_dir=QUANTUM_CONF_DIR)],
4648+ 'services': [],
4649+ 'packages': [],
4650+ 'server_packages': ['quantum-server',
4651+ 'quantum-plugin-nicira'],
4652+ 'server_services': ['quantum-server']
4653+ }
4654+ }
4655+
4656+NEUTRON_CONF_DIR = '/etc/neutron'
4657+
4658+
4659+def neutron_plugins():
4660+ from charmhelpers.contrib.openstack import context
4661+ release = os_release('nova-common')
4662+ plugins = {
4663+ 'ovs': {
4664+ 'config': '/etc/neutron/plugins/openvswitch/'
4665+ 'ovs_neutron_plugin.ini',
4666+ 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
4667+ 'OVSNeutronPluginV2',
4668+ 'contexts': [
4669+ context.SharedDBContext(user=config('neutron-database-user'),
4670+ database=config('neutron-database'),
4671+ relation_prefix='neutron',
4672+ ssl_dir=NEUTRON_CONF_DIR)],
4673+ 'services': ['neutron-plugin-openvswitch-agent'],
4674+ 'packages': [[headers_package()] + determine_dkms_package(),
4675+ ['neutron-plugin-openvswitch-agent']],
4676+ 'server_packages': ['neutron-server',
4677+ 'neutron-plugin-openvswitch'],
4678+ 'server_services': ['neutron-server']
4679+ },
4680+ 'nvp': {
4681+ 'config': '/etc/neutron/plugins/nicira/nvp.ini',
4682+ 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
4683+ 'NeutronPlugin.NvpPluginV2',
4684+ 'contexts': [
4685+ context.SharedDBContext(user=config('neutron-database-user'),
4686+ database=config('neutron-database'),
4687+ relation_prefix='neutron',
4688+ ssl_dir=NEUTRON_CONF_DIR)],
4689+ 'services': [],
4690+ 'packages': [],
4691+ 'server_packages': ['neutron-server',
4692+ 'neutron-plugin-nicira'],
4693+ 'server_services': ['neutron-server']
4694+ },
4695+ 'nsx': {
4696+ 'config': '/etc/neutron/plugins/vmware/nsx.ini',
4697+ 'driver': 'vmware',
4698+ 'contexts': [
4699+ context.SharedDBContext(user=config('neutron-database-user'),
4700+ database=config('neutron-database'),
4701+ relation_prefix='neutron',
4702+ ssl_dir=NEUTRON_CONF_DIR)],
4703+ 'services': [],
4704+ 'packages': [],
4705+ 'server_packages': ['neutron-server',
4706+ 'neutron-plugin-vmware'],
4707+ 'server_services': ['neutron-server']
4708+ },
4709+ 'n1kv': {
4710+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
4711+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
4712+ 'contexts': [
4713+ context.SharedDBContext(user=config('neutron-database-user'),
4714+ database=config('neutron-database'),
4715+ relation_prefix='neutron',
4716+ ssl_dir=NEUTRON_CONF_DIR)],
4717+ 'services': [],
4718+ 'packages': [[headers_package()] + determine_dkms_package(),
4719+ ['neutron-plugin-cisco']],
4720+ 'server_packages': ['neutron-server',
4721+ 'neutron-plugin-cisco'],
4722+ 'server_services': ['neutron-server']
4723+ },
4724+ 'Calico': {
4725+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
4726+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
4727+ 'contexts': [
4728+ context.SharedDBContext(user=config('neutron-database-user'),
4729+ database=config('neutron-database'),
4730+ relation_prefix='neutron',
4731+ ssl_dir=NEUTRON_CONF_DIR)],
4732+ 'services': ['calico-felix',
4733+ 'bird',
4734+ 'neutron-dhcp-agent',
4735+ 'nova-api-metadata',
4736+ 'etcd'],
4737+ 'packages': [[headers_package()] + determine_dkms_package(),
4738+ ['calico-compute',
4739+ 'bird',
4740+ 'neutron-dhcp-agent',
4741+ 'nova-api-metadata',
4742+ 'etcd']],
4743+ 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
4744+ 'server_services': ['neutron-server', 'etcd']
4745+ },
4746+ 'vsp': {
4747+ 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
4748+ 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
4749+ 'contexts': [
4750+ context.SharedDBContext(user=config('neutron-database-user'),
4751+ database=config('neutron-database'),
4752+ relation_prefix='neutron',
4753+ ssl_dir=NEUTRON_CONF_DIR)],
4754+ 'services': [],
4755+ 'packages': [],
4756+ 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
4757+ 'server_services': ['neutron-server']
4758+ },
4759+ 'plumgrid': {
4760+ 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
4761+ 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
4762+ 'contexts': [
4763+ context.SharedDBContext(user=config('database-user'),
4764+ database=config('database'),
4765+ ssl_dir=NEUTRON_CONF_DIR)],
4766+ 'services': [],
4767+ 'packages': [['plumgrid-lxc'],
4768+ ['iovisor-dkms']],
4769+ 'server_packages': ['neutron-server',
4770+ 'neutron-plugin-plumgrid'],
4771+ 'server_services': ['neutron-server']
4772+ }
4773+ }
4774+ if release >= 'icehouse':
4775+ # NOTE: patch in ml2 plugin for icehouse onwards
4776+ plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
4777+ plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
4778+ plugins['ovs']['server_packages'] = ['neutron-server',
4779+ 'neutron-plugin-ml2']
4780+ # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
4781+ plugins['nvp'] = plugins['nsx']
4782+ return plugins
4783+
4784+
4785+def neutron_plugin_attribute(plugin, attr, net_manager=None):
4786+ manager = net_manager or network_manager()
4787+ if manager == 'quantum':
4788+ plugins = quantum_plugins()
4789+ elif manager == 'neutron':
4790+ plugins = neutron_plugins()
4791+ else:
4792+ log("Network manager '%s' does not support plugins." % (manager),
4793+ level=ERROR)
4794+ raise Exception
4795+
4796+ try:
4797+ _plugin = plugins[plugin]
4798+ except KeyError:
4799+ log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
4800+ raise Exception
4801+
4802+ try:
4803+ return _plugin[attr]
4804+ except KeyError:
4805+ return None
4806+
4807+
4808+def network_manager():
4809+ '''
4810+ Deals with the renaming of Quantum to Neutron in H and any situations
4811+ that require compatibility (e.g. deploying H with network-manager=quantum,
4812+ upgrading from G).
4813+ '''
4814+ release = os_release('nova-common')
4815+ manager = config('network-manager').lower()
4816+
4817+ if manager not in ['quantum', 'neutron']:
4818+ return manager
4819+
4820+ if release in ['essex']:
4821+ # E does not support neutron
4822+ log('Neutron networking not supported in Essex.', level=ERROR)
4823+ raise Exception
4824+ elif release in ['folsom', 'grizzly']:
4825+ # neutron is named quantum in F and G
4826+ return 'quantum'
4827+ else:
4828+ # ensure accurate naming for all releases post-H
4829+ return 'neutron'
4830+
4831+
4832+def parse_mappings(mappings, key_rvalue=False):
4833+ """By default mappings are lvalue keyed.
4834+
4835+ If key_rvalue is True, the mapping will be reversed to allow multiple
4836+ configs for the same lvalue.
4837+ """
4838+ parsed = {}
4839+ if mappings:
4840+ mappings = mappings.split()
4841+ for m in mappings:
4842+ p = m.partition(':')
4843+
4844+ if key_rvalue:
4845+ key_index = 2
4846+ val_index = 0
4847+ # if there is no rvalue skip to next
4848+ if not p[1]:
4849+ continue
4850+ else:
4851+ key_index = 0
4852+ val_index = 2
4853+
4854+ key = p[key_index].strip()
4855+ parsed[key] = p[val_index].strip()
4856+
4857+ return parsed
4858+
4859+
4860+def parse_bridge_mappings(mappings):
4861+ """Parse bridge mappings.
4862+
4863+ Mappings must be a space-delimited list of provider:bridge mappings.
4864+
4865+ Returns dict of the form {provider:bridge}.
4866+ """
4867+ return parse_mappings(mappings)
4868+
4869+
4870+def parse_data_port_mappings(mappings, default_bridge='br-data'):
4871+ """Parse data port mappings.
4872+
4873+ Mappings must be a space-delimited list of port:bridge mappings.
4874+
4875+ Returns dict of the form {port:bridge} where port may be a mac address
4876+ or an interface name.
4877+ """
4878+
4879+ # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
4880+ # proposed for <port> since it may be a mac address, which will differ
4881+ # across units, thus allowing first-known-good to be chosen.
4882+ _mappings = parse_mappings(mappings, key_rvalue=True)
4883+ if not _mappings or list(_mappings.values()) == ['']:
4884+ if not mappings:
4885+ return {}
4886+
4887+ # For backwards-compatibility we need to support port-only provided in
4888+ # config.
4889+ _mappings = {mappings.split()[0]: default_bridge}
4890+
4891+ ports = _mappings.keys()
4892+ if len(set(ports)) != len(ports):
4893+ raise Exception("It is not allowed to have the same port configured "
4894+ "on more than one bridge")
4895+
4896+ return _mappings
4897+
4898+
4899+def parse_vlan_range_mappings(mappings):
4900+ """Parse vlan range mappings.
4901+
4902+ Mappings must be a space-delimited list of provider:start:end mappings.
4903+
4904+ The start:end range is optional.
4905+
4906+ Returns dict of the form {provider: (start, end)}.
4907+ """
4908+ _mappings = parse_mappings(mappings)
4909+ if not _mappings:
4910+ return {}
4911+
4912+ mappings = {}
4913+ for p, r in six.iteritems(_mappings):
4914+ mappings[p] = tuple(r.split(':'))
4915+
4916+ return mappings
4917
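The mapping parsers added above are pure string-processing helpers, so their behaviour can be checked outside of a hook. The following is a minimal usage sketch (not part of the diff) with made-up provider, bridge and port names; the module path assumes the charm-helpers sync layout used in this proposal:

from charmhelpers.contrib.openstack.neutron import (
    parse_bridge_mappings,
    parse_data_port_mappings,
    parse_vlan_range_mappings,
)

# provider:bridge pairs are keyed on the lvalue -> {provider: bridge}
print(parse_bridge_mappings('physnet1:br-ex physnet2:br-data'))
# {'physnet1': 'br-ex', 'physnet2': 'br-data'}

# bridge:port pairs are keyed on the rvalue (the port) -> {port: bridge};
# a bare port falls back to the default bridge 'br-data'.
print(parse_data_port_mappings('br-ex:eth1'))  # {'eth1': 'br-ex'}
print(parse_data_port_mappings('eth1'))        # {'eth1': 'br-data'}

# provider:start:end -> {provider: (start, end)}, values kept as strings
print(parse_vlan_range_mappings('physnet1:1000:2000'))
# {'physnet1': ('1000', '2000')}

By contrast, neutron_plugin_attribute() and network_manager() read charm config ('network-manager', 'neutron-database-user', ...) and the installed OpenStack release via os_release(), so they only produce meaningful results inside a hook environment.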
4918=== added directory 'charmhelpers/contrib/openstack/templates'
4919=== added file 'charmhelpers/contrib/openstack/templates/__init__.py'
4920--- charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
4921+++ charmhelpers/contrib/openstack/templates/__init__.py 2015-10-08 20:44:37 +0000
4922@@ -0,0 +1,18 @@
4923+# Copyright 2014-2015 Canonical Limited.
4924+#
4925+# This file is part of charm-helpers.
4926+#
4927+# charm-helpers is free software: you can redistribute it and/or modify
4928+# it under the terms of the GNU Lesser General Public License version 3 as
4929+# published by the Free Software Foundation.
4930+#
4931+# charm-helpers is distributed in the hope that it will be useful,
4932+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4933+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4934+# GNU Lesser General Public License for more details.
4935+#
4936+# You should have received a copy of the GNU Lesser General Public License
4937+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4938+
4939+# dummy __init__.py to fool syncer into thinking this is a syncable python
4940+# module
4941
4942=== added file 'charmhelpers/contrib/openstack/templates/ceph.conf'
4943--- charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000
4944+++ charmhelpers/contrib/openstack/templates/ceph.conf 2015-10-08 20:44:37 +0000
4945@@ -0,0 +1,21 @@
4946+###############################################################################
4947+# [ WARNING ]
4948+# cinder configuration file maintained by Juju
4949+# local changes may be overwritten.
4950+###############################################################################
4951+[global]
4952+{% if auth -%}
4953+auth_supported = {{ auth }}
4954+keyring = /etc/ceph/$cluster.$name.keyring
4955+mon host = {{ mon_hosts }}
4956+{% endif -%}
4957+log to syslog = {{ use_syslog }}
4958+err to syslog = {{ use_syslog }}
4959+clog to syslog = {{ use_syslog }}
4960+
4961+[client]
4962+{% if rbd_client_cache_settings -%}
4963+{% for key, value in rbd_client_cache_settings.iteritems() -%}
4964+{{ key }} = {{ value }}
4965+{% endfor -%}
4966+{%- endif %}
4967\ No newline at end of file
4968
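For context, the ceph.conf template above consumes a small set of variables (auth, mon_hosts, use_syslog and an optional rbd_client_cache_settings dict), normally supplied by the charm-helpers Ceph context and templating machinery. A standalone sketch of the rendering with hand-picked illustrative values (not values taken from this charm):

from jinja2 import Template

with open('charmhelpers/contrib/openstack/templates/ceph.conf') as f:
    ceph_tmpl = Template(f.read())

print(ceph_tmpl.render(
    auth='cephx',                              # illustrative values only
    mon_hosts='10.0.0.1 10.0.0.2 10.0.0.3',
    use_syslog='true',
    rbd_client_cache_settings=None,            # left unset on purpose, see note
))

Note that the [client] loop calls dict.iteritems(), so passing a populated rbd_client_cache_settings only renders under a Python 2 interpreter.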
4969=== added file 'charmhelpers/contrib/openstack/templates/git.upstart'
4970--- charmhelpers/contrib/openstack/templates/git.upstart 1970-01-01 00:00:00 +0000
4971+++ charmhelpers/contrib/openstack/templates/git.upstart 2015-10-08 20:44:37 +0000
4972@@ -0,0 +1,17 @@
4973+description "{{ service_description }}"
4974+author "Juju {{ service_name }} Charm <juju@localhost>"
4975+
4976+start on runlevel [2345]
4977+stop on runlevel [!2345]
4978+
4979+respawn
4980+
4981+exec start-stop-daemon --start --chuid {{ user_name }} \
4982+ --chdir {{ start_dir }} --name {{ process_name }} \
4983+ --exec {{ executable_name }} -- \
4984+ {% for config_file in config_files -%}
4985+ --config-file={{ config_file }} \
4986+ {% endfor -%}
4987+ {% if log_file -%}
4988+ --log-file={{ log_file }}
4989+ {% endif -%}
4990
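Similarly, the git.upstart template expects service_description, service_name, user_name, start_dir, process_name, executable_name, config_files and an optional log_file in its context. A minimal sketch with hypothetical names and paths, purely to show the shape of that context:

from jinja2 import Template

with open('charmhelpers/contrib/openstack/templates/git.upstart') as f:
    upstart_tmpl = Template(f.read())

print(upstart_tmpl.render(
    service_description='Example API server (git install)',   # hypothetical
    service_name='example-api',
    user_name='example',
    start_dir='/var/lib/example',
    process_name='example-server',
    executable_name='/usr/local/bin/example-server',
    config_files=['/etc/example/example.conf'],
    log_file='/var/log/example/server.log',
))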
4991=== added file 'charmhelpers/contrib/openstack/templates/haproxy.cfg'
4992--- charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000
4993+++ charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-10-08 20:44:37 +0000
4994@@ -0,0 +1,58 @@
4995+global
4996+ log {{ local_host }} local0
4997+ log {{ local_host }} local1 notice
4998+ maxconn 20000
4999+ user haproxy
5000+ group haproxy
The diff has been truncated for viewing.
