Merge lp:~fcorrea/charms/trusty/glance/fix-pause-action into lp:~openstack-charmers-archive/charms/trusty/glance/trunk

Proposed by Fernando Correa Neto on 2015-11-24
Status: Superseded
Proposed branch: lp:~fcorrea/charms/trusty/glance/fix-pause-action
Merge into: lp:~openstack-charmers-archive/charms/trusty/glance/trunk
Diff against target: 12620 lines (+12105/-44) (has conflicts)
54 files modified
.bzrignore (+2/-0)
.testr.conf (+8/-0)
actions/actions.py (+59/-0)
actions/openstack_upgrade.py (+28/-0)
charm-helpers-hooks.yaml (+5/-0)
charmhelpers.new/cli/__init__.py (+191/-0)
charmhelpers.new/cli/benchmark.py (+36/-0)
charmhelpers.new/cli/commands.py (+32/-0)
charmhelpers.new/cli/hookenv.py (+23/-0)
charmhelpers.new/cli/host.py (+31/-0)
charmhelpers.new/cli/unitdata.py (+39/-0)
charmhelpers.new/contrib/charmsupport/nrpe.py (+396/-0)
charmhelpers.new/contrib/hahelpers/cluster.py (+316/-0)
charmhelpers.new/contrib/network/ip.py (+456/-0)
charmhelpers.new/contrib/openstack/amulet/deployment.py (+297/-0)
charmhelpers.new/contrib/openstack/amulet/utils.py (+985/-0)
charmhelpers.new/contrib/openstack/context.py (+1457/-0)
charmhelpers.new/contrib/openstack/ip.py (+151/-0)
charmhelpers.new/contrib/openstack/neutron.py (+370/-0)
charmhelpers.new/contrib/openstack/templates/ceph.conf (+21/-0)
charmhelpers.new/contrib/openstack/templating.py (+323/-0)
charmhelpers.new/contrib/openstack/utils.py (+998/-0)
charmhelpers.new/contrib/python/packages.py (+121/-0)
charmhelpers.new/contrib/storage/linux/ceph.py (+673/-0)
charmhelpers.new/core/files.py (+45/-0)
charmhelpers.new/core/hookenv.py (+944/-0)
charmhelpers.new/core/host.py (+617/-0)
charmhelpers.new/core/hugepage.py (+71/-0)
charmhelpers.new/core/kernel.py (+68/-0)
charmhelpers.new/core/services/base.py (+353/-0)
charmhelpers.new/core/services/helpers.py (+286/-0)
charmhelpers.new/core/strutils.py (+72/-0)
charmhelpers.new/core/templating.py (+75/-0)
charmhelpers.new/core/unitdata.py (+521/-0)
charmhelpers.new/fetch/__init__.py (+456/-0)
charmhelpers.new/fetch/archiveurl.py (+167/-0)
charmhelpers.new/fetch/bzrurl.py (+82/-0)
charmhelpers.new/fetch/giturl.py (+73/-0)
hooks/glance_relations.py (+19/-3)
hooks/glance_utils.py (+115/-29)
metadata.yaml (+12/-2)
requirements/requirements-precise.txt (+6/-0)
requirements/requirements-trusty.txt (+6/-0)
requirements/test-requirements.txt (+7/-0)
tests/020-basic-trusty-liberty (+11/-0)
tests/021-basic-wily-liberty (+9/-0)
tests/052-basic-trusty-kilo-git (+12/-0)
tests/basic_deployment.py (+7/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+126/-9)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+650/-1)
tests/tests.yaml (+20/-0)
tox.ini (+35/-0)
unit_tests/test_actions.py (+162/-0)
unit_tests/test_actions_openstack_upgrade.py (+60/-0)
Conflict adding file .testr.conf.  Moved existing file to .testr.conf.moved.
Conflict adding file actions/__init__.py.  Moved existing file to actions/__init__.py.moved.
Conflict adding file actions/actions.py.  Moved existing file to actions/actions.py.moved.
Conflict adding file actions/charmhelpers.  Moved existing file to actions/charmhelpers.moved.
Conflict adding file actions/hooks.  Moved existing file to actions/hooks.moved.
Conflict adding file actions/openstack-upgrade.  Moved existing file to actions/openstack-upgrade.moved.
Conflict adding file actions/openstack_upgrade.py.  Moved existing file to actions/openstack_upgrade.py.moved.
Conflict adding file actions/pause.  Moved existing file to actions/pause.moved.
Conflict adding file actions/resume.  Moved existing file to actions/resume.moved.
Text conflict in charm-helpers-hooks.yaml
Conflict adding file charmhelpers.  Moved existing file to charmhelpers.moved.
Conflict: charmhelpers.new is not a directory, but has files in it.  Created directory.
Conflict adding files to charmhelpers.new/contrib.  Created directory.
Conflict because charmhelpers.new/contrib is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/charmsupport.  Created directory.
Conflict because charmhelpers.new/contrib/charmsupport is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/hahelpers.  Created directory.
Conflict because charmhelpers.new/contrib/hahelpers is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/network.  Created directory.
Conflict because charmhelpers.new/contrib/network is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack.  Created directory.
Conflict because charmhelpers.new/contrib/openstack is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/amulet.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/amulet is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/templates.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/templates is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/python.  Created directory.
Conflict because charmhelpers.new/contrib/python is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage.  Created directory.
Conflict because charmhelpers.new/contrib/storage is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage/linux.  Created directory.
Conflict because charmhelpers.new/contrib/storage/linux is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core.  Created directory.
Conflict because charmhelpers.new/core is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core/services.  Created directory.
Conflict because charmhelpers.new/core/services is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/fetch.  Created directory.
Conflict because charmhelpers.new/fetch is not versioned, but has versioned children.  Versioned directory.
Text conflict in hooks/glance_relations.py
Text conflict in hooks/glance_utils.py
Conflict adding file hooks/install.real.  Moved existing file to hooks/install.real.moved.
Conflict adding file hooks/update-status.  Moved existing file to hooks/update-status.moved.
Text conflict in metadata.yaml
Conflict adding file requirements.  Moved existing file to requirements.moved.
Conflict adding file tests/020-basic-trusty-liberty.  Moved existing file to tests/020-basic-trusty-liberty.moved.
Conflict adding file tests/021-basic-wily-liberty.  Moved existing file to tests/021-basic-wily-liberty.moved.
Conflict adding file tests/052-basic-trusty-kilo-git.  Moved existing file to tests/052-basic-trusty-kilo-git.moved.
Text conflict in tests/basic_deployment.py
Text conflict in tests/charmhelpers/contrib/openstack/amulet/deployment.py
Text conflict in tests/charmhelpers/contrib/openstack/amulet/utils.py
Conflict adding file tests/tests.yaml.  Moved existing file to tests/tests.yaml.moved.
Conflict adding file tox.ini.  Moved existing file to tox.ini.moved.
Conflict adding file unit_tests/test_actions.py.  Moved existing file to unit_tests/test_actions.py.moved.
Conflict adding file unit_tests/test_actions_openstack_upgrade.py.  Moved existing file to unit_tests/test_actions_openstack_upgrade.py.moved.
To merge this branch: bzr merge lp:~fcorrea/charms/trusty/glance/fix-pause-action
Reviewer Review Type Date Requested Status
Landscape 2015-11-24 Pending
Review via email: mp+278498@code.launchpad.net

This proposal has been superseded by a proposal from 2015-11-24.

Description of the change

This branch changes the pause action to update the kv database instead of calling set_os_workload_status directly, which is the same pattern used in the swift charm.
This prevents the charm from immediately bouncing back to active after a 'pause' action was performed.

A follow-up branch will add a bit more logic to deal with hacluster so it stops sending requests for the unit.

To post a comment you must log in.

charm_lint_check #14311 glance for fcorrea mp278498
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/14311/

charm_unit_test #13339 glance for fcorrea mp278498
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/13339/

Unmerged revisions

155. By Fernando Correa Neto on 2015-11-24

- backport changes from previous branch

154. By Fernando Correa Neto on 2015-11-24

- get new charmhelpers

153. By James Page on 2015-11-18

Update maintainer

152. By David Ames on 2015-11-02

[ionutbalutoiu, r=thedac] Adds additional variables to the image-service relation. These are available only when a relation with Swift object storage is present.
In case any charm needs to generate temporary URLs from Glance with Swift backend, it needs a temp-url-key which must be posted to Swift with the glance account. (Details: http://docs.openstack.org/liberty/config-reference/content/object-storage-tempurl.html)
This is needed for OpenStack Ironic charm (http://bazaar.launchpad.net/~cloudbaseit/charms/trusty/ironic/trunk/view/head:/hooks/ironic_context.py#L76), but might also be generally useful.

151. By Corey Bryant on 2015-10-09

[ddellav,r=corey.bryant] Action managed upgrade support.

150. By James Page on 2015-10-07

Refactor to assess status after every hook execution, add update-status hook

149. By Corey Bryant on 2015-10-07

[beisner,r=corey.bryant] Add Amulet test dependencies and run unit tests with -v.

148. By David Ames on 2015-10-06

[thedac,r=gnuoy] Make messaging an optional relation for workload status

147. By David Ames on 2015-10-02

[thedac, trivial] s/message/messaging

146. By Corey Bryant on 2015-09-30

[ack,r=corey.bryant] Fix amulet tests for pause/resume actions and sync charm-helpers

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file '.bzrignore'
2--- .bzrignore 2015-04-01 16:48:59 +0000
3+++ .bzrignore 2015-11-24 19:47:41 +0000
4@@ -1,3 +1,5 @@
5 .coverage
6 bin
7 tags
8+.tox
9+.testrepository
10
11=== added file '.testr.conf'
12--- .testr.conf 1970-01-01 00:00:00 +0000
13+++ .testr.conf 2015-11-24 19:47:41 +0000
14@@ -0,0 +1,8 @@
15+[DEFAULT]
16+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
17+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
18+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
19+ ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
20+
21+test_id_option=--load-list $IDFILE
22+test_list_option=--list
23
24=== renamed file '.testr.conf' => '.testr.conf.moved'
25=== added file 'actions/__init__.py'
26=== renamed file 'actions/__init__.py' => 'actions/__init__.py.moved'
27=== added file 'actions/actions.py'
28--- actions/actions.py 1970-01-01 00:00:00 +0000
29+++ actions/actions.py 2015-11-24 19:47:41 +0000
30@@ -0,0 +1,59 @@
31+#!/usr/bin/python
32+
33+import sys
34+import os
35+
36+from charmhelpers.core.host import service_pause, service_resume
37+from charmhelpers.core.hookenv import action_fail, status_set
38+from charmhelpers.core.unitdata import HookData, kv
39+
40+from hooks.glance_utils import services, assess_status
41+
42+
43+def pause(args):
44+ """Pause all the Glance services.
45+
46+ @raises Exception if any services fail to stop
47+ """
48+ for service in services():
49+ stopped = service_pause(service)
50+ if not stopped:
51+ raise Exception("{} didn't stop cleanly.".format(service))
52+ with HookData()():
53+ kv().set('unit-paused', True)
54+ state, message = assess_status()
55+ status_set(state, message)
56+
57+
58+def resume(args):
59+ """Resume all the Glance services.
60+
61+ @raises Exception if any services fail to start
62+ """
63+ for service in services():
64+ started = service_resume(service)
65+ if not started:
66+ raise Exception("{} didn't start cleanly.".format(service))
67+ status_set("active", "")
68+
69+
70+# A dictionary of all the defined actions to callables (which take
71+# parsed arguments).
72+ACTIONS = {"pause": pause, "resume": resume}
73+
74+
75+def main(args):
76+ action_name = os.path.basename(args[0])
77+ try:
78+ action = ACTIONS[action_name]
79+ except KeyError:
80+ return "Action %s undefined" % action_name
81+ else:
82+ try:
83+ action(args)
84+ except Exception as e:
85+ action_fail(str(e))
86+
87+
88+if __name__ == "__main__":
89+ sys.exit(main(sys.argv))
90
91=== renamed file 'actions/actions.py' => 'actions/actions.py.moved'
92=== added symlink 'actions/charmhelpers'
93=== target is u'../charmhelpers'
94=== renamed symlink 'actions/charmhelpers' => 'actions/charmhelpers.moved'
95=== added symlink 'actions/hooks'
96=== target is u'../hooks'
97=== renamed symlink 'actions/hooks' => 'actions/hooks.moved'
98=== added symlink 'actions/openstack-upgrade'
99=== target is u'openstack_upgrade.py'
100=== renamed symlink 'actions/openstack-upgrade' => 'actions/openstack-upgrade.moved'
101=== added file 'actions/openstack_upgrade.py'
102--- actions/openstack_upgrade.py 1970-01-01 00:00:00 +0000
103+++ actions/openstack_upgrade.py 2015-11-24 19:47:41 +0000
104@@ -0,0 +1,28 @@
105+#!/usr/bin/python
106+
107+from charmhelpers.contrib.openstack.utils import (
108+ do_action_openstack_upgrade,
109+)
110+
111+from hooks.glance_relations import (
112+ config_changed,
113+ CONFIGS
114+)
115+
116+from hooks.glance_utils import do_openstack_upgrade
117+
118+
119+def openstack_upgrade():
120+ """Upgrade packages to config-set Openstack version.
121+
122+ If the charm was installed from source we cannot upgrade it.
123+ For backwards compatibility a config flag must be set for this
124+ code to run, otherwise a full service level upgrade will fire
125+ on config-changed."""
126+ if (do_action_openstack_upgrade('glance-common',
127+ do_openstack_upgrade,
128+ CONFIGS)):
129+ config_changed()
130+
131+if __name__ == '__main__':
132+ openstack_upgrade()
133
134=== renamed file 'actions/openstack_upgrade.py' => 'actions/openstack_upgrade.py.moved'
135=== added symlink 'actions/pause'
136=== target is u'actions.py'
137=== renamed symlink 'actions/pause' => 'actions/pause.moved'
138=== added symlink 'actions/resume'
139=== target is u'actions.py'
140=== renamed symlink 'actions/resume' => 'actions/resume.moved'
141=== modified file 'charm-helpers-hooks.yaml'
142--- charm-helpers-hooks.yaml 2015-10-22 16:09:23 +0000
143+++ charm-helpers-hooks.yaml 2015-11-24 19:47:41 +0000
144@@ -1,5 +1,10 @@
145+<<<<<<< TREE
146 branch: lp:~openstack-charmers/charm-helpers/stable
147 destination: charmhelpers
148+=======
149+branch: lp:charm-helpers
150+destination: charmhelpers
151+>>>>>>> MERGE-SOURCE
152 include:
153 - core
154 - cli
155
156=== renamed directory 'charmhelpers' => 'charmhelpers.moved'
157=== renamed symlink 'hooks/charmhelpers' => 'charmhelpers.new'
158=== target was u'../charmhelpers'
159=== added directory 'charmhelpers.new/cli'
160=== added file 'charmhelpers.new/cli/__init__.py'
161--- charmhelpers.new/cli/__init__.py 1970-01-01 00:00:00 +0000
162+++ charmhelpers.new/cli/__init__.py 2015-11-24 19:47:41 +0000
163@@ -0,0 +1,191 @@
164+# Copyright 2014-2015 Canonical Limited.
165+#
166+# This file is part of charm-helpers.
167+#
168+# charm-helpers is free software: you can redistribute it and/or modify
169+# it under the terms of the GNU Lesser General Public License version 3 as
170+# published by the Free Software Foundation.
171+#
172+# charm-helpers is distributed in the hope that it will be useful,
173+# but WITHOUT ANY WARRANTY; without even the implied warranty of
174+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
175+# GNU Lesser General Public License for more details.
176+#
177+# You should have received a copy of the GNU Lesser General Public License
178+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
179+
180+import inspect
181+import argparse
182+import sys
183+
184+from six.moves import zip
185+
186+import charmhelpers.core.unitdata
187+
188+
189+class OutputFormatter(object):
190+ def __init__(self, outfile=sys.stdout):
191+ self.formats = (
192+ "raw",
193+ "json",
194+ "py",
195+ "yaml",
196+ "csv",
197+ "tab",
198+ )
199+ self.outfile = outfile
200+
201+ def add_arguments(self, argument_parser):
202+ formatgroup = argument_parser.add_mutually_exclusive_group()
203+ choices = self.supported_formats
204+ formatgroup.add_argument("--format", metavar='FMT',
205+ help="Select output format for returned data, "
206+ "where FMT is one of: {}".format(choices),
207+ choices=choices, default='raw')
208+ for fmt in self.formats:
209+ fmtfunc = getattr(self, fmt)
210+ formatgroup.add_argument("-{}".format(fmt[0]),
211+ "--{}".format(fmt), action='store_const',
212+ const=fmt, dest='format',
213+ help=fmtfunc.__doc__)
214+
215+ @property
216+ def supported_formats(self):
217+ return self.formats
218+
219+ def raw(self, output):
220+ """Output data as raw string (default)"""
221+ if isinstance(output, (list, tuple)):
222+ output = '\n'.join(map(str, output))
223+ self.outfile.write(str(output))
224+
225+ def py(self, output):
226+ """Output data as a nicely-formatted python data structure"""
227+ import pprint
228+ pprint.pprint(output, stream=self.outfile)
229+
230+ def json(self, output):
231+ """Output data in JSON format"""
232+ import json
233+ json.dump(output, self.outfile)
234+
235+ def yaml(self, output):
236+ """Output data in YAML format"""
237+ import yaml
238+ yaml.safe_dump(output, self.outfile)
239+
240+ def csv(self, output):
241+ """Output data as excel-compatible CSV"""
242+ import csv
243+ csvwriter = csv.writer(self.outfile)
244+ csvwriter.writerows(output)
245+
246+ def tab(self, output):
247+ """Output data in excel-compatible tab-delimited format"""
248+ import csv
249+ csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
250+ csvwriter.writerows(output)
251+
252+ def format_output(self, output, fmt='raw'):
253+ fmtfunc = getattr(self, fmt)
254+ fmtfunc(output)
255+
256+
257+class CommandLine(object):
258+ argument_parser = None
259+ subparsers = None
260+ formatter = None
261+ exit_code = 0
262+
263+ def __init__(self):
264+ if not self.argument_parser:
265+ self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
266+ if not self.formatter:
267+ self.formatter = OutputFormatter()
268+ self.formatter.add_arguments(self.argument_parser)
269+ if not self.subparsers:
270+ self.subparsers = self.argument_parser.add_subparsers(help='Commands')
271+
272+ def subcommand(self, command_name=None):
273+ """
274+ Decorate a function as a subcommand. Use its arguments as the
275+ command-line arguments"""
276+ def wrapper(decorated):
277+ cmd_name = command_name or decorated.__name__
278+ subparser = self.subparsers.add_parser(cmd_name,
279+ description=decorated.__doc__)
280+ for args, kwargs in describe_arguments(decorated):
281+ subparser.add_argument(*args, **kwargs)
282+ subparser.set_defaults(func=decorated)
283+ return decorated
284+ return wrapper
285+
286+ def test_command(self, decorated):
287+ """
288+ Subcommand is a boolean test function, so bool return values should be
289+ converted to a 0/1 exit code.
290+ """
291+ decorated._cli_test_command = True
292+ return decorated
293+
294+ def no_output(self, decorated):
295+ """
296+ Subcommand is not expected to return a value, so don't print a spurious None.
297+ """
298+ decorated._cli_no_output = True
299+ return decorated
300+
301+ def subcommand_builder(self, command_name, description=None):
302+ """
303+ Decorate a function that builds a subcommand. Builders should accept a
304+ single argument (the subparser instance) and return the function to be
305+ run as the command."""
306+ def wrapper(decorated):
307+ subparser = self.subparsers.add_parser(command_name)
308+ func = decorated(subparser)
309+ subparser.set_defaults(func=func)
310+ subparser.description = description or func.__doc__
311+ return wrapper
312+
313+ def run(self):
314+ "Run cli, processing arguments and executing subcommands."
315+ arguments = self.argument_parser.parse_args()
316+ argspec = inspect.getargspec(arguments.func)
317+ vargs = []
318+ for arg in argspec.args:
319+ vargs.append(getattr(arguments, arg))
320+ if argspec.varargs:
321+ vargs.extend(getattr(arguments, argspec.varargs))
322+ output = arguments.func(*vargs)
323+ if getattr(arguments.func, '_cli_test_command', False):
324+ self.exit_code = 0 if output else 1
325+ output = ''
326+ if getattr(arguments.func, '_cli_no_output', False):
327+ output = ''
328+ self.formatter.format_output(output, arguments.format)
329+ if charmhelpers.core.unitdata._KV:
330+ charmhelpers.core.unitdata._KV.flush()
331+
332+
333+cmdline = CommandLine()
334+
335+
336+def describe_arguments(func):
337+ """
338+ Analyze a function's signature and return a data structure suitable for
339+ passing in as arguments to an argparse parser's add_argument() method."""
340+
341+ argspec = inspect.getargspec(func)
342+ # we should probably raise an exception somewhere if func includes **kwargs
343+ if argspec.defaults:
344+ positional_args = argspec.args[:-len(argspec.defaults)]
345+ keyword_names = argspec.args[-len(argspec.defaults):]
346+ for arg, default in zip(keyword_names, argspec.defaults):
347+ yield ('--{}'.format(arg),), {'default': default}
348+ else:
349+ positional_args = argspec.args
350+
351+ for arg in positional_args:
352+ yield (arg,), {}
353+ if argspec.varargs:
354+ yield (argspec.varargs,), {'nargs': '*'}
355
356=== added file 'charmhelpers.new/cli/benchmark.py'
357--- charmhelpers.new/cli/benchmark.py 1970-01-01 00:00:00 +0000
358+++ charmhelpers.new/cli/benchmark.py 2015-11-24 19:47:41 +0000
359@@ -0,0 +1,36 @@
360+# Copyright 2014-2015 Canonical Limited.
361+#
362+# This file is part of charm-helpers.
363+#
364+# charm-helpers is free software: you can redistribute it and/or modify
365+# it under the terms of the GNU Lesser General Public License version 3 as
366+# published by the Free Software Foundation.
367+#
368+# charm-helpers is distributed in the hope that it will be useful,
369+# but WITHOUT ANY WARRANTY; without even the implied warranty of
370+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
371+# GNU Lesser General Public License for more details.
372+#
373+# You should have received a copy of the GNU Lesser General Public License
374+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
375+
376+from . import cmdline
377+from charmhelpers.contrib.benchmark import Benchmark
378+
379+
380+@cmdline.subcommand(command_name='benchmark-start')
381+def start():
382+ Benchmark.start()
383+
384+
385+@cmdline.subcommand(command_name='benchmark-finish')
386+def finish():
387+ Benchmark.finish()
388+
389+
390+@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
391+def service(subparser):
392+ subparser.add_argument("value", help="The composite score.")
393+ subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
394+ subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
395+ return Benchmark.set_composite_score
396
397=== added file 'charmhelpers.new/cli/commands.py'
398--- charmhelpers.new/cli/commands.py 1970-01-01 00:00:00 +0000
399+++ charmhelpers.new/cli/commands.py 2015-11-24 19:47:41 +0000
400@@ -0,0 +1,32 @@
401+# Copyright 2014-2015 Canonical Limited.
402+#
403+# This file is part of charm-helpers.
404+#
405+# charm-helpers is free software: you can redistribute it and/or modify
406+# it under the terms of the GNU Lesser General Public License version 3 as
407+# published by the Free Software Foundation.
408+#
409+# charm-helpers is distributed in the hope that it will be useful,
410+# but WITHOUT ANY WARRANTY; without even the implied warranty of
411+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
412+# GNU Lesser General Public License for more details.
413+#
414+# You should have received a copy of the GNU Lesser General Public License
415+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
416+
417+"""
418+This module loads sub-modules into the python runtime so they can be
419+discovered via the inspect module. In order to prevent flake8 from (rightfully)
420+telling us these are unused modules, throw a ' # noqa' at the end of each import
421+so that the warning is suppressed.
422+"""
423+
424+from . import CommandLine # noqa
425+
426+"""
427+Import the sub-modules which have decorated subcommands to register with chlp.
428+"""
429+from . import host # noqa
430+from . import benchmark # noqa
431+from . import unitdata # noqa
432+from . import hookenv # noqa
433
434=== added file 'charmhelpers.new/cli/hookenv.py'
435--- charmhelpers.new/cli/hookenv.py 1970-01-01 00:00:00 +0000
436+++ charmhelpers.new/cli/hookenv.py 2015-11-24 19:47:41 +0000
437@@ -0,0 +1,23 @@
438+# Copyright 2014-2015 Canonical Limited.
439+#
440+# This file is part of charm-helpers.
441+#
442+# charm-helpers is free software: you can redistribute it and/or modify
443+# it under the terms of the GNU Lesser General Public License version 3 as
444+# published by the Free Software Foundation.
445+#
446+# charm-helpers is distributed in the hope that it will be useful,
447+# but WITHOUT ANY WARRANTY; without even the implied warranty of
448+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
449+# GNU Lesser General Public License for more details.
450+#
451+# You should have received a copy of the GNU Lesser General Public License
452+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
453+
454+from . import cmdline
455+from charmhelpers.core import hookenv
456+
457+
458+cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
459+cmdline.subcommand('service-name')(hookenv.service_name)
460+cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
461
462=== added file 'charmhelpers.new/cli/host.py'
463--- charmhelpers.new/cli/host.py 1970-01-01 00:00:00 +0000
464+++ charmhelpers.new/cli/host.py 2015-11-24 19:47:41 +0000
465@@ -0,0 +1,31 @@
466+# Copyright 2014-2015 Canonical Limited.
467+#
468+# This file is part of charm-helpers.
469+#
470+# charm-helpers is free software: you can redistribute it and/or modify
471+# it under the terms of the GNU Lesser General Public License version 3 as
472+# published by the Free Software Foundation.
473+#
474+# charm-helpers is distributed in the hope that it will be useful,
475+# but WITHOUT ANY WARRANTY; without even the implied warranty of
476+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
477+# GNU Lesser General Public License for more details.
478+#
479+# You should have received a copy of the GNU Lesser General Public License
480+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
481+
482+from . import cmdline
483+from charmhelpers.core import host
484+
485+
486+@cmdline.subcommand()
487+def mounts():
488+ "List mounts"
489+ return host.mounts()
490+
491+
492+@cmdline.subcommand_builder('service', description="Control system services")
493+def service(subparser):
494+ subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
495+ subparser.add_argument("service_name", help="Name of the service to control")
496+ return host.service
497
498=== added file 'charmhelpers.new/cli/unitdata.py'
499--- charmhelpers.new/cli/unitdata.py 1970-01-01 00:00:00 +0000
500+++ charmhelpers.new/cli/unitdata.py 2015-11-24 19:47:41 +0000
501@@ -0,0 +1,39 @@
502+# Copyright 2014-2015 Canonical Limited.
503+#
504+# This file is part of charm-helpers.
505+#
506+# charm-helpers is free software: you can redistribute it and/or modify
507+# it under the terms of the GNU Lesser General Public License version 3 as
508+# published by the Free Software Foundation.
509+#
510+# charm-helpers is distributed in the hope that it will be useful,
511+# but WITHOUT ANY WARRANTY; without even the implied warranty of
512+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
513+# GNU Lesser General Public License for more details.
514+#
515+# You should have received a copy of the GNU Lesser General Public License
516+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
517+
518+from . import cmdline
519+from charmhelpers.core import unitdata
520+
521+
522+@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
523+def unitdata_cmd(subparser):
524+ nested = subparser.add_subparsers()
525+ get_cmd = nested.add_parser('get', help='Retrieve data')
526+ get_cmd.add_argument('key', help='Key to retrieve the value of')
527+ get_cmd.set_defaults(action='get', value=None)
528+ set_cmd = nested.add_parser('set', help='Store data')
529+ set_cmd.add_argument('key', help='Key to set')
530+ set_cmd.add_argument('value', help='Value to store')
531+ set_cmd.set_defaults(action='set')
532+
533+ def _unitdata_cmd(action, key, value):
534+ if action == 'get':
535+ return unitdata.kv().get(key)
536+ elif action == 'set':
537+ unitdata.kv().set(key, value)
538+ unitdata.kv().flush()
539+ return ''
540+ return _unitdata_cmd
541
542=== added directory 'charmhelpers.new/contrib'
543=== added directory 'charmhelpers.new/contrib/charmsupport'
544=== added file 'charmhelpers.new/contrib/charmsupport/nrpe.py'
545--- charmhelpers.new/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
546+++ charmhelpers.new/contrib/charmsupport/nrpe.py 2015-11-24 19:47:41 +0000
547@@ -0,0 +1,396 @@
548+# Copyright 2014-2015 Canonical Limited.
549+#
550+# This file is part of charm-helpers.
551+#
552+# charm-helpers is free software: you can redistribute it and/or modify
553+# it under the terms of the GNU Lesser General Public License version 3 as
554+# published by the Free Software Foundation.
555+#
556+# charm-helpers is distributed in the hope that it will be useful,
557+# but WITHOUT ANY WARRANTY; without even the implied warranty of
558+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
559+# GNU Lesser General Public License for more details.
560+#
561+# You should have received a copy of the GNU Lesser General Public License
562+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
563+
564+"""Compatibility with the nrpe-external-master charm"""
565+# Copyright 2012 Canonical Ltd.
566+#
567+# Authors:
568+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
569+
570+import subprocess
571+import pwd
572+import grp
573+import os
574+import glob
575+import shutil
576+import re
577+import shlex
578+import yaml
579+
580+from charmhelpers.core.hookenv import (
581+ config,
582+ local_unit,
583+ log,
584+ relation_ids,
585+ relation_set,
586+ relations_of_type,
587+)
588+
589+from charmhelpers.core.host import service
590+
591+# This module adds compatibility with the nrpe-external-master and plain nrpe
592+# subordinate charms. To use it in your charm:
593+#
594+# 1. Update metadata.yaml
595+#
596+# provides:
597+# (...)
598+# nrpe-external-master:
599+# interface: nrpe-external-master
600+# scope: container
601+#
602+# and/or
603+#
604+# provides:
605+# (...)
606+# local-monitors:
607+# interface: local-monitors
608+# scope: container
609+
610+#
611+# 2. Add the following to config.yaml
612+#
613+# nagios_context:
614+# default: "juju"
615+# type: string
616+# description: |
617+# Used by the nrpe subordinate charms.
618+# A string that will be prepended to instance name to set the host name
619+# in nagios. So for instance the hostname would be something like:
620+# juju-myservice-0
621+# If you're running multiple environments with the same services in them
622+# this allows you to differentiate between them.
623+# nagios_servicegroups:
624+# default: ""
625+# type: string
626+# description: |
627+# A comma-separated list of nagios servicegroups.
628+# If left empty, the nagios_context will be used as the servicegroup
629+#
630+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
631+#
632+# 4. Update your hooks.py with something like this:
633+#
634+# from charmsupport.nrpe import NRPE
635+# (...)
636+# def update_nrpe_config():
637+# nrpe_compat = NRPE()
638+# nrpe_compat.add_check(
639+# shortname = "myservice",
640+# description = "Check MyService",
641+# check_cmd = "check_http -w 2 -c 10 http://localhost"
642+# )
643+# nrpe_compat.add_check(
644+# "myservice_other",
645+# "Check for widget failures",
646+# check_cmd = "/srv/myapp/scripts/widget_check"
647+# )
648+# nrpe_compat.write()
649+#
650+# def config_changed():
651+# (...)
652+# update_nrpe_config()
653+#
654+# def nrpe_external_master_relation_changed():
655+# update_nrpe_config()
656+#
657+# def local_monitors_relation_changed():
658+# update_nrpe_config()
659+#
660+# 5. ln -s hooks.py nrpe-external-master-relation-changed
661+# ln -s hooks.py local-monitors-relation-changed
662+
663+
class CheckException(Exception):
    """Raised when an NRPE check is misconfigured (e.g. invalid shortname)."""
    pass
666+
667+
class Check(object):
    """A single NRPE check definition.

    Handles writing the NRPE command config (/etc/nagios/nrpe.d/<cmd>.cfg)
    and the exported Nagios service definition consumed by the nagios
    charm, plus removal of both.
    """

    # Shortnames end up in filenames and nagios object names, so restrict
    # them to a safe character set.
    shortname_re = '[A-Za-z0-9-_]+$'
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use active-service
    host_name {nagios_hostname}
    service_description {nagios_hostname}[{shortname}] """
                        """{description}
    check_command check_nrpe!{command}
    servicegroups {nagios_servicegroup}
}}
""")

    def __init__(self, shortname, description, check_cmd):
        """
        :param str shortname: Identifier for the check; must match
            shortname_re.
        :param str description: Human readable description shown in nagios.
        :param str check_cmd: Plugin command (optionally with arguments).
        :raises CheckException: If shortname contains illegal characters.
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)

    def _get_check_filename(self):
        """Return the path of the NRPE command config for this check."""
        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))

    def _get_service_filename(self, hostname):
        """Return the path of the exported nagios service definition."""
        return os.path.join(NRPE.nagios_exportdir,
                            'service__{}_{}.cfg'.format(hostname,
                                                        self.command))

    def _locate_cmd(self, check_cmd):
        """Resolve the plugin named in check_cmd against the nagios plugin
        directories, preserving any arguments.

        Returns '' (after logging) when the plugin cannot be found.
        """
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def _remove_service_files(self):
        """Delete any exported service definitions for this command."""
        if not os.path.exists(NRPE.nagios_exportdir):
            return
        for f in os.listdir(NRPE.nagios_exportdir):
            if f.endswith('_{}.cfg'.format(self.command)):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

    def remove(self, hostname):
        """Remove this check's NRPE config and exported service files.

        :param str hostname: Unused; retained for interface compatibility.
        """
        nrpe_check_file = self._get_check_filename()
        if os.path.exists(nrpe_check_file):
            os.remove(nrpe_check_file)
        self._remove_service_files()

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the NRPE command config and, when the export dir is
        accessible, the nagios service definition."""
        nrpe_check_file = self._get_check_filename()
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Render and write the nagios service definition for this check."""
        self._remove_service_files()

        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = self._get_service_filename(hostname)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        """Execute the check locally.

        BUG FIX: check_cmd is a single string that may carry arguments;
        subprocess.call() without shell=True needs an argv list, so the
        string must be split first or any check with arguments fails.
        """
        subprocess.call(shlex.split(self.check_cmd))
765+
766+
class NRPE(object):
    """Aggregates Check objects and publishes them to nrpe subordinates."""

    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'

    def __init__(self, hostname=None):
        """
        :param str hostname: Override for the nagios host name; defaults to
            the hostname advertised over the nrpe relation, falling back to
            '<nagios_context>-<unit-name>'.
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            # Fall back to the context when no servicegroups are configured.
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []

    def add_check(self, *args, **kwargs):
        """Register a new check (same arguments as Check.__init__)."""
        self.checks.append(Check(*args, **kwargs))

    def remove_check(self, *args, **kwargs):
        """Remove a previously written check; requires a shortname kwarg."""
        if kwargs.get('shortname') is None:
            raise ValueError('shortname of check must be specified')

        # Use sensible defaults if they're not specified - these are not
        # actually used during removal, but they're required for constructing
        # the Check object; check_disk is chosen because it's part of the
        # nagios-plugins-basic package.
        if kwargs.get('check_cmd') is None:
            kwargs['check_cmd'] = 'check_disk'
        if kwargs.get('description') is None:
            kwargs['description'] = ''

        check = Check(*args, **kwargs)
        check.remove(self.hostname)

    def write(self):
        """Write every registered check, restart nrpe and publish the
        monitors on any monitoring relations."""
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # BUG FIX: previously a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt; getpwnam/getgrnam raise KeyError
            # when the 'nagios' user/group do not exist.
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
837+
838+
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_host_context

    :param str relation_name: Name of relation nrpe sub joined to
    """
    for rel in relations_of_type(relation_name):
        # BUG FIX: guard on the key actually returned. The previous guard
        # tested 'nagios_hostname', which could raise KeyError when a
        # relation carried the hostname but not the host context.
        if 'nagios_host_context' in rel:
            return rel['nagios_host_context']
848+
849+
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    """
    for rel in relations_of_type(relation_name):
        try:
            # First relation carrying the key wins.
            return rel['nagios_hostname']
        except KeyError:
            continue
859+
860+
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    unit = local_unit()
    host_context = get_nagios_hostcontext(relation_name)
    if host_context:
        unit = "%s:%s" % (host_context, unit)
    return unit
873+
874+
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc
        if os.path.exists(upstart_init):
            # Upstart-managed services get a direct job check.
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            # SysV services are polled via cron; nagios then reads the
            # recorded exit status from the status file.
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            # FIX: use a context manager so the cron file handle is closed
            # even if the write fails (was an unprotected open/close pair).
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
909+
910+
def copy_nrpe_checks():
    """
    Copy the nrpe checks into place

    """
    plugins_dir = '/usr/local/lib/nagios/plugins'
    src_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
                           'charmhelpers', 'contrib', 'openstack',
                           'files')

    if not os.path.exists(plugins_dir):
        os.makedirs(plugins_dir)
    for src in glob.glob(os.path.join(src_dir, "check_*")):
        if os.path.isfile(src):
            dest = os.path.join(plugins_dir, os.path.basename(src))
            shutil.copy2(src, dest)
927+
928+
def add_haproxy_checks(nrpe, unit_name):
    """
    Register the standard haproxy health checks.

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    haproxy_checks = (
        ('haproxy_servers', 'Check HAProxy {%s}' % unit_name,
         'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}' % unit_name,
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description, check_cmd in haproxy_checks:
        nrpe.add_check(shortname=shortname,
                       description=description,
                       check_cmd=check_cmd)
944
945=== added directory 'charmhelpers.new/contrib/hahelpers'
946=== added file 'charmhelpers.new/contrib/hahelpers/cluster.py'
947--- charmhelpers.new/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
948+++ charmhelpers.new/contrib/hahelpers/cluster.py 2015-11-24 19:47:41 +0000
949@@ -0,0 +1,316 @@
950+# Copyright 2014-2015 Canonical Limited.
951+#
952+# This file is part of charm-helpers.
953+#
954+# charm-helpers is free software: you can redistribute it and/or modify
955+# it under the terms of the GNU Lesser General Public License version 3 as
956+# published by the Free Software Foundation.
957+#
958+# charm-helpers is distributed in the hope that it will be useful,
959+# but WITHOUT ANY WARRANTY; without even the implied warranty of
960+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
961+# GNU Lesser General Public License for more details.
962+#
963+# You should have received a copy of the GNU Lesser General Public License
964+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
965+
966+#
967+# Copyright 2012 Canonical Ltd.
968+#
969+# Authors:
970+# James Page <james.page@ubuntu.com>
971+# Adam Gandelman <adamg@ubuntu.com>
972+#
973+
974+"""
975+Helpers for clustering and determining "cluster leadership" and other
976+clustering-related helpers.
977+"""
978+
979+import subprocess
980+import os
981+
982+from socket import gethostname as get_unit_hostname
983+
984+import six
985+
986+from charmhelpers.core.hookenv import (
987+ log,
988+ relation_ids,
989+ related_units as relation_list,
990+ relation_get,
991+ config as config_get,
992+ INFO,
993+ ERROR,
994+ WARNING,
995+ unit_get,
996+ is_leader as juju_is_leader
997+)
998+from charmhelpers.core.decorators import (
999+ retry_on_exception,
1000+)
1001+from charmhelpers.core.strutils import (
1002+ bool_from_string,
1003+)
1004+
1005+DC_RESOURCE_NAME = 'DC'
1006+
1007+
class HAIncompleteConfig(Exception):
    """Raised when required hacluster settings are missing from config."""
    pass
1010+
1011+
class CRMResourceNotFound(Exception):
    """Raised when a queried CRM resource is not running anywhere."""
    pass
1014+
1015+
class CRMDCNotFound(Exception):
    """Raised when the pacemaker Designated Controller cannot be found."""
    pass
1018+
1019+
def is_elected_leader(resource):
    """
    Returns True if the charm executing this is the elected cluster leader.

    Leadership is determined by, in order:
    1. If juju is sufficiently new and leadership election is supported,
    the is_leader command will be used.
    2. If the charm is part of a corosync cluster, call corosync to
    determine leadership.
    3. If the charm is not part of a corosync cluster, the leader is
    determined as being "the alive unit with the lowest unit number". In
    other words, the oldest surviving unit.
    """
    try:
        return juju_is_leader()
    except NotImplementedError:
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        if is_crm_leader(resource):
            return True
        log('Deferring action to CRM leader.', level=INFO)
        return False

    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
1050+
1051+
def is_clustered():
    """Return True if any unit on the 'ha' relation reports clustered."""
    return any(
        relation_get('clustered', rid=r_id, unit=unit)
        for r_id in (relation_ids('ha') or [])
        for unit in (relation_list(r_id) or [])
    )
1061+
1062+
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # py3 returns bytes; normalise to text before parsing.
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
            if current_dc == get_unit_hostname():
                return True
            elif current_dc == 'NONE':
                raise CRMDCNotFound('Current DC: NONE')

    return False
1086+
1087+
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()

    try:
        status = subprocess.check_output(['crm', 'resource', 'show', resource],
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        status = None
    else:
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")

    if status and get_unit_hostname() in status:
        return True

    if status and "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False
1115+
1116+
def is_leader(resource):
    """Deprecated wrapper around is_crm_leader()."""
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)
1121+
1122+
def peer_units(peer_relation="cluster"):
    """Return a list of all units on the given peer relation."""
    return [unit
            for r_id in (relation_ids(peer_relation) or [])
            for unit in (relation_list(r_id) or [])]
1129+
1130+
def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    return {
        unit: relation_get(addr_key, rid=r_id, unit=unit)
        for r_id in relation_ids(peer_relation)
        for unit in relation_list(r_id)
    }
1138+
1139+
def oldest_peer(peers):
    """Determines who the oldest peer is by comparing unit numbers."""
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    # We are the oldest peer iff no remote peer has a lower unit number.
    return all(int(peer.split('/')[1]) >= local_unit_no for peer in peers)
1148+
1149+
def eligible_leader(resource):
    """Deprecated wrapper around is_elected_leader()."""
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)
1154+
1155+
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if all(v not in (None, '') for v in rel_state):
                return True
    return False
1179+
1180+
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    offset = 0
    # haproxy in front: shift down one slot (short-circuit keeps the
    # relation queries from running in singlenode mode).
    if singlenode_mode or peer_units() or is_clustered():
        offset += 1
    # apache SSL termination in front: shift down another slot.
    if https():
        offset += 1
    return public_port - (offset * 10)
1200+
1201+
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    offset = 0
    # haproxy sits in front whenever clustered (or forced in single-node
    # mode), so apache listens one slot down.
    if singlenode_mode or peer_units() or is_clustered():
        offset += 1
    return public_port - (offset * 10)
1219+
1220+
def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

    ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)

    # FIX: collect unset settings with a plain comprehension; previously a
    # list comprehension was used purely for its append side effect, which
    # builds and discards a throwaway list of Nones.
    missing = [s for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf
1245+
1246+
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs : OSTemplateRenderer: A config tempating object to inspect for
    a complete https context.

    :vip_setting: str: Setting in charm config that specifies
    VIP address.
    '''
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
1266
1267=== added directory 'charmhelpers.new/contrib/network'
1268=== added file 'charmhelpers.new/contrib/network/ip.py'
1269--- charmhelpers.new/contrib/network/ip.py 1970-01-01 00:00:00 +0000
1270+++ charmhelpers.new/contrib/network/ip.py 2015-11-24 19:47:41 +0000
1271@@ -0,0 +1,456 @@
1272+# Copyright 2014-2015 Canonical Limited.
1273+#
1274+# This file is part of charm-helpers.
1275+#
1276+# charm-helpers is free software: you can redistribute it and/or modify
1277+# it under the terms of the GNU Lesser General Public License version 3 as
1278+# published by the Free Software Foundation.
1279+#
1280+# charm-helpers is distributed in the hope that it will be useful,
1281+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1282+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1283+# GNU Lesser General Public License for more details.
1284+#
1285+# You should have received a copy of the GNU Lesser General Public License
1286+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1287+
1288+import glob
1289+import re
1290+import subprocess
1291+import six
1292+import socket
1293+
1294+from functools import partial
1295+
1296+from charmhelpers.core.hookenv import unit_get
1297+from charmhelpers.fetch import apt_install, apt_update
1298+from charmhelpers.core.hookenv import (
1299+ log,
1300+ WARNING,
1301+)
1302+
1303+try:
1304+ import netifaces
1305+except ImportError:
1306+ apt_update(fatal=True)
1307+ apt_install('python-netifaces', fatal=True)
1308+ import netifaces
1309+
1310+try:
1311+ import netaddr
1312+except ImportError:
1313+ apt_update(fatal=True)
1314+ apt_install('python-netaddr', fatal=True)
1315+ import netaddr
1316+
1317+
def _validate_cidr(network):
    # Raise ValueError (normalising netaddr's own error type) unless
    # *network* parses as CIDR notation, e.g. '192.168.1.0/24'.
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
1324+
1325+
def no_ip_found_error_out(network):
    """Raise a ValueError reporting that *network* yielded no address."""
    raise ValueError("No IP address found in network: %s" % network)
1329+
1330+
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    """
    if network is None:
        # No network requested: still honour fallback/fatal semantics.
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            return None

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            # Only the first IPv4 address on each interface is considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)

        if network.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80::) addresses.
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None
1375+
1376+
def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        return netaddr.IPAddress(address).version == 6
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False
1386+
1387+
def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        cidr = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        ip = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    return ip in cidr
1414+
1415+
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            # Normalise to the network CIDR so membership testing works.
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80::) addresses.
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            # For IPv6 return the prefix length rather than
                            # the raw netmask entry.
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None
1455+
1456+
# Convenience wrappers around _get_for_address for the two common lookups.
get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')
1461+
1462+
def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    return "[%s]" % address if is_ipv6(address) else None
1473+
1474+
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any."""
    # Accept device paths such as /dev/eth0.
    if '/' in iface:
        iface = iface.split('/')[-1]

    exc_list = exc_list or []

    inet_num = getattr(netifaces, inet_type, None)
    if inet_num is None:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        # Include aliases such as eth0:1 alongside the base interface.
        ifaces = sorted(_iface for _iface in interfaces
                        if _iface == iface or _iface.split(':')[0] == iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
    elif iface in interfaces:
        ifaces = [iface]
    elif fatal:
        raise Exception("Interface '%s' not found " % (iface))
    else:
        return []

    addresses = [entry['addr']
                 for netiface in ifaces
                 for entry in netifaces.ifaddresses(netiface).get(inet_num,
                                                                  [])
                 if 'addr' in entry and entry['addr'] not in exc_list]

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
1524+
1525+
# IPv4 variant of get_iface_addr().
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
1527+
1528+
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    # Strips an IPv6 link-local '%iface' suffix; compiled once up front.
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        for entries in netifaces.ifaddresses(iface).values():
            for entry in entries:
                candidate = entry['addr']
                raw = ll_key.match(candidate)
                if raw:
                    candidate = raw.group(1)

                if candidate == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
1549+
1550+
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    def iface_sniffer(*args, **kwargs):
        if kwargs.get('iface', None):
            return f(*args, **kwargs)
        kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
        return f(*args, **kwargs)

    return iface_sniffer
1563+
1564+
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            # A link-local address carries the interface's EUI-64 suffix;
            # remember it so dynamic global addresses can be matched below.
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            # NOTE(review): when dynamic_only is True and no
                            # link-local address was seen above, eui_64_mac
                            # is referenced before assignment here — confirm
                            # against upstream charmhelpers.
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
1623+
1624+
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    # A virtual nic is a bridge iff it exposes a 'bridge' sysfs subdir.
    bridges = []
    for entry in glob.glob("%s/*/bridge" % vnic_dir):
        bridges.append(entry.replace(vnic_dir, '').split('/')[1])
    return bridges
1629+
1630+
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    # Each member nic appears as an entry under the bridge's brif directory.
    nics = []
    for path in glob.glob("%s/%s/brif/*" % (vnic_dir, bridge)):
        nics.append(path.split('/')[-1])
    return nics
1635+
1636+
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
1644+
1645+
def is_ip(address):
    """
    Returns True if address is a valid IP address.

    NOTE: only IPv4 is recognised here (inet_aton); IPv6 strings
    return False.
    """
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
1656+
1657+
def ns_query(address):
    """Query DNS for the given address.

    :param address: hostname string (queried as an A record) or a
        dns.name.Name instance (queried as a PTR record)
    :returns: first answer as a string, or None when the input type is
        unsupported or the name does not exist
    """
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    try:
        answers = dns.resolver.query(address, rtype)
    except dns.resolver.NXDOMAIN:
        # BUGFIX: callers (e.g. get_host_ip) treat a falsy return as
        # "not found" and fall back; previously NXDOMAIN propagated and
        # crashed the hook instead.
        return None

    if answers:
        return str(answers[0])
    return None
1676+
1677+
def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.

    :param hostname: hostname or IP address string
    :param fallback: value returned when resolution fails
    :returns: resolved IP address string, or fallback on failure
    """
    if is_ip(hostname):
        return hostname

    # Try DNS first, then the system resolver.
    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except Exception:
            # BUGFIX: was a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; keep the best-effort
            # fallback but only for genuine errors.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
1695+
1696+
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    :param address: IP address or hostname string
    :param fqdn: when True return the fully-qualified name (sans trailing
        dot), otherwise only the first label
    :returns: hostname string, or None if reverse resolution fails
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install("python-dnspython")
            import dns.reversename

        # Reverse DNS first, then the system resolver as a fallback.
        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            try:
                result = socket.gethostbyaddr(address)[0]
            except Exception:
                # BUGFIX: was a bare 'except:' which also swallowed
                # KeyboardInterrupt/SystemExit.
                return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
1728
1729=== added directory 'charmhelpers.new/contrib/openstack'
1730=== added directory 'charmhelpers.new/contrib/openstack/amulet'
1731=== added file 'charmhelpers.new/contrib/openstack/amulet/deployment.py'
1732--- charmhelpers.new/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
1733+++ charmhelpers.new/contrib/openstack/amulet/deployment.py 2015-11-24 19:47:41 +0000
1734@@ -0,0 +1,297 @@
1735+# Copyright 2014-2015 Canonical Limited.
1736+#
1737+# This file is part of charm-helpers.
1738+#
1739+# charm-helpers is free software: you can redistribute it and/or modify
1740+# it under the terms of the GNU Lesser General Public License version 3 as
1741+# published by the Free Software Foundation.
1742+#
1743+# charm-helpers is distributed in the hope that it will be useful,
1744+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1745+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1746+# GNU Lesser General Public License for more details.
1747+#
1748+# You should have received a copy of the GNU Lesser General Public License
1749+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1750+
1751+import logging
1752+import re
1753+import sys
1754+import six
1755+from collections import OrderedDict
1756+from charmhelpers.contrib.amulet.deployment import (
1757+ AmuletDeployment
1758+)
1759+
1760+DEBUG = logging.DEBUG
1761+ERROR = logging.ERROR
1762+
1763+
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresonding
        stable or next branches for the other_services."""

        self.log.info('OpenStackAmuletDeployment: determine branch locations')

        # Charms outside the lp:~openstack-charmers namespace
        base_charms = ['mysql', 'mongodb', 'nrpe']

        # Force these charms to current series even when using an older series.
        # ie. Use trusty/nrpe even when series is precise, as the P charm
        # does not possess the necessary external master config and hooks.
        force_series_current = ['nrpe']

        if self.series in ['precise', 'trusty']:
            base_series = self.series
        else:
            base_series = self.current_next

        for svc in other_services:
            # BUGFIX: apply the forced series per-service. The previous code
            # mutated base_series inside the loop, so a force-listed charm
            # leaked the current series into every service that followed it.
            if svc['name'] in force_series_current:
                svc_series = self.current_next
            else:
                svc_series = base_series
            # If a location has been explicitly set, use it
            if svc.get('location'):
                continue
            if self.stable:
                temp = 'lp:charms/{}/{}'
                svc['location'] = temp.format(svc_series,
                                              svc['name'])
            else:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}/{}'
                    svc['location'] = temp.format(svc_series,
                                                  svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])

        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        self.log.info('OpenStackAmuletDeployment: adding services')

        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        services = other_services
        services.append(this_service)

        # Charms which should use the source config option
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        self.log.info('OpenStackAmuletDeployment: configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _auto_wait_for_status(self, message=None, exclude_services=None,
                              include_only=None, timeout=1800):
        """Wait for all units to have a specific extended status, except
        for any defined as excluded.  Unless specified via message, any
        status containing any case of 'ready' will be considered a match.

        Examples of message usage:

          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)

          Wait for all units to reach this status (exact match):
              message = re.compile('^Unit is ready and clustered$')

          Wait for all units to reach any one of these (exact match):
              message = re.compile('Unit is ready|OK|Ready')

          Wait for at least one unit to reach this status (exact match):
              message = {'ready'}

        See Amulet's sentry.wait_for_messages() for message usage detail.
        https://github.com/juju/amulet/blob/master/amulet/sentry.py

        :param message: Expected status match
        :param exclude_services: List of juju service names to ignore,
            not to be used in conjuction with include_only.
        :param include_only: List of juju service names to exclusively check,
            not to be used in conjuction with exclude_services.
        :param timeout: Maximum time in seconds to wait for status match
        :returns: None.  Raises if timeout is hit.
        """
        self.log.info('Waiting for extended status on units...')

        all_services = self.d.services.keys()

        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')

        if message:
            # BUGFIX: re._pattern_type is a private name (removed in newer
            # Pythons); duck-type on the 'pattern' attribute of compiled
            # regexes instead.
            match = getattr(message, 'pattern', message)
            self.log.debug('Custom extended status wait match: '
                           '{}'.format(match))
        else:
            self.log.debug('Default extended status wait match: contains '
                           'READY (case-insensitive)')
            message = re.compile('.*ready.*', re.IGNORECASE)

        if exclude_services:
            self.log.debug('Excluding services from extended status match: '
                           '{}'.format(exclude_services))
        else:
            exclude_services = []

        if include_only:
            services = include_only
        else:
            services = list(set(all_services) - set(exclude_services))

        self.log.debug('Waiting up to {}s for extended status on services: '
                       '{}'.format(timeout, services))
        service_messages = {service: message for service in services}
        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
        self.log.info('OK')

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
         self.wily_liberty) = range(12)

        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo,
            ('wily', None): self.wily_liberty}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
            ('wily', 'liberty'),
        ])
        if self.openstack:
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools in a ceph + cinder + glance
        test scenario, based on OpenStack release and whether ceph radosgw
        is flagged as present or not."""

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            pools = [
                'rbd',
                'cinder',
                'glance'
            ]
        else:
            # Juno or earlier
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
2032
2033=== added file 'charmhelpers.new/contrib/openstack/amulet/utils.py'
2034--- charmhelpers.new/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
2035+++ charmhelpers.new/contrib/openstack/amulet/utils.py 2015-11-24 19:47:41 +0000
2036@@ -0,0 +1,985 @@
2037+# Copyright 2014-2015 Canonical Limited.
2038+#
2039+# This file is part of charm-helpers.
2040+#
2041+# charm-helpers is free software: you can redistribute it and/or modify
2042+# it under the terms of the GNU Lesser General Public License version 3 as
2043+# published by the Free Software Foundation.
2044+#
2045+# charm-helpers is distributed in the hope that it will be useful,
2046+# but WITHOUT ANY WARRANTY; without even the implied warranty of
2047+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2048+# GNU Lesser General Public License for more details.
2049+#
2050+# You should have received a copy of the GNU Lesser General Public License
2051+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2052+
2053+import amulet
2054+import json
2055+import logging
2056+import os
2057+import re
2058+import six
2059+import time
2060+import urllib
2061+
2062+import cinderclient.v1.client as cinder_client
2063+import glanceclient.v1.client as glance_client
2064+import heatclient.v1.client as heat_client
2065+import keystoneclient.v2_0 as keystone_client
2066+import novaclient.v1_1.client as nova_client
2067+import pika
2068+import swiftclient
2069+
2070+from charmhelpers.contrib.amulet.utils import (
2071+ AmuletUtils
2072+)
2073+
2074+DEBUG = logging.DEBUG
2075+ERROR = logging.ERROR
2076+
2077+
2078+class OpenStackAmuletUtils(AmuletUtils):
2079+ """OpenStack amulet utilities.
2080+
2081+ This class inherits from AmuletUtils and has additional support
2082+ that is specifically for use by OpenStack charm tests.
2083+ """
2084+
    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment.

        :param log_level: logging level passed to the AmuletUtils base
            class (defaults to ERROR)
        """
        super(OpenStackAmuletUtils, self).__init__(log_level)
2088+
2089+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
2090+ public_port, expected):
2091+ """Validate endpoint data.
2092+
2093+ Validate actual endpoint data vs expected endpoint data. The ports
2094+ are used to find the matching endpoint.
2095+ """
2096+ self.log.debug('Validating endpoint data...')
2097+ self.log.debug('actual: {}'.format(repr(endpoints)))
2098+ found = False
2099+ for ep in endpoints:
2100+ self.log.debug('endpoint: {}'.format(repr(ep)))
2101+ if (admin_port in ep.adminurl and
2102+ internal_port in ep.internalurl and
2103+ public_port in ep.publicurl):
2104+ found = True
2105+ actual = {'id': ep.id,
2106+ 'region': ep.region,
2107+ 'adminurl': ep.adminurl,
2108+ 'internalurl': ep.internalurl,
2109+ 'publicurl': ep.publicurl,
2110+ 'service_id': ep.service_id}
2111+ ret = self._validate_dict_data(expected, actual)
2112+ if ret:
2113+ return 'unexpected endpoint data - {}'.format(ret)
2114+
2115+ if not found:
2116+ return 'endpoint not found'
2117+
2118+ def validate_svc_catalog_endpoint_data(self, expected, actual):
2119+ """Validate service catalog endpoint data.
2120+
2121+ Validate a list of actual service catalog endpoints vs a list of
2122+ expected service catalog endpoints.
2123+ """
2124+ self.log.debug('Validating service catalog endpoint data...')
2125+ self.log.debug('actual: {}'.format(repr(actual)))
2126+ for k, v in six.iteritems(expected):
2127+ if k in actual:
2128+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
2129+ if ret:
2130+ return self.endpoint_error(k, ret)
2131+ else:
2132+ return "endpoint {} does not exist".format(k)
2133+ return ret
2134+
2135+ def validate_tenant_data(self, expected, actual):
2136+ """Validate tenant data.
2137+
2138+ Validate a list of actual tenant data vs list of expected tenant
2139+ data.
2140+ """
2141+ self.log.debug('Validating tenant data...')
2142+ self.log.debug('actual: {}'.format(repr(actual)))
2143+ for e in expected:
2144+ found = False
2145+ for act in actual:
2146+ a = {'enabled': act.enabled, 'description': act.description,
2147+ 'name': act.name, 'id': act.id}
2148+ if e['name'] == a['name']:
2149+ found = True
2150+ ret = self._validate_dict_data(e, a)
2151+ if ret:
2152+ return "unexpected tenant data - {}".format(ret)
2153+ if not found:
2154+ return "tenant {} does not exist".format(e['name'])
2155+ return ret
2156+
2157+ def validate_role_data(self, expected, actual):
2158+ """Validate role data.
2159+
2160+ Validate a list of actual role data vs a list of expected role
2161+ data.
2162+ """
2163+ self.log.debug('Validating role data...')
2164+ self.log.debug('actual: {}'.format(repr(actual)))
2165+ for e in expected:
2166+ found = False
2167+ for act in actual:
2168+ a = {'name': act.name, 'id': act.id}
2169+ if e['name'] == a['name']:
2170+ found = True
2171+ ret = self._validate_dict_data(e, a)
2172+ if ret:
2173+ return "unexpected role data - {}".format(ret)
2174+ if not found:
2175+ return "role {} does not exist".format(e['name'])
2176+ return ret
2177+
2178+ def validate_user_data(self, expected, actual):
2179+ """Validate user data.
2180+
2181+ Validate a list of actual user data vs a list of expected user
2182+ data.
2183+ """
2184+ self.log.debug('Validating user data...')
2185+ self.log.debug('actual: {}'.format(repr(actual)))
2186+ for e in expected:
2187+ found = False
2188+ for act in actual:
2189+ a = {'enabled': act.enabled, 'name': act.name,
2190+ 'email': act.email, 'tenantId': act.tenantId,
2191+ 'id': act.id}
2192+ if e['name'] == a['name']:
2193+ found = True
2194+ ret = self._validate_dict_data(e, a)
2195+ if ret:
2196+ return "unexpected user data - {}".format(ret)
2197+ if not found:
2198+ return "user {} does not exist".format(e['name'])
2199+ return ret
2200+
2201+ def validate_flavor_data(self, expected, actual):
2202+ """Validate flavor data.
2203+
2204+ Validate a list of actual flavors vs a list of expected flavors.
2205+ """
2206+ self.log.debug('Validating flavor data...')
2207+ self.log.debug('actual: {}'.format(repr(actual)))
2208+ act = [a.name for a in actual]
2209+ return self._validate_list_data(expected, act)
2210+
2211+ def tenant_exists(self, keystone, tenant):
2212+ """Return True if tenant exists."""
2213+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
2214+ return tenant in [t.name for t in keystone.tenants.list()]
2215+
2216+ def authenticate_cinder_admin(self, keystone_sentry, username,
2217+ password, tenant):
2218+ """Authenticates admin user with cinder."""
2219+ # NOTE(beisner): cinder python client doesn't accept tokens.
2220+ service_ip = \
2221+ keystone_sentry.relation('shared-db',
2222+ 'mysql:shared-db')['private-address']
2223+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
2224+ return cinder_client.Client(username, password, tenant, ept)
2225+
2226+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
2227+ tenant):
2228+ """Authenticates admin user with the keystone admin endpoint."""
2229+ self.log.debug('Authenticating keystone admin...')
2230+ unit = keystone_sentry
2231+ service_ip = unit.relation('shared-db',
2232+ 'mysql:shared-db')['private-address']
2233+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
2234+ return keystone_client.Client(username=user, password=password,
2235+ tenant_name=tenant, auth_url=ep)
2236+
2237+ def authenticate_keystone_user(self, keystone, user, password, tenant):
2238+ """Authenticates a regular user with the keystone public endpoint."""
2239+ self.log.debug('Authenticating keystone user ({})...'.format(user))
2240+ ep = keystone.service_catalog.url_for(service_type='identity',
2241+ endpoint_type='publicURL')
2242+ return keystone_client.Client(username=user, password=password,
2243+ tenant_name=tenant, auth_url=ep)
2244+
2245+ def authenticate_glance_admin(self, keystone):
2246+ """Authenticates admin user with glance."""
2247+ self.log.debug('Authenticating glance admin...')
2248+ ep = keystone.service_catalog.url_for(service_type='image',
2249+ endpoint_type='adminURL')
2250+ return glance_client.Client(ep, token=keystone.auth_token)
2251+
2252+ def authenticate_heat_admin(self, keystone):
2253+ """Authenticates the admin user with heat."""
2254+ self.log.debug('Authenticating heat admin...')
2255+ ep = keystone.service_catalog.url_for(service_type='orchestration',
2256+ endpoint_type='publicURL')
2257+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
2258+
2259+ def authenticate_nova_user(self, keystone, user, password, tenant):
2260+ """Authenticates a regular user with nova-api."""
2261+ self.log.debug('Authenticating nova user ({})...'.format(user))
2262+ ep = keystone.service_catalog.url_for(service_type='identity',
2263+ endpoint_type='publicURL')
2264+ return nova_client.Client(username=user, api_key=password,
2265+ project_id=tenant, auth_url=ep)
2266+
2267+ def authenticate_swift_user(self, keystone, user, password, tenant):
2268+ """Authenticates a regular user with swift api."""
2269+ self.log.debug('Authenticating swift user ({})...'.format(user))
2270+ ep = keystone.service_catalog.url_for(service_type='identity',
2271+ endpoint_type='publicURL')
2272+ return swiftclient.Connection(authurl=ep,
2273+ user=user,
2274+ key=password,
2275+ tenant_name=tenant,
2276+ auth_version='2.0')
2277+
    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance,
        validate and return a resource pointer.

        :param glance: pointer to authenticated glance connection
        :param image_name: display name for new image
        :returns: glance image pointer
        """
        self.log.debug('Creating glance cirros image '
                       '({})...'.format(image_name))

        # Download cirros image
        # NOTE: FancyURLopener is the Python 2 'urllib' API; this module
        # targets py2 test environments.
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        # The 'released' file names the latest cirros version string.
        f = opener.open('http://download.cirros-cloud.net/version/released')
        version = f.read().strip()
        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
        local_path = os.path.join('tests', cirros_img)

        # Only download if not already cached under tests/.
        if not os.path.exists(local_path):
            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        # Create glance image
        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)

        # Wait for image to reach active status
        img_id = image.id
        ret = self.resource_reaches_status(glance.images, img_id,
                                           expected_stat='active',
                                           msg='Image status wait')
        if not ret:
            msg = 'Glance image failed to reach expected state.'
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Re-validate new image
        # Each attribute is re-fetched from glance to confirm the upload
        # really took effect server-side.
        self.log.debug('Validating image attributes...')
        val_img_name = glance.images.get(img_id).name
        val_img_stat = glance.images.get(img_id).status
        val_img_pub = glance.images.get(img_id).is_public
        val_img_cfmt = glance.images.get(img_id).container_format
        val_img_dfmt = glance.images.get(img_id).disk_format
        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
                    'container fmt:{} disk fmt:{}'.format(
                        val_img_name, val_img_pub, img_id,
                        val_img_stat, val_img_cfmt, val_img_dfmt))

        if val_img_name == image_name and val_img_stat == 'active' \
                and val_img_pub is True and val_img_cfmt == 'bare' \
                and val_img_dfmt == 'qcow2':
            self.log.debug(msg_attr)
        else:
            # NOTE(review): message says 'Volume validation failed' but this
            # is image validation - looks copy-pasted; confirm before fixing.
            msg = ('Volume validation failed, {}'.format(msg_attr))
            amulet.raise_status(amulet.FAIL, msg=msg)

        return image
2345+
2346+ def delete_image(self, glance, image):
2347+ """Delete the specified image."""
2348+
2349+ # /!\ DEPRECATION WARNING
2350+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2351+ 'delete_resource instead of delete_image.')
2352+ self.log.debug('Deleting glance image ({})...'.format(image))
2353+ return self.delete_resource(glance.images, image, msg='glance image')
2354+
2355+ def create_instance(self, nova, image_name, instance_name, flavor):
2356+ """Create the specified instance."""
2357+ self.log.debug('Creating instance '
2358+ '({}|{}|{})'.format(instance_name, image_name, flavor))
2359+ image = nova.images.find(name=image_name)
2360+ flavor = nova.flavors.find(name=flavor)
2361+ instance = nova.servers.create(name=instance_name, image=image,
2362+ flavor=flavor)
2363+
2364+ count = 1
2365+ status = instance.status
2366+ while status != 'ACTIVE' and count < 60:
2367+ time.sleep(3)
2368+ instance = nova.servers.get(instance.id)
2369+ status = instance.status
2370+ self.log.debug('instance status: {}'.format(status))
2371+ count += 1
2372+
2373+ if status != 'ACTIVE':
2374+ self.log.error('instance creation timed out')
2375+ return None
2376+
2377+ return instance
2378+
2379+ def delete_instance(self, nova, instance):
2380+ """Delete the specified instance."""
2381+
2382+ # /!\ DEPRECATION WARNING
2383+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2384+ 'delete_resource instead of delete_instance.')
2385+ self.log.debug('Deleting instance ({})...'.format(instance))
2386+ return self.delete_resource(nova.servers, instance,
2387+ msg='nova instance')
2388+
2389+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
2390+ """Create a new keypair, or return pointer if it already exists."""
2391+ try:
2392+ _keypair = nova.keypairs.get(keypair_name)
2393+ self.log.debug('Keypair ({}) already exists, '
2394+ 'using it.'.format(keypair_name))
2395+ return _keypair
2396+ except:
2397+ self.log.debug('Keypair ({}) does not exist, '
2398+ 'creating it.'.format(keypair_name))
2399+
2400+ _keypair = nova.keypairs.create(name=keypair_name)
2401+ return _keypair
2402+
    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                             img_id=None, src_vol_id=None, snap_id=None):
        """Create cinder volume, optionally from a glance image, OR
        optionally as a clone of an existing volume, OR optionally
        from a snapshot.  Wait for the new volume status to reach
        the expected status, validate and return a resource pointer.

        :param cinder: authenticated cinder client
        :param vol_name: cinder volume display name
        :param vol_size: size in gigabytes
        :param img_id: optional glance image id
        :param src_vol_id: optional source volume id to clone
        :param snap_id: optional snapshot id to use
        :returns: cinder volume pointer
        """
        # Handle parameter input and avoid impossible combinations
        # At most one of img_id / src_vol_id / snap_id may be given; the
        # 'bootable' flag is a *string* ('true'/'false') matching what the
        # cinder API reports, and is compared against it verbatim below.
        if img_id and not src_vol_id and not snap_id:
            # Create volume from image
            self.log.debug('Creating cinder volume from glance image...')
            bootable = 'true'
        elif src_vol_id and not img_id and not snap_id:
            # Clone an existing volume
            self.log.debug('Cloning cinder volume...')
            bootable = cinder.volumes.get(src_vol_id).bootable
        elif snap_id and not src_vol_id and not img_id:
            # Create volume from snapshot
            self.log.debug('Creating cinder volume from snapshot...')
            snap = cinder.volume_snapshots.find(id=snap_id)
            # Volume created from a snapshot inherits the snapshot's size
            # and the bootable flag of the snapshot's parent volume.
            vol_size = snap.size
            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
            bootable = cinder.volumes.get(snap_vol_id).bootable
        elif not img_id and not src_vol_id and not snap_id:
            # Create volume
            self.log.debug('Creating cinder volume...')
            bootable = 'false'
        else:
            # Impossible combination of parameters
            msg = ('Invalid method use - name:{} size:{} img_id:{} '
                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
                                                     img_id, src_vol_id,
                                                     snap_id))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create new volume
        # NOTE: execution only continues past raise_status if it raises;
        # vol_id is otherwise guaranteed to be bound here.
        try:
            vol_new = cinder.volumes.create(display_name=vol_name,
                                            imageRef=img_id,
                                            size=vol_size,
                                            source_volid=src_vol_id,
                                            snapshot_id=snap_id)
            vol_id = vol_new.id
        except Exception as e:
            msg = 'Failed to create volume: {}'.format(e)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Wait for volume to reach available status
        ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                           expected_stat="available",
                                           msg="Volume status wait")
        if not ret:
            msg = 'Cinder volume failed to reach expected state.'
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Re-validate new volume
        # Each attribute is re-fetched from cinder to confirm server-side
        # state, not just the object returned by create().
        self.log.debug('Validating volume attributes...')
        val_vol_name = cinder.volumes.get(vol_id).display_name
        val_vol_boot = cinder.volumes.get(vol_id).bootable
        val_vol_stat = cinder.volumes.get(vol_id).status
        val_vol_size = cinder.volumes.get(vol_id).size
        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                    '{} size:{}'.format(val_vol_name, vol_id,
                                        val_vol_stat, val_vol_boot,
                                        val_vol_size))

        if val_vol_boot == bootable and val_vol_stat == 'available' \
                and val_vol_name == vol_name and val_vol_size == vol_size:
            self.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            amulet.raise_status(amulet.FAIL, msg=msg)

        return vol_new
2484+
2485+ def delete_resource(self, resource, resource_id,
2486+ msg="resource", max_wait=120):
2487+ """Delete one openstack resource, such as one instance, keypair,
2488+ image, volume, stack, etc., and confirm deletion within max wait time.
2489+
2490+ :param resource: pointer to os resource type, ex:glance_client.images
2491+ :param resource_id: unique name or id for the openstack resource
2492+ :param msg: text to identify purpose in logging
2493+ :param max_wait: maximum wait time in seconds
2494+ :returns: True if successful, otherwise False
2495+ """
2496+ self.log.debug('Deleting OpenStack resource '
2497+ '{} ({})'.format(resource_id, msg))
2498+ num_before = len(list(resource.list()))
2499+ resource.delete(resource_id)
2500+
2501+ tries = 0
2502+ num_after = len(list(resource.list()))
2503+ while num_after != (num_before - 1) and tries < (max_wait / 4):
2504+ self.log.debug('{} delete check: '
2505+ '{} [{}:{}] {}'.format(msg, tries,
2506+ num_before,
2507+ num_after,
2508+ resource_id))
2509+ time.sleep(4)
2510+ num_after = len(list(resource.list()))
2511+ tries += 1
2512+
2513+ self.log.debug('{}: expected, actual count = {}, '
2514+ '{}'.format(msg, num_before - 1, num_after))
2515+
2516+ if num_after == (num_before - 1):
2517+ return True
2518+ else:
2519+ self.log.error('{} delete timed out'.format(msg))
2520+ return False
2521+
2522+ def resource_reaches_status(self, resource, resource_id,
2523+ expected_stat='available',
2524+ msg='resource', max_wait=120):
2525+ """Wait for an openstack resources status to reach an
2526+ expected status within a specified time. Useful to confirm that
2527+ nova instances, cinder vols, snapshots, glance images, heat stacks
2528+ and other resources eventually reach the expected status.
2529+
2530+ :param resource: pointer to os resource type, ex: heat_client.stacks
2531+ :param resource_id: unique id for the openstack resource
2532+ :param expected_stat: status to expect resource to reach
2533+ :param msg: text to identify purpose in logging
2534+ :param max_wait: maximum wait time in seconds
2535+ :returns: True if successful, False if status is not reached
2536+ """
2537+
2538+ tries = 0
2539+ resource_stat = resource.get(resource_id).status
2540+ while resource_stat != expected_stat and tries < (max_wait / 4):
2541+ self.log.debug('{} status check: '
2542+ '{} [{}:{}] {}'.format(msg, tries,
2543+ resource_stat,
2544+ expected_stat,
2545+ resource_id))
2546+ time.sleep(4)
2547+ resource_stat = resource.get(resource_id).status
2548+ tries += 1
2549+
2550+ self.log.debug('{}: expected, actual status = {}, '
2551+ '{}'.format(msg, resource_stat, expected_stat))
2552+
2553+ if resource_stat == expected_stat:
2554+ return True
2555+ else:
2556+ self.log.debug('{} never reached expected status: '
2557+ '{}'.format(resource_id, expected_stat))
2558+ return False
2559+
2560+ def get_ceph_osd_id_cmd(self, index):
2561+ """Produce a shell command that will return a ceph-osd id."""
2562+ return ("`initctl list | grep 'ceph-osd ' | "
2563+ "awk 'NR=={} {{ print $2 }}' | "
2564+ "grep -o '[0-9]*'`".format(index + 1))
2565+
2566+ def get_ceph_pools(self, sentry_unit):
2567+ """Return a dict of ceph pools from a single ceph unit, with
2568+ pool name as keys, pool id as vals."""
2569+ pools = {}
2570+ cmd = 'sudo ceph osd lspools'
2571+ output, code = sentry_unit.run(cmd)
2572+ if code != 0:
2573+ msg = ('{} `{}` returned {} '
2574+ '{}'.format(sentry_unit.info['unit_name'],
2575+ cmd, code, output))
2576+ amulet.raise_status(amulet.FAIL, msg=msg)
2577+
2578+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
2579+ for pool in str(output).split(','):
2580+ pool_id_name = pool.split(' ')
2581+ if len(pool_id_name) == 2:
2582+ pool_id = pool_id_name[0]
2583+ pool_name = pool_id_name[1]
2584+ pools[pool_name] = int(pool_id)
2585+
2586+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
2587+ pools))
2588+ return pools
2589+
2590+ def get_ceph_df(self, sentry_unit):
2591+ """Return dict of ceph df json output, including ceph pool state.
2592+
2593+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2594+ :returns: Dict of ceph df output
2595+ """
2596+ cmd = 'sudo ceph df --format=json'
2597+ output, code = sentry_unit.run(cmd)
2598+ if code != 0:
2599+ msg = ('{} `{}` returned {} '
2600+ '{}'.format(sentry_unit.info['unit_name'],
2601+ cmd, code, output))
2602+ amulet.raise_status(amulet.FAIL, msg=msg)
2603+ return json.loads(output)
2604+
2605+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
2606+ """Take a sample of attributes of a ceph pool, returning ceph
2607+ pool name, object count and disk space used for the specified
2608+ pool ID number.
2609+
2610+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2611+ :param pool_id: Ceph pool ID
2612+ :returns: List of pool name, object count, kb disk space used
2613+ """
2614+ df = self.get_ceph_df(sentry_unit)
2615+ pool_name = df['pools'][pool_id]['name']
2616+ obj_count = df['pools'][pool_id]['stats']['objects']
2617+ kb_used = df['pools'][pool_id]['stats']['kb_used']
2618+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
2619+ '{} kb used'.format(pool_name, pool_id,
2620+ obj_count, kb_used))
2621+ return pool_name, obj_count, kb_used
2622+
2623+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
2624+ """Validate ceph pool samples taken over time, such as pool
2625+ object counts or pool kb used, before adding, after adding, and
2626+ after deleting items which affect those pool attributes. The
2627+ 2nd element is expected to be greater than the 1st; 3rd is expected
2628+ to be less than the 2nd.
2629+
2630+ :param samples: List containing 3 data samples
2631+ :param sample_type: String for logging and usage context
2632+ :returns: None if successful, Failure message otherwise
2633+ """
2634+ original, created, deleted = range(3)
2635+ if samples[created] <= samples[original] or \
2636+ samples[deleted] >= samples[created]:
2637+ return ('Ceph {} samples ({}) '
2638+ 'unexpected.'.format(sample_type, samples))
2639+ else:
2640+ self.log.debug('Ceph {} samples (OK): '
2641+ '{}'.format(sample_type, samples))
2642+ return None
2643+
2644+ # rabbitmq/amqp specific helpers:
2645+
2646+ def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
2647+ """Wait for rmq units extended status to show cluster readiness,
2648+ after an optional initial sleep period. Initial sleep is likely
2649+ necessary to be effective following a config change, as status
2650+ message may not instantly update to non-ready."""
2651+
2652+ if init_sleep:
2653+ time.sleep(init_sleep)
2654+
2655+ message = re.compile('^Unit is ready and clustered$')
2656+ deployment._auto_wait_for_status(message=message,
2657+ timeout=timeout,
2658+ include_only=['rabbitmq-server'])
2659+
2660+ def add_rmq_test_user(self, sentry_units,
2661+ username="testuser1", password="changeme"):
2662+ """Add a test user via the first rmq juju unit, check connection as
2663+ the new user against all sentry units.
2664+
2665+ :param sentry_units: list of sentry unit pointers
2666+ :param username: amqp user name, default to testuser1
2667+ :param password: amqp user password
2668+ :returns: None if successful. Raise on error.
2669+ """
2670+ self.log.debug('Adding rmq user ({})...'.format(username))
2671+
2672+ # Check that user does not already exist
2673+ cmd_user_list = 'rabbitmqctl list_users'
2674+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2675+ if username in output:
2676+ self.log.warning('User ({}) already exists, returning '
2677+ 'gracefully.'.format(username))
2678+ return
2679+
2680+ perms = '".*" ".*" ".*"'
2681+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
2682+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
2683+
2684+ # Add user via first unit
2685+ for cmd in cmds:
2686+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
2687+
2688+ # Check connection against the other sentry_units
2689+ self.log.debug('Checking user connect against units...')
2690+ for sentry_unit in sentry_units:
2691+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
2692+ username=username,
2693+ password=password)
2694+ connection.close()
2695+
2696+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
2697+ """Delete a rabbitmq user via the first rmq juju unit.
2698+
2699+ :param sentry_units: list of sentry unit pointers
2700+ :param username: amqp user name, default to testuser1
2701+ :param password: amqp user password
2702+ :returns: None if successful or no such user.
2703+ """
2704+ self.log.debug('Deleting rmq user ({})...'.format(username))
2705+
2706+ # Check that the user exists
2707+ cmd_user_list = 'rabbitmqctl list_users'
2708+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2709+
2710+ if username not in output:
2711+ self.log.warning('User ({}) does not exist, returning '
2712+ 'gracefully.'.format(username))
2713+ return
2714+
2715+ # Delete the user
2716+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
2717+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
2718+
2719+ def get_rmq_cluster_status(self, sentry_unit):
2720+ """Execute rabbitmq cluster status command on a unit and return
2721+ the full output.
2722+
2723+        :param sentry_unit: sentry unit
2724+ :returns: String containing console output of cluster status command
2725+ """
2726+ cmd = 'rabbitmqctl cluster_status'
2727+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
2728+ self.log.debug('{} cluster_status:\n{}'.format(
2729+ sentry_unit.info['unit_name'], output))
2730+ return str(output)
2731+
2732+ def get_rmq_cluster_running_nodes(self, sentry_unit):
2733+ """Parse rabbitmqctl cluster_status output string, return list of
2734+ running rabbitmq cluster nodes.
2735+
2736+        :param sentry_unit: sentry unit
2737+ :returns: List containing node names of running nodes
2738+ """
2739+ # NOTE(beisner): rabbitmqctl cluster_status output is not
2740+ # json-parsable, do string chop foo, then json.loads that.
2741+ str_stat = self.get_rmq_cluster_status(sentry_unit)
2742+ if 'running_nodes' in str_stat:
2743+ pos_start = str_stat.find("{running_nodes,") + 15
2744+ pos_end = str_stat.find("]},", pos_start) + 1
2745+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
2746+ run_nodes = json.loads(str_run_nodes)
2747+ return run_nodes
2748+ else:
2749+ return []
2750+
2751+ def validate_rmq_cluster_running_nodes(self, sentry_units):
2752+ """Check that all rmq unit hostnames are represented in the
2753+ cluster_status output of all units.
2754+
2755+        :param sentry_units: list of sentry unit pointers
2756+            (all rmq units)
2757+ :returns: None if successful, otherwise return error message
2758+ """
2759+ host_names = self.get_unit_hostnames(sentry_units)
2760+ errors = []
2761+
2762+ # Query every unit for cluster_status running nodes
2763+ for query_unit in sentry_units:
2764+ query_unit_name = query_unit.info['unit_name']
2765+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
2766+
2767+ # Confirm that every unit is represented in the queried unit's
2768+ # cluster_status running nodes output.
2769+ for validate_unit in sentry_units:
2770+ val_host_name = host_names[validate_unit.info['unit_name']]
2771+ val_node_name = 'rabbit@{}'.format(val_host_name)
2772+
2773+ if val_node_name not in running_nodes:
2774+ errors.append('Cluster member check failed on {}: {} not '
2775+ 'in {}\n'.format(query_unit_name,
2776+ val_node_name,
2777+ running_nodes))
2778+ if errors:
2779+ return ''.join(errors)
2780+
2781+ def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
2782+ """Check a single juju rmq unit for ssl and port in the config file."""
2783+ host = sentry_unit.info['public-address']
2784+ unit_name = sentry_unit.info['unit_name']
2785+
2786+ conf_file = '/etc/rabbitmq/rabbitmq.config'
2787+ conf_contents = str(self.file_contents_safe(sentry_unit,
2788+ conf_file, max_wait=16))
2789+ # Checks
2790+ conf_ssl = 'ssl' in conf_contents
2791+ conf_port = str(port) in conf_contents
2792+
2793+ # Port explicitly checked in config
2794+ if port and conf_port and conf_ssl:
2795+ self.log.debug('SSL is enabled @{}:{} '
2796+ '({})'.format(host, port, unit_name))
2797+ return True
2798+ elif port and not conf_port and conf_ssl:
2799+ self.log.debug('SSL is enabled @{} but not on port {} '
2800+ '({})'.format(host, port, unit_name))
2801+ return False
2802+ # Port not checked (useful when checking that ssl is disabled)
2803+ elif not port and conf_ssl:
2804+ self.log.debug('SSL is enabled @{}:{} '
2805+ '({})'.format(host, port, unit_name))
2806+ return True
2807+ elif not conf_ssl:
2808+ self.log.debug('SSL not enabled @{}:{} '
2809+ '({})'.format(host, port, unit_name))
2810+ return False
2811+ else:
2812+ msg = ('Unknown condition when checking SSL status @{}:{} '
2813+ '({})'.format(host, port, unit_name))
2814+ amulet.raise_status(amulet.FAIL, msg)
2815+
2816+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
2817+ """Check that ssl is enabled on rmq juju sentry units.
2818+
2819+ :param sentry_units: list of all rmq sentry units
2820+ :param port: optional ssl port override to validate
2821+ :returns: None if successful, otherwise return error message
2822+ """
2823+ for sentry_unit in sentry_units:
2824+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
2825+ return ('Unexpected condition: ssl is disabled on unit '
2826+ '({})'.format(sentry_unit.info['unit_name']))
2827+ return None
2828+
2829+ def validate_rmq_ssl_disabled_units(self, sentry_units):
2830+        """Check that ssl is disabled on listed rmq juju sentry units.
2831+
2832+        :param sentry_units: list of all rmq sentry units
2833+        :returns: None if successful, otherwise return error message
2834+ """
2835+ for sentry_unit in sentry_units:
2836+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
2837+ return ('Unexpected condition: ssl is enabled on unit '
2838+ '({})'.format(sentry_unit.info['unit_name']))
2839+ return None
2840+
2841+ def configure_rmq_ssl_on(self, sentry_units, deployment,
2842+ port=None, max_wait=60):
2843+ """Turn ssl charm config option on, with optional non-default
2844+ ssl port specification. Confirm that it is enabled on every
2845+ unit.
2846+
2847+ :param sentry_units: list of sentry units
2848+ :param deployment: amulet deployment object pointer
2849+ :param port: amqp port, use defaults if None
2850+ :param max_wait: maximum time to wait in seconds to confirm
2851+ :returns: None if successful. Raise on error.
2852+ """
2853+ self.log.debug('Setting ssl charm config option: on')
2854+
2855+ # Enable RMQ SSL
2856+ config = {'ssl': 'on'}
2857+ if port:
2858+ config['ssl_port'] = port
2859+
2860+ deployment.d.configure('rabbitmq-server', config)
2861+
2862+ # Wait for unit status
2863+ self.rmq_wait_for_cluster(deployment)
2864+
2865+ # Confirm
2866+ tries = 0
2867+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2868+ while ret and tries < (max_wait / 4):
2869+ time.sleep(4)
2870+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2871+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2872+ tries += 1
2873+
2874+ if ret:
2875+ amulet.raise_status(amulet.FAIL, ret)
2876+
2877+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
2878+ """Turn ssl charm config option off, confirm that it is disabled
2879+ on every unit.
2880+
2881+ :param sentry_units: list of sentry units
2882+ :param deployment: amulet deployment object pointer
2883+ :param max_wait: maximum time to wait in seconds to confirm
2884+ :returns: None if successful. Raise on error.
2885+ """
2886+ self.log.debug('Setting ssl charm config option: off')
2887+
2888+ # Disable RMQ SSL
2889+ config = {'ssl': 'off'}
2890+ deployment.d.configure('rabbitmq-server', config)
2891+
2892+ # Wait for unit status
2893+ self.rmq_wait_for_cluster(deployment)
2894+
2895+ # Confirm
2896+ tries = 0
2897+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2898+ while ret and tries < (max_wait / 4):
2899+ time.sleep(4)
2900+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2901+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2902+ tries += 1
2903+
2904+ if ret:
2905+ amulet.raise_status(amulet.FAIL, ret)
2906+
2907+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
2908+ port=None, fatal=True,
2909+ username="testuser1", password="changeme"):
2910+ """Establish and return a pika amqp connection to the rabbitmq service
2911+ running on a rmq juju unit.
2912+
2913+ :param sentry_unit: sentry unit pointer
2914+ :param ssl: boolean, default to False
2915+ :param port: amqp port, use defaults if None
2916+ :param fatal: boolean, default to True (raises on connect error)
2917+ :param username: amqp user name, default to testuser1
2918+ :param password: amqp user password
2919+ :returns: pika amqp connection pointer or None if failed and non-fatal
2920+ """
2921+ host = sentry_unit.info['public-address']
2922+ unit_name = sentry_unit.info['unit_name']
2923+
2924+ # Default port logic if port is not specified
2925+ if ssl and not port:
2926+ port = 5671
2927+ elif not ssl and not port:
2928+ port = 5672
2929+
2930+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
2931+ '{}...'.format(host, port, unit_name, username))
2932+
2933+ try:
2934+ credentials = pika.PlainCredentials(username, password)
2935+ parameters = pika.ConnectionParameters(host=host, port=port,
2936+ credentials=credentials,
2937+ ssl=ssl,
2938+ connection_attempts=3,
2939+ retry_delay=5,
2940+ socket_timeout=1)
2941+ connection = pika.BlockingConnection(parameters)
2942+ assert connection.server_properties['product'] == 'RabbitMQ'
2943+ self.log.debug('Connect OK')
2944+ return connection
2945+ except Exception as e:
2946+ msg = ('amqp connection failed to {}:{} as '
2947+ '{} ({})'.format(host, port, username, str(e)))
2948+ if fatal:
2949+ amulet.raise_status(amulet.FAIL, msg)
2950+ else:
2951+ self.log.warn(msg)
2952+ return None
2953+
2954+ def publish_amqp_message_by_unit(self, sentry_unit, message,
2955+ queue="test", ssl=False,
2956+ username="testuser1",
2957+ password="changeme",
2958+ port=None):
2959+ """Publish an amqp message to a rmq juju unit.
2960+
2961+ :param sentry_unit: sentry unit pointer
2962+ :param message: amqp message string
2963+ :param queue: message queue, default to test
2964+ :param username: amqp user name, default to testuser1
2965+ :param password: amqp user password
2966+ :param ssl: boolean, default to False
2967+ :param port: amqp port, use defaults if None
2968+ :returns: None. Raises exception if publish failed.
2969+ """
2970+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
2971+ message))
2972+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
2973+ port=port,
2974+ username=username,
2975+ password=password)
2976+
2977+ # NOTE(beisner): extra debug here re: pika hang potential:
2978+ # https://github.com/pika/pika/issues/297
2979+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
2980+ self.log.debug('Defining channel...')
2981+ channel = connection.channel()
2982+ self.log.debug('Declaring queue...')
2983+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
2984+ self.log.debug('Publishing message...')
2985+ channel.basic_publish(exchange='', routing_key=queue, body=message)
2986+ self.log.debug('Closing channel...')
2987+ channel.close()
2988+ self.log.debug('Closing connection...')
2989+ connection.close()
2990+
2991+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
2992+ username="testuser1",
2993+ password="changeme",
2994+ ssl=False, port=None):
2995+ """Get an amqp message from a rmq juju unit.
2996+
2997+ :param sentry_unit: sentry unit pointer
2998+ :param queue: message queue, default to test
2999+ :param username: amqp user name, default to testuser1
3000+ :param password: amqp user password
3001+ :param ssl: boolean, default to False
3002+ :param port: amqp port, use defaults if None
3003+ :returns: amqp message body as string. Raise if get fails.
3004+ """
3005+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
3006+ port=port,
3007+ username=username,
3008+ password=password)
3009+ channel = connection.channel()
3010+ method_frame, _, body = channel.basic_get(queue)
3011+
3012+ if method_frame:
3013+ self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
3014+ body))
3015+ channel.basic_ack(method_frame.delivery_tag)
3016+ channel.close()
3017+ connection.close()
3018+ return body
3019+ else:
3020+ msg = 'No message retrieved.'
3021+ amulet.raise_status(amulet.FAIL, msg)
3022
3023=== added file 'charmhelpers.new/contrib/openstack/context.py'
3024--- charmhelpers.new/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
3025+++ charmhelpers.new/contrib/openstack/context.py 2015-11-24 19:47:41 +0000
3026@@ -0,0 +1,1457 @@
3027+# Copyright 2014-2015 Canonical Limited.
3028+#
3029+# This file is part of charm-helpers.
3030+#
3031+# charm-helpers is free software: you can redistribute it and/or modify
3032+# it under the terms of the GNU Lesser General Public License version 3 as
3033+# published by the Free Software Foundation.
3034+#
3035+# charm-helpers is distributed in the hope that it will be useful,
3036+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3037+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3038+# GNU Lesser General Public License for more details.
3039+#
3040+# You should have received a copy of the GNU Lesser General Public License
3041+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3042+
3043+import glob
3044+import json
3045+import os
3046+import re
3047+import time
3048+from base64 import b64decode
3049+from subprocess import check_call
3050+
3051+import six
3052+import yaml
3053+
3054+from charmhelpers.fetch import (
3055+ apt_install,
3056+ filter_installed_packages,
3057+)
3058+from charmhelpers.core.hookenv import (
3059+ config,
3060+ is_relation_made,
3061+ local_unit,
3062+ log,
3063+ relation_get,
3064+ relation_ids,
3065+ related_units,
3066+ relation_set,
3067+ unit_get,
3068+ unit_private_ip,
3069+ charm_name,
3070+ DEBUG,
3071+ INFO,
3072+ WARNING,
3073+ ERROR,
3074+)
3075+
3076+from charmhelpers.core.sysctl import create as sysctl_create
3077+from charmhelpers.core.strutils import bool_from_string
3078+
3079+from charmhelpers.core.host import (
3080+ get_bond_master,
3081+ is_phy_iface,
3082+ list_nics,
3083+ get_nic_hwaddr,
3084+ mkdir,
3085+ write_file,
3086+)
3087+from charmhelpers.contrib.hahelpers.cluster import (
3088+ determine_apache_port,
3089+ determine_api_port,
3090+ https,
3091+ is_clustered,
3092+)
3093+from charmhelpers.contrib.hahelpers.apache import (
3094+ get_cert,
3095+ get_ca_cert,
3096+ install_ca_cert,
3097+)
3098+from charmhelpers.contrib.openstack.neutron import (
3099+ neutron_plugin_attribute,
3100+ parse_data_port_mappings,
3101+)
3102+from charmhelpers.contrib.openstack.ip import (
3103+ resolve_address,
3104+ INTERNAL,
3105+)
3106+from charmhelpers.contrib.network.ip import (
3107+ get_address_in_network,
3108+ get_ipv4_addr,
3109+ get_ipv6_addr,
3110+ get_netmask_for_address,
3111+ format_ipv6_addr,
3112+ is_address_in_network,
3113+ is_bridge_member,
3114+)
3115+from charmhelpers.contrib.openstack.utils import get_host_ip
3116+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
3117+ADDRESS_TYPES = ['admin', 'internal', 'public']
3118+
3119+
3120+class OSContextError(Exception):
3121+ pass
3122+
3123+
3124+def ensure_packages(packages):
3125+ """Install but do not upgrade required plugin packages."""
3126+ required = filter_installed_packages(packages)
3127+ if required:
3128+ apt_install(required, fatal=True)
3129+
3130+
3131+def context_complete(ctxt):
3132+ _missing = []
3133+ for k, v in six.iteritems(ctxt):
3134+ if v is None or v == '':
3135+ _missing.append(k)
3136+
3137+ if _missing:
3138+ log('Missing required data: %s' % ' '.join(_missing), level=INFO)
3139+ return False
3140+
3141+ return True
3142+
3143+
3144+def config_flags_parser(config_flags):
3145+ """Parses config flags string into dict.
3146+
3147+ This parsing method supports a few different formats for the config
3148+ flag values to be parsed:
3149+
3150+ 1. A string in the simple format of key=value pairs, with the possibility
3151+ of specifying multiple key value pairs within the same string. For
3152+ example, a string in the format of 'key1=value1, key2=value2' will
3153+ return a dict of:
3154+
3155+ {'key1': 'value1',
3156+ 'key2': 'value2'}.
3157+
3158+ 2. A string in the above format, but supporting a comma-delimited list
3159+ of values for the same key. For example, a string in the format of
3160+ 'key1=value1, key2=value3,value4,value5' will return a dict of:
3161+
3162+        {'key1': 'value1',
3163+         'key2': 'value3,value4,value5'}
3164+
3165+ 3. A string containing a colon character (:) prior to an equal
3166+ character (=) will be treated as yaml and parsed as such. This can be
3167+ used to specify more complex key value pairs. For example,
3168+ a string in the format of 'key1: subkey1=value1, subkey2=value2' will
3169+ return a dict of:
3170+
3171+        {'key1': 'subkey1=value1, subkey2=value2'}
3172+
3173+ The provided config_flags string may be a list of comma-separated values
3174+ which themselves may be comma-separated list of values.
3175+ """
3176+ # If we find a colon before an equals sign then treat it as yaml.
3177+ # Note: limit it to finding the colon first since this indicates assignment
3178+ # for inline yaml.
3179+ colon = config_flags.find(':')
3180+ equals = config_flags.find('=')
3181+ if colon > 0:
3182+ if colon < equals or equals < 0:
3183+ return yaml.safe_load(config_flags)
3184+
3185+ if config_flags.find('==') >= 0:
3186+ log("config_flags is not in expected format (key=value)", level=ERROR)
3187+ raise OSContextError
3188+
3189+ # strip the following from each value.
3190+ post_strippers = ' ,'
3191+ # we strip any leading/trailing '=' or ' ' from the string then
3192+ # split on '='.
3193+ split = config_flags.strip(' =').split('=')
3194+ limit = len(split)
3195+ flags = {}
3196+ for i in range(0, limit - 1):
3197+ current = split[i]
3198+ next = split[i + 1]
3199+ vindex = next.rfind(',')
3200+ if (i == limit - 2) or (vindex < 0):
3201+ value = next
3202+ else:
3203+ value = next[:vindex]
3204+
3205+ if i == 0:
3206+ key = current
3207+ else:
3208+ # if this not the first entry, expect an embedded key.
3209+ index = current.rfind(',')
3210+ if index < 0:
3211+ log("Invalid config value(s) at index %s" % (i), level=ERROR)
3212+ raise OSContextError
3213+ key = current[index + 1:]
3214+
3215+ # Add to collection.
3216+ flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
3217+
3218+ return flags
3219+
3220+
3221+class OSContextGenerator(object):
3222+ """Base class for all context generators."""
3223+ interfaces = []
3224+ related = False
3225+ complete = False
3226+ missing_data = []
3227+
3228+ def __call__(self):
3229+ raise NotImplementedError
3230+
3231+ def context_complete(self, ctxt):
3232+ """Check for missing data for the required context data.
3233+ Set self.missing_data if it exists and return False.
3234+ Set self.complete if no missing data and return True.
3235+ """
3236+ # Fresh start
3237+ self.complete = False
3238+ self.missing_data = []
3239+ for k, v in six.iteritems(ctxt):
3240+ if v is None or v == '':
3241+ if k not in self.missing_data:
3242+ self.missing_data.append(k)
3243+
3244+ if self.missing_data:
3245+ self.complete = False
3246+ log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
3247+ else:
3248+ self.complete = True
3249+ return self.complete
3250+
3251+ def get_related(self):
3252+ """Check if any of the context interfaces have relation ids.
3253+ Set self.related and return True if one of the interfaces
3254+ has relation ids.
3255+ """
3256+ # Fresh start
3257+ self.related = False
3258+ try:
3259+ for interface in self.interfaces:
3260+ if relation_ids(interface):
3261+ self.related = True
3262+ return self.related
3263+ except AttributeError as e:
3264+ log("{} {}"
3265+ "".format(self, e), 'INFO')
3266+ return self.related
3267+
3268+
3269+class SharedDBContext(OSContextGenerator):
3270+ interfaces = ['shared-db']
3271+
3272+ def __init__(self,
3273+ database=None, user=None, relation_prefix=None, ssl_dir=None):
3274+ """Allows inspecting relation for settings prefixed with
3275+ relation_prefix. This is useful for parsing access for multiple
3276+ databases returned via the shared-db interface (eg, nova_password,
3277+ quantum_password)
3278+ """
3279+ self.relation_prefix = relation_prefix
3280+ self.database = database
3281+ self.user = user
3282+ self.ssl_dir = ssl_dir
3283+ self.rel_name = self.interfaces[0]
3284+
3285+ def __call__(self):
3286+ self.database = self.database or config('database')
3287+ self.user = self.user or config('database-user')
3288+ if None in [self.database, self.user]:
3289+ log("Could not generate shared_db context. Missing required charm "
3290+ "config options. (database name and user)", level=ERROR)
3291+ raise OSContextError
3292+
3293+ ctxt = {}
3294+
3295+ # NOTE(jamespage) if mysql charm provides a network upon which
3296+ # access to the database should be made, reconfigure relation
3297+ # with the service units local address and defer execution
3298+ access_network = relation_get('access-network')
3299+ if access_network is not None:
3300+ if self.relation_prefix is not None:
3301+ hostname_key = "{}_hostname".format(self.relation_prefix)
3302+ else:
3303+ hostname_key = "hostname"
3304+ access_hostname = get_address_in_network(access_network,
3305+ unit_get('private-address'))
3306+ set_hostname = relation_get(attribute=hostname_key,
3307+ unit=local_unit())
3308+ if set_hostname != access_hostname:
3309+ relation_set(relation_settings={hostname_key: access_hostname})
3310+ return None # Defer any further hook execution for now....
3311+
3312+ password_setting = 'password'
3313+ if self.relation_prefix:
3314+ password_setting = self.relation_prefix + '_password'
3315+
3316+ for rid in relation_ids(self.interfaces[0]):
3317+ self.related = True
3318+ for unit in related_units(rid):
3319+ rdata = relation_get(rid=rid, unit=unit)
3320+ host = rdata.get('db_host')
3321+ host = format_ipv6_addr(host) or host
3322+ ctxt = {
3323+ 'database_host': host,
3324+ 'database': self.database,
3325+ 'database_user': self.user,
3326+ 'database_password': rdata.get(password_setting),
3327+ 'database_type': 'mysql'
3328+ }
3329+ if self.context_complete(ctxt):
3330+ db_ssl(rdata, ctxt, self.ssl_dir)
3331+ return ctxt
3332+ return {}
3333+
3334+
3335+class PostgresqlDBContext(OSContextGenerator):
3336+ interfaces = ['pgsql-db']
3337+
3338+ def __init__(self, database=None):
3339+ self.database = database
3340+
3341+ def __call__(self):
3342+ self.database = self.database or config('database')
3343+ if self.database is None:
3344+ log('Could not generate postgresql_db context. Missing required '
3345+ 'charm config options. (database name)', level=ERROR)
3346+ raise OSContextError
3347+
3348+ ctxt = {}
3349+ for rid in relation_ids(self.interfaces[0]):
3350+ self.related = True
3351+ for unit in related_units(rid):
3352+ rel_host = relation_get('host', rid=rid, unit=unit)
3353+ rel_user = relation_get('user', rid=rid, unit=unit)
3354+ rel_passwd = relation_get('password', rid=rid, unit=unit)
3355+ ctxt = {'database_host': rel_host,
3356+ 'database': self.database,
3357+ 'database_user': rel_user,
3358+ 'database_password': rel_passwd,
3359+ 'database_type': 'postgresql'}
3360+ if self.context_complete(ctxt):
3361+ return ctxt
3362+
3363+ return {}
3364+
3365+
def db_ssl(rdata, ctxt, ssl_dir):
    """Write SSL material received over a db relation into ssl_dir.

    :param rdata: relation data dict (may contain base64 'ssl_ca',
                  'ssl_cert', 'ssl_key').
    :param ctxt: context dict to augment with the written file paths.
    :param ssl_dir: directory to write certs/keys into; if falsy, ssl
                    material is ignored (with a log message).
    :returns: ctxt, possibly extended with database_ssl_{ca,cert,key}.
    """
    if 'ssl_ca' in rdata and ssl_dir:
        ca_path = os.path.join(ssl_dir, 'db-client.ca')
        # b64decode() returns bytes on python3, so write in binary mode
        # (identical behaviour on python2 where str is bytes).
        with open(ca_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_ca']))

        ctxt['database_ssl_ca'] = ca_path
    elif 'ssl_ca' in rdata:
        log("Charm not setup for ssl support but ssl ca found", level=INFO)
        return ctxt

    if 'ssl_cert' in rdata:
        cert_path = os.path.join(
            ssl_dir, 'db-client.cert')
        if not os.path.exists(cert_path):
            # Give the remote end time to settle before trusting the cert.
            log("Waiting 1m for ssl client cert validity", level=INFO)
            time.sleep(60)

        with open(cert_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_cert']))

        ctxt['database_ssl_cert'] = cert_path
        # NOTE(review): assumes 'ssl_key' always accompanies 'ssl_cert';
        # a KeyError here means the relation sent a cert without a key.
        key_path = os.path.join(ssl_dir, 'db-client.key')
        with open(key_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_key']))

        ctxt['database_ssl_key'] = key_path

    return ctxt
3395+
3396+
class IdentityServiceContext(OSContextGenerator):
    """Supplies keystone admin credentials and endpoint details gathered
    from the identity-service relation; also arranges a per-service token
    signing cache dir when service/service_user are given."""

    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
        self.service = service
        self.service_user = service_user
        self.rel_name = rel_name
        self.interfaces = [self.rel_name]

    def __call__(self):
        log('Generating template context for ' + self.rel_name, level=DEBUG)
        ctxt = {}

        if self.service and self.service_user:
            # This is required for pki token signing if we don't want /tmp to
            # be used.
            cachedir = '/var/cache/%s' % (self.service)
            if not os.path.isdir(cachedir):
                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
                mkdir(path=cachedir, owner=self.service_user,
                      group=self.service_user, perms=0o700)

            ctxt['signing_dir'] = cachedir

        for relation_id in relation_ids(self.rel_name):
            self.related = True
            for member in related_units(relation_id):
                settings = relation_get(rid=relation_id, unit=member)
                service_host = settings.get('service_host')
                service_host = format_ipv6_addr(service_host) or service_host
                auth_host = settings.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                ctxt.update({
                    'service_port': settings.get('service_port'),
                    'service_host': service_host,
                    'auth_host': auth_host,
                    'auth_port': settings.get('auth_port'),
                    'admin_tenant_name': settings.get('service_tenant'),
                    'admin_user': settings.get('service_username'),
                    'admin_password': settings.get('service_password'),
                    'service_protocol': settings.get('service_protocol') or 'http',
                    'auth_protocol': settings.get('auth_protocol') or 'http',
                })

                if self.context_complete(ctxt):
                    # NOTE(jamespage) this is required for >= icehouse
                    # so a missing value just indicates keystone needs
                    # upgrading
                    ctxt['admin_tenant_id'] = settings.get('service_tenant_id')
                    return ctxt

        return {}
3448+
3449+
class AMQPContext(OSContextGenerator):
    """Context generator for rabbitmq connection details from the amqp
    relation (host/vip, credentials, vhost, optional SSL and HA settings).
    """

    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
        # ssl_dir: where to write a CA delivered over the relation; if None,
        # SSL CA data from the relation is logged and ignored.
        self.ssl_dir = ssl_dir
        # relation_prefix: prefixes the rabbit-user/rabbit-vhost config keys.
        self.rel_name = rel_name
        self.relation_prefix = relation_prefix
        self.interfaces = [rel_name]

    def __call__(self):
        """Build the amqp context; returns {} when incomplete.

        Raises OSContextError if the rabbit user/vhost charm config
        options are missing.
        """
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            # NOTE(review): message says 'shared_db' but this is the amqp
            # context — looks like a copy/paste slip in the log text.
            log('Could not generate shared_db context. Missing required charm '
                'config options: %s.' % e, level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            self.related = True
            for unit in related_units(rid):
                # A 'clustered' flag means the broker is behind a VIP; use it
                # as the host instead of the unit's private address.
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                # Presence of the key (any value) enables HA queues.
                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if self.context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log("Charm not setup for ssl support but ssl ca "
                                "found", level=INFO)
                            break

                        # Persist the relation-delivered CA and replace the
                        # context value with its on-disk path.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only) and
                    len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        # self.complete is presumably maintained by the OSContextGenerator
        # base class (not visible here) — TODO confirm.
        if not self.complete:
            return {}

        return ctxt
3548+
3549+
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        relations = relation_ids('ceph')
        if not relations:
            return {}

        log('Generating template context for ceph', level=DEBUG)
        monitors = []
        ctxt = {'use_syslog': str(config('use-syslog')).lower()}
        for relation_id in relations:
            for member in related_units(relation_id):
                # Take auth/key from the first unit that provides them.
                if not ctxt.get('auth'):
                    ctxt['auth'] = relation_get('auth', rid=relation_id,
                                                unit=member)
                if not ctxt.get('key'):
                    ctxt['key'] = relation_get('key', rid=relation_id,
                                               unit=member)
                public_addr = relation_get('ceph-public-address',
                                           rid=relation_id, unit=member)
                private_addr = relation_get('private-address',
                                            rid=relation_id, unit=member)
                # Prefer the dedicated public address over private-address.
                address = public_addr or private_addr
                monitors.append(format_ipv6_addr(address) or address)

        ctxt['mon_hosts'] = ' '.join(sorted(monitors))

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not self.context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
3587+
3588+
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        # singlenode_mode: enable haproxy even with no peers.
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        # haproxy backend names can't contain '/'.
        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # will either be the only backend or the fallback if no acls
        # match in the frontend
        cluster_hosts[addr] = {}
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        # Return the context (and enable haproxy) as soon as ANY frontend has
        # more than one backend (or singlenode_mode is set); otherwise fall
        # through and report the context as incomplete.
        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
3679+
3680+
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Fetch the glance API server from the image-service relation;
        useful in nova and cinder (currently).
        """
        log('Generating template context for image-service.', level=DEBUG)
        relation_list = relation_ids('image-service')
        if not relation_list:
            return {}

        # First unit advertising an API server wins.
        for relation_id in relation_list:
            for member in related_units(relation_id):
                endpoint = relation_get('glance-api-server',
                                        rid=relation_id, unit=member)
                if endpoint:
                    return {'glance_api_servers': endpoint}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
3703+
3704+
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules required for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the base64 cert/key from get_cert() under the service's
        ssl dir, suffixing the filenames with the CN when one is given."""
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA certificate, if one is available."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # NOTE: use slicing, not k.lstrip('ssl_key_') —
                        # lstrip() strips a *character set*, so CNs that
                        # begin with any of 's', 'l', '_', 'k', 'e', 'y'
                        # (e.g. 'keystone.example') would be mangled.
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
        (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

        or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                # Pair the address with the first vip on the same network.
                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
3844+
3845+
class NeutronContext(OSContextGenerator):
    """Base context for neutron configuration.

    Subclasses override the plugin/network_manager/neutron_security_groups
    properties; this class maps the selected plugin onto a template context.
    """
    interfaces = []

    @property
    def plugin(self):
        # Subclasses return the selected plugin name (e.g. 'ovs', 'n1kv').
        return None

    @property
    def network_manager(self):
        # Subclasses return 'quantum' or 'neutron'.
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        """Install each of the plugin's package groups."""
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Record the selected plugin name for later use by nova."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # NOTE: open in text mode ('w'); self.plugin is a str and writing a
        # str to a binary-mode ('wb') file raises TypeError on python3.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        """Context for the openvswitch plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'ovs',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return ovs_ctxt

    def nuage_ctxt(self):
        """Context for the Nuage VSP plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nuage_ctxt = {'core_plugin': driver,
                      'neutron_plugin': 'vsp',
                      'neutron_security_groups': self.neutron_security_groups,
                      'local_ip': unit_private_ip(),
                      'config': config}

        return nuage_ctxt

    def nvp_ctxt(self):
        """Context for the NVP/NSX plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nvp_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'nvp',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return nvp_ctxt

    def n1kv_ctxt(self):
        """Context for the Cisco N1KV plugin, including VSM access config."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_user_config_flags = config('n1kv-config-flags')
        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
        n1kv_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'n1kv',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': n1kv_config,
                     'vsm_ip': config('n1kv-vsm-ip'),
                     'vsm_username': config('n1kv-vsm-username'),
                     'vsm_password': config('n1kv-vsm-password'),
                     'restrict_policy_profiles': restrict_policy_profiles}

        if n1kv_user_config_flags:
            flags = config_flags_parser(n1kv_user_config_flags)
            n1kv_ctxt['user_config_flags'] = flags

        return n1kv_ctxt

    def calico_ctxt(self):
        """Context for the Calico plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        calico_ctxt = {'core_plugin': driver,
                       'neutron_plugin': 'Calico',
                       'neutron_security_groups': self.neutron_security_groups,
                       'local_ip': unit_private_ip(),
                       'config': config}

        return calico_ctxt

    def neutron_ctxt(self):
        """Common context: network manager and neutron server URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt

    def pg_ctxt(self):
        """Context for the PLUMgrid plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'plumgrid',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}
        return ovs_ctxt

    def midonet_ctxt(self):
        """Context for the MidoNet plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
                                                  self.network_manager)
        mido_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'midonet',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': midonet_config}

        return mido_ctxt

    def __call__(self):
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        # Merge in the plugin-specific settings.
        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin in ['nvp', 'nsx']:
            ctxt.update(self.nvp_ctxt())
        elif self.plugin == 'n1kv':
            ctxt.update(self.n1kv_ctxt())
        elif self.plugin == 'Calico':
            ctxt.update(self.calico_ctxt())
        elif self.plugin == 'vsp':
            ctxt.update(self.nuage_ctxt())
        elif self.plugin == 'plumgrid':
            ctxt.update(self.pg_ctxt())
        elif self.plugin == 'midonet':
            ctxt.update(self.midonet_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            flags = config_flags_parser(alchemy_flags)
            ctxt['neutron_alchemy_flags'] = flags

        self._save_flag_file()
        return ctxt
4025+
4026+
class NeutronPortContext(OSContextGenerator):

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s)

        If hwaddress provided then returns resolved hwaddress otherwise NIC.
        """
        if not ports:
            return None

        nic_by_hwaddr = {}
        ips_by_hwaddr = {}
        for iface in list_nics():
            # Ignore virtual interfaces (bond masters will be identified from
            # their slaves)
            if not is_phy_iface(iface):
                continue

            master = get_bond_master(iface)
            if master:
                log("Replacing iface '%s' with bond master '%s'" % (iface,
                                                                    master),
                    level=DEBUG)
                iface = master

            hwaddr = get_nic_hwaddr(iface)
            nic_by_hwaddr[hwaddr] = iface
            ips_by_hwaddr[hwaddr] = (get_ipv4_addr(iface, fatal=False) +
                                     get_ipv6_addr(iface=iface, fatal=False))

        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        resolved = []
        for entry in ports:
            if not re.match(mac_regex, entry):
                # If the passed entry is not a MAC address, assume it's a valid
                # interface, and that the user put it there on purpose (we can
                # trust it to be the real external network).
                resolved.append(entry)
            elif entry in nic_by_hwaddr and not ips_by_hwaddr[entry]:
                # The MAC belongs to a known NIC with no IP address yet;
                # skip it if the NIC is already part of a bridge.
                if not is_bridge_member(nic_by_hwaddr[entry]):
                    resolved.append(nic_by_hwaddr[entry])

        # Ensure no duplicates
        return list(set(resolved))
4078+
4079+
class OSConfigFlagContext(OSContextGenerator):
    """Provides support for user-defined config flags.

    Users can define a comma-seperated list of key=value pairs
    in the charm configuration and apply them at any point in
    any file by using a template flag.

    Sometimes users might want config flags inserted within a
    specific section so this class allows users to specify the
    template flag name, allowing for multiple template flags
    (sections) within the same context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in template
                              file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        flags = config(self._charm_flag)
        if not flags:
            return {}
        return {self._template_flag: config_flags_parser(flags)}
4115+
4116+
class LibvirtConfigFlagsContext(OSContextGenerator):
    """Supports extending the libvirt section through user-defined flags."""

    def __call__(self):
        flags = config('libvirt-flags')
        if not flags:
            return {}
        return {'libvirt_flags': config_flags_parser(flags)}
4129+
4130+
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principle for multiple config
    files and multiple serivces. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json::

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principle charms to subscribe this context to
    the service+config file it is interestd in. Configuration data will
    be available in the template context, in glance's case, as::

        ctxt = {
            ... other context ...
            'subordinate_configuration': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service     : Service name key to query in any subordinate
                             data found
        :param config_file : Service's config file to query sections
        :param interface   : Subordinate interface to inspect
        """
        self.config_file = config_file
        if isinstance(service, list):
            self.services = service
        else:
            self.services = [service]
        if isinstance(interface, list):
            self.interfaces = interface
        else:
            self.interfaces = [interface]

    def __call__(self):
        ctxt = {'sections': {}}
        rids = []
        for interface in self.interfaces:
            rids.extend(relation_ids(interface))
        for rid in rids:
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        # json.loads raises ValueError on malformed data
                        # (JSONDecodeError is a subclass on python3).
                        log('Could not parse JSON from '
                            'subordinate_configuration setting from %s'
                            % rid, level=ERROR)
                        continue

                    for service in self.services:
                        if service not in sub_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s service'
                                % (rid, service), level=INFO)
                            continue

                        # NOTE: use local names rather than rebinding
                        # sub_config; the previous code clobbered sub_config
                        # on the first service, so lookups for every
                        # subsequent service (and unit) indexed into the
                        # wrong dict.
                        svc_config = sub_config[service]
                        if self.config_file not in svc_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s'
                                % (rid, self.config_file), level=INFO)
                            continue

                        file_config = svc_config[self.config_file]
                        for k, v in six.iteritems(file_config):
                            if k == 'sections':
                                # Merge section entries across subordinates.
                                for section, config_list in six.iteritems(v):
                                    log("adding section '%s'" % (section),
                                        level=DEBUG)
                                    if ctxt[k].get(section):
                                        ctxt[k][section].extend(config_list)
                                    else:
                                        ctxt[k][section] = config_list
                            else:
                                ctxt[k] = v
        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
        return ctxt
4237+
4238+
class LogLevelContext(OSContextGenerator):
    """Exposes the charm's debug/verbose options, defaulting unset values
    to False."""

    def __call__(self):
        ctxt = {}
        for option in ('debug', 'verbose'):
            value = config(option)
            ctxt[option] = False if value is None else value
        return ctxt
4249+
4250+
class SyslogContext(OSContextGenerator):
    """Exposes the use-syslog charm config option."""

    def __call__(self):
        return {'use_syslog': config('use-syslog')}
4256+
4257+
class BindHostContext(OSContextGenerator):
    """Supplies the wildcard bind address, honouring prefer-ipv6."""

    def __call__(self):
        host = '::' if config('prefer-ipv6') else '0.0.0.0'
        return {'bind_host': host}
4265+
4266+
class WorkerConfigContext(OSContextGenerator):
    """Provides a 'workers' count scaled from the CPU count by the
    worker-multiplier charm config option (unset multiplier => 0 workers)."""

    @property
    def num_cpus(self):
        """Number of CPUs, installing psutil on demand if necessary."""
        try:
            import psutil
        except ImportError:
            apt_install('python-psutil', fatal=True)
            import psutil

        try:
            # psutil >= 2.0
            return psutil.cpu_count()
        except AttributeError:
            # NUM_CPUS was the pre-2.0 API; it has been removed from
            # current psutil in favour of cpu_count().
            return psutil.NUM_CPUS

    def __call__(self):
        # 'or 0' guards against worker-multiplier being unset (None).
        multiplier = config('worker-multiplier') or 0
        ctxt = {"workers": self.num_cpus * multiplier}
        return ctxt
4283+
4284+
class ZeroMQContext(OSContextGenerator):
    """Collects zeromq transport settings from the zeromq-configuration
    relation."""
    interfaces = ['zeromq-configuration']

    def __call__(self):
        ctxt = {}
        if is_relation_made('zeromq-configuration', 'host'):
            for relation_id in relation_ids('zeromq-configuration'):
                for member in related_units(relation_id):
                    for rel_key, ctxt_key in (
                            ('nonce', 'zmq_nonce'),
                            ('host', 'zmq_host'),
                            ('zmq_redis_address', 'zmq_redis_address')):
                        ctxt[ctxt_key] = relation_get(rel_key, member,
                                                      relation_id)

        return ctxt
4299+
4300+
class NotificationDriverContext(OSContextGenerator):
    """Enables notifications when an amqp relation exists."""

    def __init__(self, zmq_relation='zeromq-configuration',
                 amqp_relation='amqp'):
        """
        :param zmq_relation: Name of Zeromq relation to check
        """
        self.zmq_relation = zmq_relation
        self.amqp_relation = amqp_relation

    def __call__(self):
        # Values are strings, matching what the templates expect.
        if is_relation_made(self.amqp_relation):
            return {'notifications': 'True'}
        return {'notifications': 'False'}
4317+
4318+
class SysctlContext(OSContextGenerator):
    """This context check if the 'sysctl' option exists on configuration
    then creates a file with the loaded contents"""

    def __call__(self):
        settings = config('sysctl')
        if settings:
            sysctl_create(settings,
                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
        return {'sysctl': settings}
4328+
4329+
class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __call__(self):
        # Map of context key -> relation key and fallback default.
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # 'l2-population' acts as the marker that the remote end
                # has published its settings.
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        """Map relation data onto the neutron defaults table, converting
        boolean-valued settings from their string wire format."""
        settings = {}
        for nkey, meta in self.neutron_defaults.items():
            defv = meta['default']
            rkey = meta['rel_key']
            if rkey in rdata:
                # isinstance() is the idiomatic (and subclass-safe) type
                # check; booleans arrive as strings over the relation.
                if isinstance(defv, bool):
                    settings[nkey] = bool_from_string(rdata[rkey])
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv
        return settings
4386+
4387+
4388+class ExternalPortContext(NeutronPortContext):
4389+
4390+ def __call__(self):
4391+ ctxt = {}
4392+ ports = config('ext-port')
4393+ if ports:
4394+ ports = [p.strip() for p in ports.split()]
4395+ ports = self.resolve_ports(ports)
4396+ if ports:
4397+ ctxt = {"ext_port": ports[0]}
4398+ napi_settings = NeutronAPIContext()()
4399+ mtu = napi_settings.get('network_device_mtu')
4400+ if mtu:
4401+ ctxt['ext_port_mtu'] = mtu
4402+
4403+ return ctxt
4404+
4405+
4406+class DataPortContext(NeutronPortContext):
4407+
4408+ def __call__(self):
4409+ ports = config('data-port')
4410+ if ports:
4411+ # Map of {port/mac:bridge}
4412+ portmap = parse_data_port_mappings(ports)
4413+ ports = portmap.keys()
4414+ # Resolve provided ports or mac addresses and filter out those
4415+ # already attached to a bridge.
4416+ resolved = self.resolve_ports(ports)
4417+ # FIXME: is this necessary?
4418+ normalized = {get_nic_hwaddr(port): port for port in resolved
4419+ if port not in ports}
4420+ normalized.update({port: port for port in resolved
4421+ if port in ports})
4422+ if resolved:
4423+ return {normalized[port]: bridge for port, bridge in
4424+ six.iteritems(portmap) if port in normalized.keys()}
4425+
4426+ return None
4427+
4428+
4429+class PhyNICMTUContext(DataPortContext):
4430+
4431+ def __call__(self):
4432+ ctxt = {}
4433+ mappings = super(PhyNICMTUContext, self).__call__()
4434+ if mappings and mappings.keys():
4435+ ports = sorted(mappings.keys())
4436+ napi_settings = NeutronAPIContext()()
4437+ mtu = napi_settings.get('network_device_mtu')
4438+ all_ports = set()
4439+ # If any of ports is a vlan device, its underlying device must have
4440+ # mtu applied first.
4441+ for port in ports:
4442+ for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
4443+ lport = os.path.basename(lport)
4444+ all_ports.add(lport.split('_')[1])
4445+
4446+ all_ports = list(all_ports)
4447+ all_ports.extend(ports)
4448+ if mtu:
4449+ ctxt["devs"] = '\\n'.join(all_ports)
4450+ ctxt['mtu'] = mtu
4451+
4452+ return ctxt
4453+
4454+
4455+class NetworkServiceContext(OSContextGenerator):
4456+
4457+ def __init__(self, rel_name='quantum-network-service'):
4458+ self.rel_name = rel_name
4459+ self.interfaces = [rel_name]
4460+
4461+ def __call__(self):
4462+ for rid in relation_ids(self.rel_name):
4463+ for unit in related_units(rid):
4464+ rdata = relation_get(rid=rid, unit=unit)
4465+ ctxt = {
4466+ 'keystone_host': rdata.get('keystone_host'),
4467+ 'service_port': rdata.get('service_port'),
4468+ 'auth_port': rdata.get('auth_port'),
4469+ 'service_tenant': rdata.get('service_tenant'),
4470+ 'service_username': rdata.get('service_username'),
4471+ 'service_password': rdata.get('service_password'),
4472+ 'quantum_host': rdata.get('quantum_host'),
4473+ 'quantum_port': rdata.get('quantum_port'),
4474+ 'quantum_url': rdata.get('quantum_url'),
4475+ 'region': rdata.get('region'),
4476+ 'service_protocol':
4477+ rdata.get('service_protocol') or 'http',
4478+ 'auth_protocol':
4479+ rdata.get('auth_protocol') or 'http',
4480+ }
4481+ if self.context_complete(ctxt):
4482+ return ctxt
4483+ return {}
4484
4485=== added file 'charmhelpers.new/contrib/openstack/ip.py'
4486--- charmhelpers.new/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
4487+++ charmhelpers.new/contrib/openstack/ip.py 2015-11-24 19:47:41 +0000
4488@@ -0,0 +1,151 @@
4489+# Copyright 2014-2015 Canonical Limited.
4490+#
4491+# This file is part of charm-helpers.
4492+#
4493+# charm-helpers is free software: you can redistribute it and/or modify
4494+# it under the terms of the GNU Lesser General Public License version 3 as
4495+# published by the Free Software Foundation.
4496+#
4497+# charm-helpers is distributed in the hope that it will be useful,
4498+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4499+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4500+# GNU Lesser General Public License for more details.
4501+#
4502+# You should have received a copy of the GNU Lesser General Public License
4503+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4504+
4505+from charmhelpers.core.hookenv import (
4506+ config,
4507+ unit_get,
4508+ service_name,
4509+)
4510+from charmhelpers.contrib.network.ip import (
4511+ get_address_in_network,
4512+ is_address_in_network,
4513+ is_ipv6,
4514+ get_ipv6_addr,
4515+)
4516+from charmhelpers.contrib.hahelpers.cluster import is_clustered
4517+
4518+PUBLIC = 'public'
4519+INTERNAL = 'int'
4520+ADMIN = 'admin'
4521+
4522+ADDRESS_MAP = {
4523+ PUBLIC: {
4524+ 'config': 'os-public-network',
4525+ 'fallback': 'public-address',
4526+ 'override': 'os-public-hostname',
4527+ },
4528+ INTERNAL: {
4529+ 'config': 'os-internal-network',
4530+ 'fallback': 'private-address',
4531+ 'override': 'os-internal-hostname',
4532+ },
4533+ ADMIN: {
4534+ 'config': 'os-admin-network',
4535+ 'fallback': 'private-address',
4536+ 'override': 'os-admin-hostname',
4537+ }
4538+}
4539+
4540+
4541+def canonical_url(configs, endpoint_type=PUBLIC):
4542+ """Returns the correct HTTP URL to this host given the state of HTTPS
4543+ configuration, hacluster and charm configuration.
4544+
4545+ :param configs: OSTemplateRenderer config templating object to inspect
4546+ for a complete https context.
4547+ :param endpoint_type: str endpoint type to resolve.
4548+ :param returns: str base URL for services on the current service unit.
4549+ """
4550+ scheme = _get_scheme(configs)
4551+
4552+ address = resolve_address(endpoint_type)
4553+ if is_ipv6(address):
4554+ address = "[{}]".format(address)
4555+
4556+ return '%s://%s' % (scheme, address)
4557+
4558+
4559+def _get_scheme(configs):
4560+ """Returns the scheme to use for the url (either http or https)
4561+ depending upon whether https is in the configs value.
4562+
4563+ :param configs: OSTemplateRenderer config templating object to inspect
4564+ for a complete https context.
4565+ :returns: either 'http' or 'https' depending on whether https is
4566+ configured within the configs context.
4567+ """
4568+ scheme = 'http'
4569+ if configs and 'https' in configs.complete_contexts():
4570+ scheme = 'https'
4571+ return scheme
4572+
4573+
4574+def _get_address_override(endpoint_type=PUBLIC):
4575+ """Returns any address overrides that the user has defined based on the
4576+ endpoint type.
4577+
4578+ Note: this function allows for the service name to be inserted into the
4579+ address if the user specifies {service_name}.somehost.org.
4580+
4581+ :param endpoint_type: the type of endpoint to retrieve the override
4582+ value for.
4583+ :returns: any endpoint address or hostname that the user has overridden
4584+ or None if an override is not present.
4585+ """
4586+ override_key = ADDRESS_MAP[endpoint_type]['override']
4587+ addr_override = config(override_key)
4588+ if not addr_override:
4589+ return None
4590+ else:
4591+ return addr_override.format(service_name=service_name())
4592+
4593+
4594+def resolve_address(endpoint_type=PUBLIC):
4595+ """Return unit address depending on net config.
4596+
4597+ If unit is clustered with vip(s) and has net splits defined, return vip on
4598+ correct network. If clustered with no nets defined, return primary vip.
4599+
4600+ If not clustered, return unit address ensuring address is on configured net
4601+ split if one is configured.
4602+
4603+ :param endpoint_type: Network endpoint type
4604+ """
4605+ resolved_address = _get_address_override(endpoint_type)
4606+ if resolved_address:
4607+ return resolved_address
4608+
4609+ vips = config('vip')
4610+ if vips:
4611+ vips = vips.split()
4612+
4613+ net_type = ADDRESS_MAP[endpoint_type]['config']
4614+ net_addr = config(net_type)
4615+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
4616+ clustered = is_clustered()
4617+ if clustered:
4618+ if not net_addr:
4619+ # If no net-splits defined, we expect a single vip
4620+ resolved_address = vips[0]
4621+ else:
4622+ for vip in vips:
4623+ if is_address_in_network(net_addr, vip):
4624+ resolved_address = vip
4625+ break
4626+ else:
4627+ if config('prefer-ipv6'):
4628+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
4629+ else:
4630+ fallback_addr = unit_get(net_fallback)
4631+
4632+ resolved_address = get_address_in_network(net_addr, fallback_addr)
4633+
4634+ if resolved_address is None:
4635+ raise ValueError("Unable to resolve a suitable IP address based on "
4636+ "charm state and configuration. (net_type=%s, "
4637+ "clustered=%s)" % (net_type, clustered))
4638+
4639+ return resolved_address
4640
4641=== added file 'charmhelpers.new/contrib/openstack/neutron.py'
4642--- charmhelpers.new/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
4643+++ charmhelpers.new/contrib/openstack/neutron.py 2015-11-24 19:47:41 +0000
4644@@ -0,0 +1,370 @@
4645+# Copyright 2014-2015 Canonical Limited.
4646+#
4647+# This file is part of charm-helpers.
4648+#
4649+# charm-helpers is free software: you can redistribute it and/or modify
4650+# it under the terms of the GNU Lesser General Public License version 3 as
4651+# published by the Free Software Foundation.
4652+#
4653+# charm-helpers is distributed in the hope that it will be useful,
4654+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4655+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4656+# GNU Lesser General Public License for more details.
4657+#
4658+# You should have received a copy of the GNU Lesser General Public License
4659+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4660+
4661+# Various utilities for dealing with Neutron and the renaming from Quantum.
4662+
4663+import six
4664+from subprocess import check_output
4665+
4666+from charmhelpers.core.hookenv import (
4667+ config,
4668+ log,
4669+ ERROR,
4670+)
4671+
4672+from charmhelpers.contrib.openstack.utils import os_release
4673+
4674+
4675+def headers_package():
4675+ """Returns the linux-headers package name matching the running kernel,
4676+ needed for building the DKMS package"""
4678+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
4679+ return 'linux-headers-%s' % kver
4680+
4681+QUANTUM_CONF_DIR = '/etc/quantum'
4682+
4683+
4684+def kernel_version():
4685+ """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
4686+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
4687+ kver = kver.split('.')
4688+ return (int(kver[0]), int(kver[1]))
4689+
4690+
4691+def determine_dkms_package():
4692+ """ Determine which DKMS package should be used based on kernel version """
4693+ # NOTE: 3.13 kernels have support for GRE and VXLAN native
4694+ if kernel_version() >= (3, 13):
4695+ return []
4696+ else:
4697+ return ['openvswitch-datapath-dkms']
4698+
4699+
4700+# legacy
4701+
4702+
4703+def quantum_plugins():
4704+ from charmhelpers.contrib.openstack import context
4705+ return {
4706+ 'ovs': {
4707+ 'config': '/etc/quantum/plugins/openvswitch/'
4708+ 'ovs_quantum_plugin.ini',
4709+ 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
4710+ 'OVSQuantumPluginV2',
4711+ 'contexts': [
4712+ context.SharedDBContext(user=config('neutron-database-user'),
4713+ database=config('neutron-database'),
4714+ relation_prefix='neutron',
4715+ ssl_dir=QUANTUM_CONF_DIR)],
4716+ 'services': ['quantum-plugin-openvswitch-agent'],
4717+ 'packages': [[headers_package()] + determine_dkms_package(),
4718+ ['quantum-plugin-openvswitch-agent']],
4719+ 'server_packages': ['quantum-server',
4720+ 'quantum-plugin-openvswitch'],
4721+ 'server_services': ['quantum-server']
4722+ },
4723+ 'nvp': {
4724+ 'config': '/etc/quantum/plugins/nicira/nvp.ini',
4725+ 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
4726+ 'QuantumPlugin.NvpPluginV2',
4727+ 'contexts': [
4728+ context.SharedDBContext(user=config('neutron-database-user'),
4729+ database=config('neutron-database'),
4730+ relation_prefix='neutron',
4731+ ssl_dir=QUANTUM_CONF_DIR)],
4732+ 'services': [],
4733+ 'packages': [],
4734+ 'server_packages': ['quantum-server',
4735+ 'quantum-plugin-nicira'],
4736+ 'server_services': ['quantum-server']
4737+ }
4738+ }
4739+
4740+NEUTRON_CONF_DIR = '/etc/neutron'
4741+
4742+
4743+def neutron_plugins():
4744+ from charmhelpers.contrib.openstack import context
4745+ release = os_release('nova-common')
4746+ plugins = {
4747+ 'ovs': {
4748+ 'config': '/etc/neutron/plugins/openvswitch/'
4749+ 'ovs_neutron_plugin.ini',
4750+ 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
4751+ 'OVSNeutronPluginV2',
4752+ 'contexts': [
4753+ context.SharedDBContext(user=config('neutron-database-user'),
4754+ database=config('neutron-database'),
4755+ relation_prefix='neutron',
4756+ ssl_dir=NEUTRON_CONF_DIR)],
4757+ 'services': ['neutron-plugin-openvswitch-agent'],
4758+ 'packages': [[headers_package()] + determine_dkms_package(),
4759+ ['neutron-plugin-openvswitch-agent']],
4760+ 'server_packages': ['neutron-server',
4761+ 'neutron-plugin-openvswitch'],
4762+ 'server_services': ['neutron-server']
4763+ },
4764+ 'nvp': {
4765+ 'config': '/etc/neutron/plugins/nicira/nvp.ini',
4766+ 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
4767+ 'NeutronPlugin.NvpPluginV2',
4768+ 'contexts': [
4769+ context.SharedDBContext(user=config('neutron-database-user'),
4770+ database=config('neutron-database'),
4771+ relation_prefix='neutron',
4772+ ssl_dir=NEUTRON_CONF_DIR)],
4773+ 'services': [],
4774+ 'packages': [],
4775+ 'server_packages': ['neutron-server',
4776+ 'neutron-plugin-nicira'],
4777+ 'server_services': ['neutron-server']
4778+ },
4779+ 'nsx': {
4780+ 'config': '/etc/neutron/plugins/vmware/nsx.ini',
4781+ 'driver': 'vmware',
4782+ 'contexts': [
4783+ context.SharedDBContext(user=config('neutron-database-user'),
4784+ database=config('neutron-database'),
4785+ relation_prefix='neutron',
4786+ ssl_dir=NEUTRON_CONF_DIR)],
4787+ 'services': [],
4788+ 'packages': [],
4789+ 'server_packages': ['neutron-server',
4790+ 'neutron-plugin-vmware'],
4791+ 'server_services': ['neutron-server']
4792+ },
4793+ 'n1kv': {
4794+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
4795+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
4796+ 'contexts': [
4797+ context.SharedDBContext(user=config('neutron-database-user'),
4798+ database=config('neutron-database'),
4799+ relation_prefix='neutron',
4800+ ssl_dir=NEUTRON_CONF_DIR)],
4801+ 'services': [],
4802+ 'packages': [[headers_package()] + determine_dkms_package(),
4803+ ['neutron-plugin-cisco']],
4804+ 'server_packages': ['neutron-server',
4805+ 'neutron-plugin-cisco'],
4806+ 'server_services': ['neutron-server']
4807+ },
4808+ 'Calico': {
4809+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
4810+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
4811+ 'contexts': [
4812+ context.SharedDBContext(user=config('neutron-database-user'),
4813+ database=config('neutron-database'),
4814+ relation_prefix='neutron',
4815+ ssl_dir=NEUTRON_CONF_DIR)],
4816+ 'services': ['calico-felix',
4817+ 'bird',
4818+ 'neutron-dhcp-agent',
4819+ 'nova-api-metadata',
4820+ 'etcd'],
4821+ 'packages': [[headers_package()] + determine_dkms_package(),
4822+ ['calico-compute',
4823+ 'bird',
4824+ 'neutron-dhcp-agent',
4825+ 'nova-api-metadata',
4826+ 'etcd']],
4827+ 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
4828+ 'server_services': ['neutron-server', 'etcd']
4829+ },
4830+ 'vsp': {
4831+ 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
4832+ 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
4833+ 'contexts': [
4834+ context.SharedDBContext(user=config('neutron-database-user'),
4835+ database=config('neutron-database'),
4836+ relation_prefix='neutron',
4837+ ssl_dir=NEUTRON_CONF_DIR)],
4838+ 'services': [],
4839+ 'packages': [],
4840+ 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
4841+ 'server_services': ['neutron-server']
4842+ },
4843+ 'plumgrid': {
4844+ 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
4845+ 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
4846+ 'contexts': [
4847+ context.SharedDBContext(user=config('database-user'),
4848+ database=config('database'),
4849+ ssl_dir=NEUTRON_CONF_DIR)],
4850+ 'services': [],
4851+ 'packages': ['plumgrid-lxc',
4852+ 'iovisor-dkms'],
4853+ 'server_packages': ['neutron-server',
4854+ 'neutron-plugin-plumgrid'],
4855+ 'server_services': ['neutron-server']
4856+ },
4857+ 'midonet': {
4858+ 'config': '/etc/neutron/plugins/midonet/midonet.ini',
4859+ 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
4860+ 'contexts': [
4861+ context.SharedDBContext(user=config('neutron-database-user'),
4862+ database=config('neutron-database'),
4863+ relation_prefix='neutron',
4864+ ssl_dir=NEUTRON_CONF_DIR)],
4865+ 'services': [],
4866+ 'packages': [[headers_package()] + determine_dkms_package()],
4867+ 'server_packages': ['neutron-server',
4868+ 'python-neutron-plugin-midonet'],
4869+ 'server_services': ['neutron-server']
4870+ }
4871+ }
4872+ if release >= 'icehouse':
4873+ # NOTE: patch in ml2 plugin for icehouse onwards
4874+ plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
4875+ plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
4876+ plugins['ovs']['server_packages'] = ['neutron-server',
4877+ 'neutron-plugin-ml2']
4878+ # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
4879+ plugins['nvp'] = plugins['nsx']
4880+ return plugins
4881+
4882+
4883+def neutron_plugin_attribute(plugin, attr, net_manager=None):
4884+ manager = net_manager or network_manager()
4885+ if manager == 'quantum':
4886+ plugins = quantum_plugins()
4887+ elif manager == 'neutron':
4888+ plugins = neutron_plugins()
4889+ else:
4890+ log("Network manager '%s' does not support plugins." % (manager),
4891+ level=ERROR)
4892+ raise Exception
4893+
4894+ try:
4895+ _plugin = plugins[plugin]
4896+ except KeyError:
4897+ log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
4898+ raise Exception
4899+
4900+ try:
4901+ return _plugin[attr]
4902+ except KeyError:
4903+ return None
4904+
4905+
4906+def network_manager():
4907+ '''
4908+ Deals with the renaming of Quantum to Neutron in H and any situations
4909+ that require compatibility (eg, deploying H with network-manager=quantum,
4910+ upgrading from G).
4911+ '''
4912+ release = os_release('nova-common')
4913+ manager = config('network-manager').lower()
4914+
4915+ if manager not in ['quantum', 'neutron']:
4916+ return manager
4917+
4918+ if release in ['essex']:
4919+ # E does not support neutron
4920+ log('Neutron networking not supported in Essex.', level=ERROR)
4921+ raise Exception
4922+ elif release in ['folsom', 'grizzly']:
4923+ # neutron is named quantum in F and G
4924+ return 'quantum'
4925+ else:
4926+ # ensure accurate naming for all releases post-H
4927+ return 'neutron'
4928+
4929+
4930+def parse_mappings(mappings, key_rvalue=False):
4931+ """By default mappings are lvalue keyed.
4932+
4933+ If key_rvalue is True, the mapping will be reversed to allow multiple
4934+ configs for the same lvalue.
4935+ """
4936+ parsed = {}
4937+ if mappings:
4938+ mappings = mappings.split()
4939+ for m in mappings:
4940+ p = m.partition(':')
4941+
4942+ if key_rvalue:
4943+ key_index = 2
4944+ val_index = 0
4945+ # if there is no rvalue skip to next
4946+ if not p[1]:
4947+ continue
4948+ else:
4949+ key_index = 0
4950+ val_index = 2
4951+
4952+ key = p[key_index].strip()
4953+ parsed[key] = p[val_index].strip()
4954+
4955+ return parsed
4956+
4957+
4958+def parse_bridge_mappings(mappings):
4959+ """Parse bridge mappings.
4960+
4961+ Mappings must be a space-delimited list of provider:bridge mappings.
4962+
4963+ Returns dict of the form {provider:bridge}.
4964+ """
4965+ return parse_mappings(mappings)
4966+
4967+
4968+def parse_data_port_mappings(mappings, default_bridge='br-data'):
4969+ """Parse data port mappings.
4970+
4971+ Mappings must be a space-delimited list of bridge:port.
4972+
4973+ Returns dict of the form {port:bridge} where ports may be mac addresses or
4974+ interface names.
4975+ """
4976+
4977+ # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
4978+ # proposed for <port> since it may be a mac address which will differ
4979+ # across units, thus allowing first-known-good to be chosen.
4980+ _mappings = parse_mappings(mappings, key_rvalue=True)
4981+ if not _mappings or list(_mappings.values()) == ['']:
4982+ if not mappings:
4983+ return {}
4984+
4985+ # For backwards-compatibility we need to support port-only provided in
4986+ # config.
4987+ _mappings = {mappings.split()[0]: default_bridge}
4988+
4989+ ports = _mappings.keys()
4990+ if len(set(ports)) != len(ports):
4991+ raise Exception("It is not allowed to have the same port configured "
4992+ "on more than one bridge")
4993+
4994+ return _mappings
4995+
4996+
4997+def parse_vlan_range_mappings(mappings):
4998+ """Parse vlan range mappings.
4999+
5000+ Mappings must be a space-delimited list of provider:start:end mappings.
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches