Merge lp:~james-page/charms/trusty/swift-storage/xenial into lp:~openstack-charmers-archive/charms/trusty/swift-storage/trunk

Proposed by James Page
Status: Superseded
Proposed branch: lp:~james-page/charms/trusty/swift-storage/xenial
Merge into: lp:~openstack-charmers-archive/charms/trusty/swift-storage/trunk
Diff against target: 13142 lines (+12701/-18) (has conflicts)
59 files modified
.testr.conf (+8/-0)
actions.yaml (+6/-0)
actions/actions.py (+108/-0)
actions/openstack_upgrade.py (+34/-0)
charm-helpers-hooks.yaml (+5/-0)
charmhelpers.new/cli/__init__.py (+191/-0)
charmhelpers.new/cli/benchmark.py (+36/-0)
charmhelpers.new/cli/commands.py (+32/-0)
charmhelpers.new/cli/hookenv.py (+23/-0)
charmhelpers.new/cli/host.py (+31/-0)
charmhelpers.new/cli/unitdata.py (+39/-0)
charmhelpers.new/contrib/charmsupport/nrpe.py (+398/-0)
charmhelpers.new/contrib/hahelpers/cluster.py (+316/-0)
charmhelpers.new/contrib/network/ip.py (+458/-0)
charmhelpers.new/contrib/openstack/amulet/deployment.py (+302/-0)
charmhelpers.new/contrib/openstack/amulet/utils.py (+985/-0)
charmhelpers.new/contrib/openstack/context.py (+1477/-0)
charmhelpers.new/contrib/openstack/files/check_haproxy.sh (+34/-0)
charmhelpers.new/contrib/openstack/ip.py (+151/-0)
charmhelpers.new/contrib/openstack/neutron.py (+378/-0)
charmhelpers.new/contrib/openstack/templates/ceph.conf (+21/-0)
charmhelpers.new/contrib/openstack/templates/haproxy.cfg (+66/-0)
charmhelpers.new/contrib/openstack/templating.py (+323/-0)
charmhelpers.new/contrib/openstack/utils.py (+1044/-0)
charmhelpers.new/contrib/python/packages.py (+130/-0)
charmhelpers.new/contrib/storage/linux/ceph.py (+1039/-0)
charmhelpers.new/contrib/storage/linux/loopback.py (+88/-0)
charmhelpers.new/contrib/storage/linux/utils.py (+71/-0)
charmhelpers.new/core/files.py (+45/-0)
charmhelpers.new/core/hookenv.py (+978/-0)
charmhelpers.new/core/host.py (+673/-0)
charmhelpers.new/core/hugepage.py (+71/-0)
charmhelpers.new/core/kernel.py (+68/-0)
charmhelpers.new/core/services/base.py (+353/-0)
charmhelpers.new/core/services/helpers.py (+292/-0)
charmhelpers.new/core/strutils.py (+72/-0)
charmhelpers.new/core/templating.py (+81/-0)
charmhelpers.new/core/unitdata.py (+521/-0)
charmhelpers.new/fetch/__init__.py (+464/-0)
charmhelpers.new/fetch/archiveurl.py (+167/-0)
charmhelpers.new/fetch/bzrurl.py (+68/-0)
charmhelpers.new/fetch/giturl.py (+70/-0)
lib/misc_utils.py (+110/-0)
lib/swift_storage_context.py (+96/-0)
lib/swift_storage_utils.py (+372/-0)
requirements.txt (+11/-0)
test-requirements.txt (+8/-0)
tests/018-basic-trusty-liberty (+11/-0)
tests/019-basic-trusty-mitaka (+11/-0)
tests/019-basic-vivid-kilo (+0/-9)
tests/020-basic-wily-liberty (+9/-0)
tests/021-basic-xenial-mitaka (+9/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+9/-0)
tests/tests.yaml (+21/-0)
tox.ini (+29/-0)
unit_tests/test_actions.py (+220/-0)
unit_tests/test_actions_openstack_upgrade.py (+58/-0)
unit_tests/test_swift_storage_context.py (+1/-5)
unit_tests/test_swift_storage_utils.py (+9/-4)
Conflict adding file .testr.conf.  Moved existing file to .testr.conf.moved.
Path conflict: <deleted> / lib/misc_utils.py
Path conflict: <deleted> / lib/swift_storage_context.py
Path conflict: <deleted> / lib/swift_storage_utils.py
Conflict adding file actions.  Moved existing file to actions.moved.
Conflict adding file actions.yaml.  Moved existing file to actions.yaml.moved.
Text conflict in charm-helpers-hooks.yaml
Conflict adding file charmhelpers.  Moved existing file to charmhelpers.moved.
Conflict: charmhelpers.new is not a directory, but has files in it.  Created directory.
Conflict adding files to charmhelpers.new/contrib.  Created directory.
Conflict because charmhelpers.new/contrib is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/charmsupport.  Created directory.
Conflict because charmhelpers.new/contrib/charmsupport is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/hahelpers.  Created directory.
Conflict because charmhelpers.new/contrib/hahelpers is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/network.  Created directory.
Conflict because charmhelpers.new/contrib/network is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack.  Created directory.
Conflict because charmhelpers.new/contrib/openstack is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/amulet.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/amulet is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/files.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/files is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/templates.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/templates is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/python.  Created directory.
Conflict because charmhelpers.new/contrib/python is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage.  Created directory.
Conflict because charmhelpers.new/contrib/storage is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage/linux.  Created directory.
Conflict because charmhelpers.new/contrib/storage/linux is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core.  Created directory.
Conflict because charmhelpers.new/core is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core/services.  Created directory.
Conflict because charmhelpers.new/core/services is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/fetch.  Created directory.
Conflict because charmhelpers.new/fetch is not versioned, but has versioned children.  Versioned directory.
Conflict adding file hooks/install.real.  Moved existing file to hooks/install.real.moved.
Conflict adding file hooks/lib.  Moved existing file to hooks/lib.moved.
Conflict adding file lib.  Moved existing file to lib.moved.
Conflict adding file requirements.txt.  Moved existing file to requirements.txt.moved.
Conflict adding file test-requirements.txt.  Moved existing file to test-requirements.txt.moved.
Conflict adding file tests/018-basic-trusty-liberty.  Moved existing file to tests/018-basic-trusty-liberty.moved.
Conflict adding file tests/019-basic-trusty-mitaka.  Moved existing file to tests/019-basic-trusty-mitaka.moved.
Conflict adding file tests/020-basic-wily-liberty.  Moved existing file to tests/020-basic-wily-liberty.moved.
Conflict adding file tests/021-basic-xenial-mitaka.  Moved existing file to tests/021-basic-xenial-mitaka.moved.
Text conflict in tests/charmhelpers/contrib/openstack/amulet/deployment.py
Conflict adding file tests/setup.  Moved existing file to tests/setup.moved.
Conflict adding file tests/tests.yaml.  Moved existing file to tests/tests.yaml.moved.
Conflict adding file tox.ini.  Moved existing file to tox.ini.moved.
Conflict adding file unit_tests/test_actions.py.  Moved existing file to unit_tests/test_actions.py.moved.
Conflict adding file unit_tests/test_actions_openstack_upgrade.py.  Moved existing file to unit_tests/test_actions_openstack_upgrade.py.moved.
To merge this branch: bzr merge lp:~james-page/charms/trusty/swift-storage/xenial
Reviewer Review Type Date Requested Status
OpenStack Charmers Pending
Review via email: mp+284514@code.launchpad.net

This proposal has been superseded by a proposal from 2016-01-30.

Description of the change

Resync helpers; fix up xenial support.

To post a comment you must log in.
104. By James Page

Tidy lint

105. By James Page

Baseline

106. By James Page

Enable xenial mitaka tests

Unmerged revisions

106. By James Page

Enable xenial mitaka tests

105. By James Page

Baseline

104. By James Page

Tidy lint

103. By James Page

Resync helpers, refactor code to use cpu calcs from charmhelpers

102. By James Page

Fix liberty/mitaka typo from previous test definition update batch.

101. By Corey Bryant

[corey.bryant, r=jamespage] Sync charm-helpers.

100. By Liam Young

Update test combo definitions, remove Vivid deprecated release tests, update bundletester testplan yaml, update tests README.

99. By Liam Young

[corey.bryant, r=gnuoy] Charmhelper sync

98. By James Page

Resync helpers

97. By Corey Bryant

[corey.bryant,r=trivial] Sync charm-helpers.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added file '.testr.conf'
2--- .testr.conf 1970-01-01 00:00:00 +0000
3+++ .testr.conf 2016-01-30 12:38:43 +0000
4@@ -0,0 +1,8 @@
5+[DEFAULT]
6+test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
7+ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
8+ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
9+ ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
10+
11+test_id_option=--load-list $IDFILE
12+test_list_option=--list
13
14=== renamed file '.testr.conf' => '.testr.conf.moved'
15=== added directory 'actions'
16=== renamed directory 'actions' => 'actions.moved'
17=== added file 'actions.yaml'
18--- actions.yaml 1970-01-01 00:00:00 +0000
19+++ actions.yaml 2016-01-30 12:38:43 +0000
20@@ -0,0 +1,6 @@
21+pause:
22+ description: Pause the swift-storage unit. This action will stop Swift services.
23+resume:
24+ description: Resume the swift-storage unit. This action will start Swift services.
25+openstack-upgrade:
26+ description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True.
27
28=== renamed file 'actions.yaml' => 'actions.yaml.moved'
29=== added file 'actions/__init__.py'
30=== added file 'actions/actions.py'
31--- actions/actions.py 1970-01-01 00:00:00 +0000
32+++ actions/actions.py 2016-01-30 12:38:43 +0000
33@@ -0,0 +1,108 @@
34+#!/usr/bin/python
35+
36+import argparse
37+import os
38+import sys
39+import yaml
40+
41+from charmhelpers.core.host import service_pause, service_resume
42+from charmhelpers.core.hookenv import action_fail
43+from charmhelpers.core.unitdata import HookData, kv
44+from charmhelpers.contrib.openstack.utils import (
45+ get_os_codename_package,
46+ set_os_workload_status,
47+)
48+from lib.swift_storage_utils import (
49+ assess_status,
50+ REQUIRED_INTERFACES,
51+ SWIFT_SVCS,
52+)
53+from hooks.swift_storage_hooks import (
54+ CONFIGS,
55+)
56+
57+
58+def _get_services():
59+ """Return a list of services that need to be (un)paused."""
60+ services = SWIFT_SVCS[:]
61+ # Before Icehouse there was no swift-container-sync
62+ if get_os_codename_package("swift-container") < "icehouse":
63+ services.remove("swift-container-sync")
64+ return services
65+
66+
67+def get_action_parser(actions_yaml_path, action_name,
68+ get_services=_get_services):
69+ """Make an argparse.ArgumentParser seeded from actions.yaml definitions."""
70+ with open(actions_yaml_path) as fh:
71+ doc = yaml.load(fh)[action_name]["description"]
72+ parser = argparse.ArgumentParser(description=doc)
73+ parser.add_argument("--services", default=get_services())
74+ # TODO: Add arguments for params defined in the actions.yaml
75+ return parser
76+
77+
78+def pause(args):
79+ """Pause all the swift services.
80+
81+ @raises Exception if any services fail to stop
82+ """
83+ for service in args.services:
84+ stopped = service_pause(service)
85+ if not stopped:
86+ raise Exception("{} didn't stop cleanly.".format(service))
87+ with HookData()():
88+ kv().set('unit-paused', True)
89+ set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
90+ charm_func=assess_status)
91+
92+
93+def resume(args):
94+ """Resume all the swift services.
95+
96+ @raises Exception if any services fail to start
97+ """
98+ for service in args.services:
99+ started = service_resume(service)
100+ if not started:
101+ raise Exception("{} didn't start cleanly.".format(service))
102+ with HookData()():
103+ kv().set('unit-paused', False)
104+ set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
105+ charm_func=assess_status)
106+
107+
108+# A dictionary of all the defined actions to callables (which take
109+# parsed arguments).
110+ACTIONS = {"pause": pause, "resume": resume}
111+
112+
113+def main(argv):
114+ action_name = _get_action_name()
115+ actions_yaml_path = _get_actions_yaml_path()
116+ parser = get_action_parser(actions_yaml_path, action_name)
117+ args = parser.parse_args(argv)
118+ try:
119+ action = ACTIONS[action_name]
120+ except KeyError:
121+ return "Action %s undefined" % action_name
122+ else:
123+ try:
124+ action(args)
125+ except Exception as e:
126+ action_fail(str(e))
127+
128+
129+def _get_action_name():
130+ """Return the name of the action."""
131+ return os.path.basename(__file__)
132+
133+
134+def _get_actions_yaml_path():
135+ """Return the path to actions.yaml"""
136+ cwd = os.path.dirname(__file__)
137+ return os.path.join(cwd, "..", "actions.yaml")
138+
139+
140+if __name__ == "__main__":
141+ sys.exit(main(sys.argv[1:]))
142
143=== added symlink 'actions/charmhelpers'
144=== target is u'../charmhelpers/'
145=== added symlink 'actions/hooks'
146=== target is u'../hooks'
147=== added symlink 'actions/lib'
148=== target is u'../lib'
149=== added symlink 'actions/openstack-upgrade'
150=== target is u'openstack_upgrade.py'
151=== added file 'actions/openstack_upgrade.py'
152--- actions/openstack_upgrade.py 1970-01-01 00:00:00 +0000
153+++ actions/openstack_upgrade.py 2016-01-30 12:38:43 +0000
154@@ -0,0 +1,34 @@
155+#!/usr/bin/python
156+import sys
157+
158+sys.path.append('hooks/')
159+
160+from charmhelpers.contrib.openstack.utils import (
161+ do_action_openstack_upgrade,
162+)
163+
164+from swift_storage_hooks import (
165+ config_changed,
166+ CONFIGS,
167+)
168+
169+from lib.swift_storage_utils import (
170+ do_openstack_upgrade,
171+)
172+
173+
174+def openstack_upgrade():
175+ """Upgrade packages to config-set Openstack version.
176+
177+ If the charm was installed from source we cannot upgrade it.
178+ For backwards compatibility a config flag must be set for this
179+ code to run, otherwise a full service level upgrade will fire
180+ on config-changed."""
181+
182+ if (do_action_openstack_upgrade('swift',
183+ do_openstack_upgrade,
184+ CONFIGS)):
185+ config_changed()
186+
187+if __name__ == '__main__':
188+ openstack_upgrade()
189
190=== added symlink 'actions/pause'
191=== target is u'actions.py'
192=== added symlink 'actions/resume'
193=== target is u'actions.py'
194=== modified file 'charm-helpers-hooks.yaml'
195--- charm-helpers-hooks.yaml 2015-10-22 16:09:29 +0000
196+++ charm-helpers-hooks.yaml 2016-01-30 12:38:43 +0000
197@@ -1,5 +1,10 @@
198+<<<<<<< TREE
199 branch: lp:~openstack-charmers/charm-helpers/stable
200 destination: charmhelpers
201+=======
202+branch: lp:charm-helpers
203+destination: charmhelpers
204+>>>>>>> MERGE-SOURCE
205 include:
206 - core
207 - cli
208
209=== renamed directory 'charmhelpers' => 'charmhelpers.moved'
210=== renamed symlink 'hooks/charmhelpers' => 'charmhelpers.new'
211=== target was u'../charmhelpers/'
212=== added directory 'charmhelpers.new/cli'
213=== added file 'charmhelpers.new/cli/__init__.py'
214--- charmhelpers.new/cli/__init__.py 1970-01-01 00:00:00 +0000
215+++ charmhelpers.new/cli/__init__.py 2016-01-30 12:38:43 +0000
216@@ -0,0 +1,191 @@
217+# Copyright 2014-2015 Canonical Limited.
218+#
219+# This file is part of charm-helpers.
220+#
221+# charm-helpers is free software: you can redistribute it and/or modify
222+# it under the terms of the GNU Lesser General Public License version 3 as
223+# published by the Free Software Foundation.
224+#
225+# charm-helpers is distributed in the hope that it will be useful,
226+# but WITHOUT ANY WARRANTY; without even the implied warranty of
227+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
228+# GNU Lesser General Public License for more details.
229+#
230+# You should have received a copy of the GNU Lesser General Public License
231+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
232+
233+import inspect
234+import argparse
235+import sys
236+
237+from six.moves import zip
238+
239+import charmhelpers.core.unitdata
240+
241+
242+class OutputFormatter(object):
243+ def __init__(self, outfile=sys.stdout):
244+ self.formats = (
245+ "raw",
246+ "json",
247+ "py",
248+ "yaml",
249+ "csv",
250+ "tab",
251+ )
252+ self.outfile = outfile
253+
254+ def add_arguments(self, argument_parser):
255+ formatgroup = argument_parser.add_mutually_exclusive_group()
256+ choices = self.supported_formats
257+ formatgroup.add_argument("--format", metavar='FMT',
258+ help="Select output format for returned data, "
259+ "where FMT is one of: {}".format(choices),
260+ choices=choices, default='raw')
261+ for fmt in self.formats:
262+ fmtfunc = getattr(self, fmt)
263+ formatgroup.add_argument("-{}".format(fmt[0]),
264+ "--{}".format(fmt), action='store_const',
265+ const=fmt, dest='format',
266+ help=fmtfunc.__doc__)
267+
268+ @property
269+ def supported_formats(self):
270+ return self.formats
271+
272+ def raw(self, output):
273+ """Output data as raw string (default)"""
274+ if isinstance(output, (list, tuple)):
275+ output = '\n'.join(map(str, output))
276+ self.outfile.write(str(output))
277+
278+ def py(self, output):
279+ """Output data as a nicely-formatted python data structure"""
280+ import pprint
281+ pprint.pprint(output, stream=self.outfile)
282+
283+ def json(self, output):
284+ """Output data in JSON format"""
285+ import json
286+ json.dump(output, self.outfile)
287+
288+ def yaml(self, output):
289+ """Output data in YAML format"""
290+ import yaml
291+ yaml.safe_dump(output, self.outfile)
292+
293+ def csv(self, output):
294+ """Output data as excel-compatible CSV"""
295+ import csv
296+ csvwriter = csv.writer(self.outfile)
297+ csvwriter.writerows(output)
298+
299+ def tab(self, output):
300+ """Output data in excel-compatible tab-delimited format"""
301+ import csv
302+ csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
303+ csvwriter.writerows(output)
304+
305+ def format_output(self, output, fmt='raw'):
306+ fmtfunc = getattr(self, fmt)
307+ fmtfunc(output)
308+
309+
310+class CommandLine(object):
311+ argument_parser = None
312+ subparsers = None
313+ formatter = None
314+ exit_code = 0
315+
316+ def __init__(self):
317+ if not self.argument_parser:
318+ self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
319+ if not self.formatter:
320+ self.formatter = OutputFormatter()
321+ self.formatter.add_arguments(self.argument_parser)
322+ if not self.subparsers:
323+ self.subparsers = self.argument_parser.add_subparsers(help='Commands')
324+
325+ def subcommand(self, command_name=None):
326+ """
327+ Decorate a function as a subcommand. Use its arguments as the
328+ command-line arguments"""
329+ def wrapper(decorated):
330+ cmd_name = command_name or decorated.__name__
331+ subparser = self.subparsers.add_parser(cmd_name,
332+ description=decorated.__doc__)
333+ for args, kwargs in describe_arguments(decorated):
334+ subparser.add_argument(*args, **kwargs)
335+ subparser.set_defaults(func=decorated)
336+ return decorated
337+ return wrapper
338+
339+ def test_command(self, decorated):
340+ """
341+ Subcommand is a boolean test function, so bool return values should be
342+ converted to a 0/1 exit code.
343+ """
344+ decorated._cli_test_command = True
345+ return decorated
346+
347+ def no_output(self, decorated):
348+ """
349+ Subcommand is not expected to return a value, so don't print a spurious None.
350+ """
351+ decorated._cli_no_output = True
352+ return decorated
353+
354+ def subcommand_builder(self, command_name, description=None):
355+ """
356+ Decorate a function that builds a subcommand. Builders should accept a
357+ single argument (the subparser instance) and return the function to be
358+ run as the command."""
359+ def wrapper(decorated):
360+ subparser = self.subparsers.add_parser(command_name)
361+ func = decorated(subparser)
362+ subparser.set_defaults(func=func)
363+ subparser.description = description or func.__doc__
364+ return wrapper
365+
366+ def run(self):
367+ "Run cli, processing arguments and executing subcommands."
368+ arguments = self.argument_parser.parse_args()
369+ argspec = inspect.getargspec(arguments.func)
370+ vargs = []
371+ for arg in argspec.args:
372+ vargs.append(getattr(arguments, arg))
373+ if argspec.varargs:
374+ vargs.extend(getattr(arguments, argspec.varargs))
375+ output = arguments.func(*vargs)
376+ if getattr(arguments.func, '_cli_test_command', False):
377+ self.exit_code = 0 if output else 1
378+ output = ''
379+ if getattr(arguments.func, '_cli_no_output', False):
380+ output = ''
381+ self.formatter.format_output(output, arguments.format)
382+ if charmhelpers.core.unitdata._KV:
383+ charmhelpers.core.unitdata._KV.flush()
384+
385+
386+cmdline = CommandLine()
387+
388+
389+def describe_arguments(func):
390+ """
391+ Analyze a function's signature and return a data structure suitable for
392+ passing in as arguments to an argparse parser's add_argument() method."""
393+
394+ argspec = inspect.getargspec(func)
395+ # we should probably raise an exception somewhere if func includes **kwargs
396+ if argspec.defaults:
397+ positional_args = argspec.args[:-len(argspec.defaults)]
398+ keyword_names = argspec.args[-len(argspec.defaults):]
399+ for arg, default in zip(keyword_names, argspec.defaults):
400+ yield ('--{}'.format(arg),), {'default': default}
401+ else:
402+ positional_args = argspec.args
403+
404+ for arg in positional_args:
405+ yield (arg,), {}
406+ if argspec.varargs:
407+ yield (argspec.varargs,), {'nargs': '*'}
408
409=== added file 'charmhelpers.new/cli/benchmark.py'
410--- charmhelpers.new/cli/benchmark.py 1970-01-01 00:00:00 +0000
411+++ charmhelpers.new/cli/benchmark.py 2016-01-30 12:38:43 +0000
412@@ -0,0 +1,36 @@
413+# Copyright 2014-2015 Canonical Limited.
414+#
415+# This file is part of charm-helpers.
416+#
417+# charm-helpers is free software: you can redistribute it and/or modify
418+# it under the terms of the GNU Lesser General Public License version 3 as
419+# published by the Free Software Foundation.
420+#
421+# charm-helpers is distributed in the hope that it will be useful,
422+# but WITHOUT ANY WARRANTY; without even the implied warranty of
423+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
424+# GNU Lesser General Public License for more details.
425+#
426+# You should have received a copy of the GNU Lesser General Public License
427+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
428+
429+from . import cmdline
430+from charmhelpers.contrib.benchmark import Benchmark
431+
432+
433+@cmdline.subcommand(command_name='benchmark-start')
434+def start():
435+ Benchmark.start()
436+
437+
438+@cmdline.subcommand(command_name='benchmark-finish')
439+def finish():
440+ Benchmark.finish()
441+
442+
443+@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
444+def service(subparser):
445+ subparser.add_argument("value", help="The composite score.")
446+ subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
447+ subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
448+ return Benchmark.set_composite_score
449
450=== added file 'charmhelpers.new/cli/commands.py'
451--- charmhelpers.new/cli/commands.py 1970-01-01 00:00:00 +0000
452+++ charmhelpers.new/cli/commands.py 2016-01-30 12:38:43 +0000
453@@ -0,0 +1,32 @@
454+# Copyright 2014-2015 Canonical Limited.
455+#
456+# This file is part of charm-helpers.
457+#
458+# charm-helpers is free software: you can redistribute it and/or modify
459+# it under the terms of the GNU Lesser General Public License version 3 as
460+# published by the Free Software Foundation.
461+#
462+# charm-helpers is distributed in the hope that it will be useful,
463+# but WITHOUT ANY WARRANTY; without even the implied warranty of
464+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
465+# GNU Lesser General Public License for more details.
466+#
467+# You should have received a copy of the GNU Lesser General Public License
468+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
469+
470+"""
471+This module loads sub-modules into the python runtime so they can be
472+discovered via the inspect module. In order to prevent flake8 from (rightfully)
473+telling us these are unused modules, throw a ' # noqa' at the end of each import
474+so that the warning is suppressed.
475+"""
476+
477+from . import CommandLine # noqa
478+
479+"""
480+Import the sub-modules which have decorated subcommands to register with chlp.
481+"""
482+from . import host # noqa
483+from . import benchmark # noqa
484+from . import unitdata # noqa
485+from . import hookenv # noqa
486
487=== added file 'charmhelpers.new/cli/hookenv.py'
488--- charmhelpers.new/cli/hookenv.py 1970-01-01 00:00:00 +0000
489+++ charmhelpers.new/cli/hookenv.py 2016-01-30 12:38:43 +0000
490@@ -0,0 +1,23 @@
491+# Copyright 2014-2015 Canonical Limited.
492+#
493+# This file is part of charm-helpers.
494+#
495+# charm-helpers is free software: you can redistribute it and/or modify
496+# it under the terms of the GNU Lesser General Public License version 3 as
497+# published by the Free Software Foundation.
498+#
499+# charm-helpers is distributed in the hope that it will be useful,
500+# but WITHOUT ANY WARRANTY; without even the implied warranty of
501+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
502+# GNU Lesser General Public License for more details.
503+#
504+# You should have received a copy of the GNU Lesser General Public License
505+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
506+
507+from . import cmdline
508+from charmhelpers.core import hookenv
509+
510+
511+cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
512+cmdline.subcommand('service-name')(hookenv.service_name)
513+cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
514
515=== added file 'charmhelpers.new/cli/host.py'
516--- charmhelpers.new/cli/host.py 1970-01-01 00:00:00 +0000
517+++ charmhelpers.new/cli/host.py 2016-01-30 12:38:43 +0000
518@@ -0,0 +1,31 @@
519+# Copyright 2014-2015 Canonical Limited.
520+#
521+# This file is part of charm-helpers.
522+#
523+# charm-helpers is free software: you can redistribute it and/or modify
524+# it under the terms of the GNU Lesser General Public License version 3 as
525+# published by the Free Software Foundation.
526+#
527+# charm-helpers is distributed in the hope that it will be useful,
528+# but WITHOUT ANY WARRANTY; without even the implied warranty of
529+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
530+# GNU Lesser General Public License for more details.
531+#
532+# You should have received a copy of the GNU Lesser General Public License
533+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
534+
535+from . import cmdline
536+from charmhelpers.core import host
537+
538+
539+@cmdline.subcommand()
540+def mounts():
541+ "List mounts"
542+ return host.mounts()
543+
544+
545+@cmdline.subcommand_builder('service', description="Control system services")
546+def service(subparser):
547+ subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
548+ subparser.add_argument("service_name", help="Name of the service to control")
549+ return host.service
550
551=== added file 'charmhelpers.new/cli/unitdata.py'
552--- charmhelpers.new/cli/unitdata.py 1970-01-01 00:00:00 +0000
553+++ charmhelpers.new/cli/unitdata.py 2016-01-30 12:38:43 +0000
554@@ -0,0 +1,39 @@
555+# Copyright 2014-2015 Canonical Limited.
556+#
557+# This file is part of charm-helpers.
558+#
559+# charm-helpers is free software: you can redistribute it and/or modify
560+# it under the terms of the GNU Lesser General Public License version 3 as
561+# published by the Free Software Foundation.
562+#
563+# charm-helpers is distributed in the hope that it will be useful,
564+# but WITHOUT ANY WARRANTY; without even the implied warranty of
565+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
566+# GNU Lesser General Public License for more details.
567+#
568+# You should have received a copy of the GNU Lesser General Public License
569+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
570+
571+from . import cmdline
572+from charmhelpers.core import unitdata
573+
574+
575+@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
576+def unitdata_cmd(subparser):
577+ nested = subparser.add_subparsers()
578+ get_cmd = nested.add_parser('get', help='Retrieve data')
579+ get_cmd.add_argument('key', help='Key to retrieve the value of')
580+ get_cmd.set_defaults(action='get', value=None)
581+ set_cmd = nested.add_parser('set', help='Store data')
582+ set_cmd.add_argument('key', help='Key to set')
583+ set_cmd.add_argument('value', help='Value to store')
584+ set_cmd.set_defaults(action='set')
585+
586+ def _unitdata_cmd(action, key, value):
587+ if action == 'get':
588+ return unitdata.kv().get(key)
589+ elif action == 'set':
590+ unitdata.kv().set(key, value)
591+ unitdata.kv().flush()
592+ return ''
593+ return _unitdata_cmd
594
595=== added directory 'charmhelpers.new/contrib'
596=== added directory 'charmhelpers.new/contrib/charmsupport'
597=== added file 'charmhelpers.new/contrib/charmsupport/nrpe.py'
598--- charmhelpers.new/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
599+++ charmhelpers.new/contrib/charmsupport/nrpe.py 2016-01-30 12:38:43 +0000
600@@ -0,0 +1,398 @@
601+# Copyright 2014-2015 Canonical Limited.
602+#
603+# This file is part of charm-helpers.
604+#
605+# charm-helpers is free software: you can redistribute it and/or modify
606+# it under the terms of the GNU Lesser General Public License version 3 as
607+# published by the Free Software Foundation.
608+#
609+# charm-helpers is distributed in the hope that it will be useful,
610+# but WITHOUT ANY WARRANTY; without even the implied warranty of
611+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
612+# GNU Lesser General Public License for more details.
613+#
614+# You should have received a copy of the GNU Lesser General Public License
615+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
616+
617+"""Compatibility with the nrpe-external-master charm"""
618+# Copyright 2012 Canonical Ltd.
619+#
620+# Authors:
621+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
622+
623+import subprocess
624+import pwd
625+import grp
626+import os
627+import glob
628+import shutil
629+import re
630+import shlex
631+import yaml
632+
633+from charmhelpers.core.hookenv import (
634+ config,
635+ local_unit,
636+ log,
637+ relation_ids,
638+ relation_set,
639+ relations_of_type,
640+)
641+
642+from charmhelpers.core.host import service
643+
644+# This module adds compatibility with the nrpe-external-master and plain nrpe
645+# subordinate charms. To use it in your charm:
646+#
647+# 1. Update metadata.yaml
648+#
649+# provides:
650+# (...)
651+# nrpe-external-master:
652+# interface: nrpe-external-master
653+# scope: container
654+#
655+# and/or
656+#
657+# provides:
658+# (...)
659+# local-monitors:
660+# interface: local-monitors
661+# scope: container
662+
663+#
664+# 2. Add the following to config.yaml
665+#
666+# nagios_context:
667+# default: "juju"
668+# type: string
669+# description: |
670+# Used by the nrpe subordinate charms.
671+# A string that will be prepended to instance name to set the host name
672+# in nagios. So for instance the hostname would be something like:
673+# juju-myservice-0
674+# If you're running multiple environments with the same services in them
675+# this allows you to differentiate between them.
676+# nagios_servicegroups:
677+# default: ""
678+# type: string
679+# description: |
680+# A comma-separated list of nagios servicegroups.
681+# If left empty, the nagios_context will be used as the servicegroup
682+#
683+# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
684+#
685+# 4. Update your hooks.py with something like this:
686+#
687+# from charmsupport.nrpe import NRPE
688+# (...)
689+# def update_nrpe_config():
690+# nrpe_compat = NRPE()
691+# nrpe_compat.add_check(
692+# shortname = "myservice",
693+# description = "Check MyService",
694+# check_cmd = "check_http -w 2 -c 10 http://localhost"
695+# )
696+# nrpe_compat.add_check(
697+# "myservice_other",
698+# "Check for widget failures",
699+# check_cmd = "/srv/myapp/scripts/widget_check"
700+# )
701+# nrpe_compat.write()
702+#
703+# def config_changed():
704+# (...)
705+# update_nrpe_config()
706+#
707+# def nrpe_external_master_relation_changed():
708+# update_nrpe_config()
709+#
710+# def local_monitors_relation_changed():
711+# update_nrpe_config()
712+#
713+# 5. ln -s hooks.py nrpe-external-master-relation-changed
714+# ln -s hooks.py local-monitors-relation-changed
715+
716+
class CheckException(Exception):
    """Raised when an NRPE check definition is invalid (e.g. a shortname
    that does not match Check.shortname_re)."""
    pass
719+
720+
class Check(object):
    """A single NRPE check.

    Encapsulates the nrpe command definition (written under
    NRPE.nrpe_confdir) and the matching Nagios service definition
    (exported under NRPE.nagios_exportdir for the Nagios server).
    """

    # Shortnames may only contain alphanumerics, '-' and '_'.
    shortname_re = '[A-Za-z0-9-_]+$'
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use                             active-service
    host_name                       {nagios_hostname}
    service_description             {nagios_hostname}[{shortname}] """
                        """{description}
    check_command                   check_nrpe!{command}
    servicegroups                   {nagios_servicegroup}
}}
""")

    def __init__(self, shortname, description, check_cmd):
        """
        :param shortname: short identifier for the check; must match
            shortname_re.
        :param description: human readable description used in the Nagios
            service definition.
        :param check_cmd: plugin command (with arguments) to run; resolved
            against the standard Nagios plugin directories.
        :raises CheckException: if shortname is invalid.
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)

    def _get_check_filename(self):
        """Path of the nrpe command definition file for this check."""
        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))

    def _get_service_filename(self, hostname):
        """Path of the exported Nagios service definition for this check."""
        return os.path.join(NRPE.nagios_exportdir,
                            'service__{}_{}.cfg'.format(hostname, self.command))

    def _locate_cmd(self, check_cmd):
        """Resolve the plugin named in check_cmd against the Nagios
        plugin directories.

        :returns: absolute command string (arguments preserved), or ''
            when the plugin executable cannot be found.
        """
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def _remove_service_files(self):
        """Delete any exported service definition files for this check."""
        if not os.path.exists(NRPE.nagios_exportdir):
            return
        for f in os.listdir(NRPE.nagios_exportdir):
            if f.endswith('_{}.cfg'.format(self.command)):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

    def remove(self, hostname):
        """Remove this check's command and service definitions.

        :param hostname: unused; kept for interface compatibility.
        """
        nrpe_check_file = self._get_check_filename()
        if os.path.exists(nrpe_check_file):
            os.remove(nrpe_check_file)
        self._remove_service_files()

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the nrpe command definition, and the exported Nagios
        service definition when the export dir is accessible."""
        nrpe_check_file = self._get_check_filename()
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Render and write the Nagios service definition, replacing any
        previously exported files for this check."""
        self._remove_service_files()

        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = self._get_service_filename(hostname)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        """Execute the resolved check command locally.

        BUGFIX: subprocess.call() was previously handed the whole command
        string; without shell=True that fails whenever check_cmd carries
        arguments (the entire string is treated as the executable name).
        Split into an argv list first.
        """
        subprocess.call(shlex.split(self.check_cmd))
818+
819+
class NRPE(object):
    """Collects Check objects, writes them out for the local nrpe daemon
    and publishes the monitor set to related Nagios servers."""

    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'

    def __init__(self, hostname=None):
        """
        :param hostname: nagios hostname to use; when None it is taken
            from the nrpe relation, falling back to
            "<nagios_context>-<unit name>".
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            # Default the servicegroup to the context when unset/empty.
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []

    def add_check(self, *args, **kwargs):
        """Register a new Check (same arguments as Check())."""
        self.checks.append(Check(*args, **kwargs))

    def remove_check(self, *args, **kwargs):
        """Remove a previously written check; only 'shortname' is required."""
        if kwargs.get('shortname') is None:
            raise ValueError('shortname of check must be specified')

        # Use sensible defaults if they're not specified - these are not
        # actually used during removal, but they're required for constructing
        # the Check object; check_disk is chosen because it's part of the
        # nagios-plugins-basic package.
        if kwargs.get('check_cmd') is None:
            kwargs['check_cmd'] = 'check_disk'
        if kwargs.get('description') is None:
            kwargs['description'] = ''

        check = Check(*args, **kwargs)
        check.remove(self.hostname)

    def write(self):
        """Write all registered checks, restart the nrpe daemon and
        publish the monitors over any local-monitors /
        nrpe-external-master relations."""
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # BUGFIX: was a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt; getpwnam/getgrnam raise
            # KeyError when the user/group does not exist.
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
890+
891+
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_host_context

    :param str relation_name: Name of relation nrpe sub joined to
    :returns: the host context string, or None if no related unit sets one
    """
    for relation_data in relations_of_type(relation_name):
        try:
            return relation_data['nagios_host_context']
        except KeyError:
            continue
901+
902+
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    :returns: the nagios hostname, or None if no related unit sets one
    """
    for relation_data in relations_of_type(relation_name):
        try:
            return relation_data['nagios_hostname']
        except KeyError:
            continue
912+
913+
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    if not host_context:
        return local_unit()
    return "%s:%s" % (host_context, local_unit())
926+
927+
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add a process check for each service in the list.

    Upstart-managed services get a check_upstart_job check; sysv services
    get a cron job that polls the init script and a check_status_file.py
    check that reads the resulting status file.

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc
        if os.path.exists(upstart_init):
            # Don't add a check for these services from neutron-gateway
            if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
                nrpe.add_check(
                    shortname=svc,
                    description='process check {%s}' % unit_name,
                    check_cmd='check_upstart_job %s' % svc
                )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            # BUGFIX: use a context manager so the cron file is closed
            # even if the write fails.
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
964+
965+
def copy_nrpe_checks(nrpe_files_dir=None):
    """
    Copy the charm's bundled nrpe check plugins into the local Nagios
    plugin directory.

    :param str nrpe_files_dir: directory to copy check_* files from;
        defaults to the charmhelpers openstack files dir under CHARM_DIR
        (preserves the original behaviour when omitted).
    """
    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
    if nrpe_files_dir is None:
        nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
                                      'charmhelpers', 'contrib', 'openstack',
                                      'files')

    if not os.path.exists(NAGIOS_PLUGINS):
        os.makedirs(NAGIOS_PLUGINS)
    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
        if os.path.isfile(fname):
            shutil.copy2(fname,
                         os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
982+
983+
def add_haproxy_checks(nrpe, unit_name):
    """
    Register the standard pair of haproxy health checks.

    :param NRPE nrpe: NRPE object to add the checks to
    :param str unit_name: Unit name to use in the check descriptions
    """
    haproxy_checks = (
        ('haproxy_servers', 'Check HAProxy {%s}', 'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}',
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description, check_cmd in haproxy_checks:
        nrpe.add_check(shortname=shortname,
                       description=description % unit_name,
                       check_cmd=check_cmd)
999
1000=== added directory 'charmhelpers.new/contrib/hahelpers'
1001=== added file 'charmhelpers.new/contrib/hahelpers/cluster.py'
1002--- charmhelpers.new/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
1003+++ charmhelpers.new/contrib/hahelpers/cluster.py 2016-01-30 12:38:43 +0000
1004@@ -0,0 +1,316 @@
1005+# Copyright 2014-2015 Canonical Limited.
1006+#
1007+# This file is part of charm-helpers.
1008+#
1009+# charm-helpers is free software: you can redistribute it and/or modify
1010+# it under the terms of the GNU Lesser General Public License version 3 as
1011+# published by the Free Software Foundation.
1012+#
1013+# charm-helpers is distributed in the hope that it will be useful,
1014+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1015+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1016+# GNU Lesser General Public License for more details.
1017+#
1018+# You should have received a copy of the GNU Lesser General Public License
1019+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1020+
1021+#
1022+# Copyright 2012 Canonical Ltd.
1023+#
1024+# Authors:
1025+# James Page <james.page@ubuntu.com>
1026+# Adam Gandelman <adamg@ubuntu.com>
1027+#
1028+
1029+"""
1030+Helpers for clustering and determining "cluster leadership" and other
1031+clustering-related helpers.
1032+"""
1033+
1034+import subprocess
1035+import os
1036+
1037+from socket import gethostname as get_unit_hostname
1038+
1039+import six
1040+
1041+from charmhelpers.core.hookenv import (
1042+ log,
1043+ relation_ids,
1044+ related_units as relation_list,
1045+ relation_get,
1046+ config as config_get,
1047+ INFO,
1048+ ERROR,
1049+ WARNING,
1050+ unit_get,
1051+ is_leader as juju_is_leader
1052+)
1053+from charmhelpers.core.decorators import (
1054+ retry_on_exception,
1055+)
1056+from charmhelpers.core.strutils import (
1057+ bool_from_string,
1058+)
1059+
# Name of the pseudo-resource used to request leadership based on the
# pacemaker Designated Controller (DC) rather than a real CRM resource.
DC_RESOURCE_NAME = 'DC'


class HAIncompleteConfig(Exception):
    """Raised when charm config lacks settings required for hacluster."""
    pass


class CRMResourceNotFound(Exception):
    """Raised when a queried CRM resource is reported as not running."""
    pass


class CRMDCNotFound(Exception):
    """Raised when the pacemaker Designated Controller cannot be found."""
    pass
1073+
1074+
def is_elected_leader(resource):
    """Return True if the unit executing this is the elected cluster leader.

    Leadership is determined by, in order:
    1. Juju leadership election (is_leader), when the Juju version
       supports it.
    2. corosync/CRM leadership for ``resource``, when the charm is part
       of a corosync cluster.
    3. Otherwise, whether this is the alive unit with the lowest unit
       number, i.e. the oldest surviving unit.
    """
    try:
        return juju_is_leader()
    except NotImplementedError:
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        if is_crm_leader(resource):
            return True
        log('Deferring action to CRM leader.', level=INFO)
        return False

    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
1105+
1106+
def is_clustered():
    """Return True when any unit on the 'ha' relation reports clustered."""
    for rid in (relation_ids('ha') or []):
        for member in (relation_list(rid) or []):
            if relation_get('clustered', rid=rid, unit=member):
                return True
    return False
1116+
1117+
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller
    """
    try:
        status = subprocess.check_output(['crm', 'status'],
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))
    if not isinstance(status, six.text_type):
        status = six.text_type(status, "utf-8")

    local_host = get_unit_hostname()
    for line in status.split('\n'):
        if not line.startswith('Current DC'):
            continue
        # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
        current_dc = line.split(':')[1].split()[0]
        if current_dc == local_host:
            return True
        if current_dc == 'NONE':
            raise CRMDCNotFound('Current DC: NONE')

    return False
1141+
1142+
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """Return True if this unit is the elected corosync leader for resource,
    as reported by the external "crm" command.

    The operation may be retried (via the decorator) to avoid false
    negatives. See LP #1396246 for more info.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()

    try:
        status = subprocess.check_output(
            ['crm', 'resource', 'show', resource],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    if not isinstance(status, six.text_type):
        status = six.text_type(status, "utf-8")

    if get_unit_hostname() in status:
        return True

    if "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False
1170+
1171+
def is_leader(resource):
    """Deprecated alias; use is_crm_leader() instead."""
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)
1176+
1177+
def peer_units(peer_relation="cluster"):
    """Return a list of all units participating in the peer relation."""
    return [member
            for rid in (relation_ids(peer_relation) or [])
            for member in (relation_list(rid) or [])]
1184+
1185+
def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    return {member: relation_get(addr_key, rid=rid, unit=member)
            for rid in relation_ids(peer_relation)
            for member in relation_list(rid)}
1193+
1194+
def oldest_peer(peers):
    """Return True when the local unit has the lowest unit number.

    Ties (a peer with the same number) still count as oldest.
    """
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_unit_no for peer in peers)
1203+
1204+
def eligible_leader(resource):
    """Deprecated alias; use is_elected_leader() instead."""
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)
1209+
1210+
def https():
    """Return True when enough configuration or relation data exists to
    configure HTTPS.

    returns: boolean
    """
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for rid in relation_ids('identity-service'):
        for member in relation_list(rid):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=rid, unit=member),
                relation_get('ca_cert', rid=rid, unit=member),
            ]
            # NOTE: works around (LP: #1203241)
            if all(value not in (None, '') for value in rel_state):
                return True
    return False
1234+
1235+
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    Each proxy layer in front of the API (haproxy when clustered or in
    singlenode mode, apache when HTTPS is on) shifts the port down by 10.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    offset = 0
    if singlenode_mode or len(peer_units()) > 0 or is_clustered():
        offset += 1
    if https():
        offset += 1
    return public_port - (offset * 10)
1255+
1256+
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    offset = 0
    if singlenode_mode or len(peer_units()) > 0 or is_clustered():
        offset += 1
    return public_port - (offset * 10)
1274+
1275+
def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)

    # IDIOM FIX: was a list comprehension used purely for its
    # append() side effects; build the list of missing keys directly.
    missing = [s for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf
1300+
1301+
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs : OSTemplateRenderer: A config templating object to inspect for
        a complete https context.

    :vip_setting: str: Setting in charm config that specifies
        VIP address.
    '''
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
1321
1322=== added directory 'charmhelpers.new/contrib/network'
1323=== added file 'charmhelpers.new/contrib/network/ip.py'
1324--- charmhelpers.new/contrib/network/ip.py 1970-01-01 00:00:00 +0000
1325+++ charmhelpers.new/contrib/network/ip.py 2016-01-30 12:38:43 +0000
1326@@ -0,0 +1,458 @@
1327+# Copyright 2014-2015 Canonical Limited.
1328+#
1329+# This file is part of charm-helpers.
1330+#
1331+# charm-helpers is free software: you can redistribute it and/or modify
1332+# it under the terms of the GNU Lesser General Public License version 3 as
1333+# published by the Free Software Foundation.
1334+#
1335+# charm-helpers is distributed in the hope that it will be useful,
1336+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1337+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1338+# GNU Lesser General Public License for more details.
1339+#
1340+# You should have received a copy of the GNU Lesser General Public License
1341+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1342+
1343+import glob
1344+import re
1345+import subprocess
1346+import six
1347+import socket
1348+
1349+from functools import partial
1350+
1351+from charmhelpers.core.hookenv import unit_get
1352+from charmhelpers.fetch import apt_install, apt_update
1353+from charmhelpers.core.hookenv import (
1354+ log,
1355+ WARNING,
1356+)
1357+
1358+try:
1359+ import netifaces
1360+except ImportError:
1361+ apt_update(fatal=True)
1362+ apt_install('python-netifaces', fatal=True)
1363+ import netifaces
1364+
1365+try:
1366+ import netaddr
1367+except ImportError:
1368+ apt_update(fatal=True)
1369+ apt_install('python-netaddr', fatal=True)
1370+ import netaddr
1371+
1372+
def _validate_cidr(network):
    """Raise ValueError unless `network` parses as CIDR notation.

    Used as a guard before handing strings to netaddr elsewhere in
    this module.
    """
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
1379+
1380+
def no_ip_found_error_out(network):
    """Raise a ValueError reporting that no IP was found in `network`."""
    raise ValueError("No IP address found in network(s): %s" % network)
1384+
1385+
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then raise ValueError (via
        no_ip_found_error_out).
    """
    if network is None:
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            return None

    # Space-delimited list of CIDRs; the 'or' guards the empty-string case
    # (''.split() yields []), falling back to the original value.
    networks = network.split() or [network]
    for network in networks:
        _validate_cidr(network)
        network = netaddr.IPNetwork(network)
        for iface in netifaces.interfaces():
            addresses = netifaces.ifaddresses(iface)
            if network.version == 4 and netifaces.AF_INET in addresses:
                # Only the first IPv4 address on each interface is checked.
                addr = addresses[netifaces.AF_INET][0]['addr']
                netmask = addresses[netifaces.AF_INET][0]['netmask']
                cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
                if cidr in network:
                    return str(cidr.ip)

            if network.version == 6 and netifaces.AF_INET6 in addresses:
                for addr in addresses[netifaces.AF_INET6]:
                    # Skip link-local (fe80...) IPv6 addresses.
                    if not addr['addr'].startswith('fe80'):
                        cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                            addr['netmask']))
                        if cidr in network:
                            return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None
1432+
1433+
def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        parsed = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False
    return parsed.version == 6
1443+
1444+
def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        cidr = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        ip_addr = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    return ip_addr in cidr
1471+
1472+
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            # Only the first IPv4 address on each interface is considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80...) IPv6 addresses.
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            # For IPv6 report the prefix length rather than
                            # the raw netmask entry.
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None
1512+
1513+
# Convenience wrappers over _get_for_address: resolve the interface name
# or the netmask associated with a locally-bindable address.
get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')
1518+
1519+
def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    return "[%s]" % address if is_ipv6(address) else None
1530+
1531+
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any.

    :param iface: interface name; a '/dev/ethX' style path is accepted
        and reduced to its final component.
    :param inet_type: name of a netifaces address-family attribute,
        e.g. 'AF_INET' or 'AF_INET6'.
    :param inc_aliases: also include alias interfaces ('eth0:1' etc).
    :param fatal: raise when the interface is missing or has no address.
    :param exc_list: addresses to exclude from the result.
    :returns: sorted list of address strings (possibly empty).
    """
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]

    if not exc_list:
        exc_list = []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            # An alias like 'eth0:1' matches its parent 'eth0'.
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)

        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)

        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []

        else:
            ifaces = [iface]

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
1581+
1582+
1583+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
1584+
1585+
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    # Pattern to strip any '%<scope>' suffix from link-local addresses
    # before comparison.
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        for inet_type, entries in netifaces.ifaddresses(iface).items():
            for entry in entries:
                candidate = entry['addr']
                raw = ll_key.match(candidate)
                if raw:
                    candidate = raw.group(1)

                if candidate == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
1606+
1607+
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    def iface_sniffer(*args, **kwargs):
        if kwargs.get('iface'):
            return f(*args, **kwargs)
        kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
        return f(*args, **kwargs)

    return iface_sniffer
1620+
1621+
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.

    :param iface: interface to inspect (injected by @sniff_iface when None).
    :param inc_aliases: include alias interfaces.
    :param fatal: raise when no suitable address is found.
    :param exc_list: addresses to exclude.
    :param dynamic_only: only accept addresses whose suffix matches the
        EUI-64 identifier taken from the link-local address.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        # BUGFIX: initialise so the dynamic_only check below cannot raise
        # NameError when no link-local address matched the pattern.
        eui_64_mac = None
        for addr in addresses:
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if (not dynamic_only or
                                    (eui_64_mac is not None and
                                     m.group(1).endswith(eui_64_mac))):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
1680+
1681+
1682+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
1683+ """Return a list of bridges on the system."""
1684+ b_regex = "%s/*/bridge" % vnic_dir
1685+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
1686+
1687+
1688+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
1689+ """Return a list of nics comprising a given bridge on the system."""
1690+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
1691+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
1692+
1693+
1694+def is_bridge_member(nic):
1695+ """Check if a given nic is a member of a bridge."""
1696+ for bridge in get_bridges():
1697+ if nic in get_bridge_nics(bridge):
1698+ return True
1699+
1700+ return False
1701+
1702+
1703+def is_ip(address):
1704+ """
1705+ Returns True if address is a valid IP address.
1706+ """
1707+ try:
1708+ # Test to see if already an IPv4 address
1709+ socket.inet_aton(address)
1710+ return True
1711+ except socket.error:
1712+ return False
1713+
1714+
1715+def ns_query(address):
1716+ try:
1717+ import dns.resolver
1718+ except ImportError:
1719+ apt_install('python-dnspython')
1720+ import dns.resolver
1721+
1722+ if isinstance(address, dns.name.Name):
1723+ rtype = 'PTR'
1724+ elif isinstance(address, six.string_types):
1725+ rtype = 'A'
1726+ else:
1727+ return None
1728+
1729+ answers = dns.resolver.query(address, rtype)
1730+ if answers:
1731+ return str(answers[0])
1732+ return None
1733+
1734+
1735+def get_host_ip(hostname, fallback=None):
1736+ """
1737+ Resolves the IP for a given hostname, or returns
1738+ the input if it is already an IP.
1739+ """
1740+ if is_ip(hostname):
1741+ return hostname
1742+
1743+ ip_addr = ns_query(hostname)
1744+ if not ip_addr:
1745+ try:
1746+ ip_addr = socket.gethostbyname(hostname)
1747+ except:
1748+ log("Failed to resolve hostname '%s'" % (hostname),
1749+ level=WARNING)
1750+ return fallback
1751+ return ip_addr
1752+
1753+
1754+def get_hostname(address, fqdn=True):
1755+ """
1756+ Resolves hostname for given IP, or returns the input
1757+ if it is already a hostname.
1758+ """
1759+ if is_ip(address):
1760+ try:
1761+ import dns.reversename
1762+ except ImportError:
1763+ apt_install("python-dnspython")
1764+ import dns.reversename
1765+
1766+ rev = dns.reversename.from_address(address)
1767+ result = ns_query(rev)
1768+
1769+ if not result:
1770+ try:
1771+ result = socket.gethostbyaddr(address)[0]
1772+ except:
1773+ return None
1774+ else:
1775+ result = address
1776+
1777+ if fqdn:
1778+ # strip trailing .
1779+ if result.endswith('.'):
1780+ return result[:-1]
1781+ else:
1782+ return result
1783+ else:
1784+ return result.split('.')[0]
1785
1786=== added directory 'charmhelpers.new/contrib/openstack'
1787=== added directory 'charmhelpers.new/contrib/openstack/amulet'
1788=== added file 'charmhelpers.new/contrib/openstack/amulet/deployment.py'
1789--- charmhelpers.new/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
1790+++ charmhelpers.new/contrib/openstack/amulet/deployment.py 2016-01-30 12:38:43 +0000
1791@@ -0,0 +1,302 @@
1792+# Copyright 2014-2015 Canonical Limited.
1793+#
1794+# This file is part of charm-helpers.
1795+#
1796+# charm-helpers is free software: you can redistribute it and/or modify
1797+# it under the terms of the GNU Lesser General Public License version 3 as
1798+# published by the Free Software Foundation.
1799+#
1800+# charm-helpers is distributed in the hope that it will be useful,
1801+# but WITHOUT ANY WARRANTY; without even the implied warranty of
1802+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1803+# GNU Lesser General Public License for more details.
1804+#
1805+# You should have received a copy of the GNU Lesser General Public License
1806+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
1807+
1808+import logging
1809+import re
1810+import sys
1811+import six
1812+from collections import OrderedDict
1813+from charmhelpers.contrib.amulet.deployment import (
1814+ AmuletDeployment
1815+)
1816+
1817+DEBUG = logging.DEBUG
1818+ERROR = logging.ERROR
1819+
1820+
1821+class OpenStackAmuletDeployment(AmuletDeployment):
1822+ """OpenStack amulet deployment.
1823+
1824+ This class inherits from AmuletDeployment and has additional support
1825+ that is specifically for use by OpenStack charms.
1826+ """
1827+
1828+ def __init__(self, series=None, openstack=None, source=None,
1829+ stable=True, log_level=DEBUG):
1830+ """Initialize the deployment environment."""
1831+ super(OpenStackAmuletDeployment, self).__init__(series)
1832+ self.log = self.get_logger(level=log_level)
1833+ self.log.info('OpenStackAmuletDeployment: init')
1834+ self.openstack = openstack
1835+ self.source = source
1836+ self.stable = stable
1837+ # Note(coreycb): this needs to be changed when new next branches come
1838+ # out.
1839+ self.current_next = "trusty"
1840+
1841+ def get_logger(self, name="deployment-logger", level=logging.DEBUG):
1842+ """Get a logger object that will log to stdout."""
1843+ log = logging
1844+ logger = log.getLogger(name)
1845+ fmt = log.Formatter("%(asctime)s %(funcName)s "
1846+ "%(levelname)s: %(message)s")
1847+
1848+ handler = log.StreamHandler(stream=sys.stdout)
1849+ handler.setLevel(level)
1850+ handler.setFormatter(fmt)
1851+
1852+ logger.addHandler(handler)
1853+ logger.setLevel(level)
1854+
1855+ return logger
1856+
1857+ def _determine_branch_locations(self, other_services):
1858+ """Determine the branch locations for the other services.
1859+
1860+ Determine if the local branch being tested is derived from its
1861+ stable or next (dev) branch, and based on this, use the corresponding
1862+ stable or next branches for the other_services."""
1863+
1864+ self.log.info('OpenStackAmuletDeployment: determine branch locations')
1865+
1866+ # Charms outside the lp:~openstack-charmers namespace
1867+ base_charms = ['mysql', 'mongodb', 'nrpe']
1868+
1869+ # Force these charms to current series even when using an older series.
1870+ # ie. Use trusty/nrpe even when series is precise, as the P charm
1871+ # does not possess the necessary external master config and hooks.
1872+ force_series_current = ['nrpe']
1873+
1874+ if self.series in ['precise', 'trusty']:
1875+ base_series = self.series
1876+ else:
1877+ base_series = self.current_next
1878+
1879+ for svc in other_services:
1880+ if svc['name'] in force_series_current:
1881+ base_series = self.current_next
1882+ # If a location has been explicitly set, use it
1883+ if svc.get('location'):
1884+ continue
1885+ if self.stable:
1886+ temp = 'lp:charms/{}/{}'
1887+ svc['location'] = temp.format(base_series,
1888+ svc['name'])
1889+ else:
1890+ if svc['name'] in base_charms:
1891+ temp = 'lp:charms/{}/{}'
1892+ svc['location'] = temp.format(base_series,
1893+ svc['name'])
1894+ else:
1895+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
1896+ svc['location'] = temp.format(self.current_next,
1897+ svc['name'])
1898+
1899+ return other_services
1900+
1901+ def _add_services(self, this_service, other_services):
1902+ """Add services to the deployment and set openstack-origin/source."""
1903+ self.log.info('OpenStackAmuletDeployment: adding services')
1904+
1905+ other_services = self._determine_branch_locations(other_services)
1906+
1907+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
1908+ other_services)
1909+
1910+ services = other_services
1911+ services.append(this_service)
1912+
1913+ # Charms which should use the source config option
1914+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
1915+ 'ceph-osd', 'ceph-radosgw', 'ceph-mon']
1916+
1917+ # Charms which can not use openstack-origin, ie. many subordinates
1918+ no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
1919+ 'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
1920+ 'cinder-backup']
1921+
1922+ if self.openstack:
1923+ for svc in services:
1924+ if svc['name'] not in use_source + no_origin:
1925+ config = {'openstack-origin': self.openstack}
1926+ self.d.configure(svc['name'], config)
1927+
1928+ if self.source:
1929+ for svc in services:
1930+ if svc['name'] in use_source and svc['name'] not in no_origin:
1931+ config = {'source': self.source}
1932+ self.d.configure(svc['name'], config)
1933+
1934+ def _configure_services(self, configs):
1935+ """Configure all of the services."""
1936+ self.log.info('OpenStackAmuletDeployment: configure services')
1937+ for service, config in six.iteritems(configs):
1938+ self.d.configure(service, config)
1939+
1940+ def _auto_wait_for_status(self, message=None, exclude_services=None,
1941+ include_only=None, timeout=1800):
1942+ """Wait for all units to have a specific extended status, except
1943+ for any defined as excluded. Unless specified via message, any
1944+ status containing any case of 'ready' will be considered a match.
1945+
1946+ Examples of message usage:
1947+
1948+ Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
1949+ message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
1950+
1951+ Wait for all units to reach this status (exact match):
1952+ message = re.compile('^Unit is ready and clustered$')
1953+
1954+ Wait for all units to reach any one of these (exact match):
1955+ message = re.compile('Unit is ready|OK|Ready')
1956+
1957+ Wait for at least one unit to reach this status (exact match):
1958+ message = {'ready'}
1959+
1960+ See Amulet's sentry.wait_for_messages() for message usage detail.
1961+ https://github.com/juju/amulet/blob/master/amulet/sentry.py
1962+
1963+ :param message: Expected status match
1964+ :param exclude_services: List of juju service names to ignore,
1965+ not to be used in conjunction with include_only.
1966+ :param include_only: List of juju service names to exclusively check,
1967+ not to be used in conjunction with exclude_services.
1968+ :param timeout: Maximum time in seconds to wait for status match
1969+ :returns: None. Raises if timeout is hit.
1970+ """
1971+ self.log.info('Waiting for extended status on units...')
1972+
1973+ all_services = self.d.services.keys()
1974+
1975+ if exclude_services and include_only:
1976+ raise ValueError('exclude_services can not be used '
1977+ 'with include_only')
1978+
1979+ if message:
1980+ if isinstance(message, re._pattern_type):
1981+ match = message.pattern
1982+ else:
1983+ match = message
1984+
1985+ self.log.debug('Custom extended status wait match: '
1986+ '{}'.format(match))
1987+ else:
1988+ self.log.debug('Default extended status wait match: contains '
1989+ 'READY (case-insensitive)')
1990+ message = re.compile('.*ready.*', re.IGNORECASE)
1991+
1992+ if exclude_services:
1993+ self.log.debug('Excluding services from extended status match: '
1994+ '{}'.format(exclude_services))
1995+ else:
1996+ exclude_services = []
1997+
1998+ if include_only:
1999+ services = include_only
2000+ else:
2001+ services = list(set(all_services) - set(exclude_services))
2002+
2003+ self.log.debug('Waiting up to {}s for extended status on services: '
2004+ '{}'.format(timeout, services))
2005+ service_messages = {service: message for service in services}
2006+ self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
2007+ self.log.info('OK')
2008+
2009+ def _get_openstack_release(self):
2010+ """Get openstack release.
2011+
2012+ Return an integer representing the enum value of the openstack
2013+ release.
2014+ """
2015+ # Must be ordered by OpenStack release (not by Ubuntu release):
2016+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
2017+ self.precise_havana, self.precise_icehouse,
2018+ self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
2019+ self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
2020+ self.wily_liberty, self.trusty_mitaka,
2021+ self.xenial_mitaka) = range(14)
2022+
2023+ releases = {
2024+ ('precise', None): self.precise_essex,
2025+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
2026+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
2027+ ('precise', 'cloud:precise-havana'): self.precise_havana,
2028+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
2029+ ('trusty', None): self.trusty_icehouse,
2030+ ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
2031+ ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
2032+ ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
2033+ ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
2034+ ('utopic', None): self.utopic_juno,
2035+ ('vivid', None): self.vivid_kilo,
2036+ ('wily', None): self.wily_liberty,
2037+ ('xenial', None): self.xenial_mitaka}
2038+ return releases[(self.series, self.openstack)]
2039+
2040+ def _get_openstack_release_string(self):
2041+ """Get openstack release string.
2042+
2043+ Return a string representing the openstack release.
2044+ """
2045+ releases = OrderedDict([
2046+ ('precise', 'essex'),
2047+ ('quantal', 'folsom'),
2048+ ('raring', 'grizzly'),
2049+ ('saucy', 'havana'),
2050+ ('trusty', 'icehouse'),
2051+ ('utopic', 'juno'),
2052+ ('vivid', 'kilo'),
2053+ ('wily', 'liberty'),
2054+ ('xenial', 'mitaka'),
2055+ ])
2056+ if self.openstack:
2057+ os_origin = self.openstack.split(':')[1]
2058+ return os_origin.split('%s-' % self.series)[1].split('/')[0]
2059+ else:
2060+ return releases[self.series]
2061+
2062+ def get_ceph_expected_pools(self, radosgw=False):
2063+ """Return a list of expected ceph pools in a ceph + cinder + glance
2064+ test scenario, based on OpenStack release and whether ceph radosgw
2065+ is flagged as present or not."""
2066+
2067+ if self._get_openstack_release() >= self.trusty_kilo:
2068+ # Kilo or later
2069+ pools = [
2070+ 'rbd',
2071+ 'cinder',
2072+ 'glance'
2073+ ]
2074+ else:
2075+ # Juno or earlier
2076+ pools = [
2077+ 'data',
2078+ 'metadata',
2079+ 'rbd',
2080+ 'cinder',
2081+ 'glance'
2082+ ]
2083+
2084+ if radosgw:
2085+ pools.extend([
2086+ '.rgw.root',
2087+ '.rgw.control',
2088+ '.rgw',
2089+ '.rgw.gc',
2090+ '.users.uid'
2091+ ])
2092+
2093+ return pools
2094
2095=== added file 'charmhelpers.new/contrib/openstack/amulet/utils.py'
2096--- charmhelpers.new/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
2097+++ charmhelpers.new/contrib/openstack/amulet/utils.py 2016-01-30 12:38:43 +0000
2098@@ -0,0 +1,985 @@
2099+# Copyright 2014-2015 Canonical Limited.
2100+#
2101+# This file is part of charm-helpers.
2102+#
2103+# charm-helpers is free software: you can redistribute it and/or modify
2104+# it under the terms of the GNU Lesser General Public License version 3 as
2105+# published by the Free Software Foundation.
2106+#
2107+# charm-helpers is distributed in the hope that it will be useful,
2108+# but WITHOUT ANY WARRANTY; without even the implied warranty of
2109+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2110+# GNU Lesser General Public License for more details.
2111+#
2112+# You should have received a copy of the GNU Lesser General Public License
2113+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
2114+
2115+import amulet
2116+import json
2117+import logging
2118+import os
2119+import re
2120+import six
2121+import time
2122+import urllib
2123+
2124+import cinderclient.v1.client as cinder_client
2125+import glanceclient.v1.client as glance_client
2126+import heatclient.v1.client as heat_client
2127+import keystoneclient.v2_0 as keystone_client
2128+import novaclient.v1_1.client as nova_client
2129+import pika
2130+import swiftclient
2131+
2132+from charmhelpers.contrib.amulet.utils import (
2133+ AmuletUtils
2134+)
2135+
2136+DEBUG = logging.DEBUG
2137+ERROR = logging.ERROR
2138+
2139+
2140+class OpenStackAmuletUtils(AmuletUtils):
2141+ """OpenStack amulet utilities.
2142+
2143+ This class inherits from AmuletUtils and has additional support
2144+ that is specifically for use by OpenStack charm tests.
2145+ """
2146+
2147+ def __init__(self, log_level=ERROR):
2148+ """Initialize the deployment environment."""
2149+ super(OpenStackAmuletUtils, self).__init__(log_level)
2150+
2151+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
2152+ public_port, expected):
2153+ """Validate endpoint data.
2154+
2155+ Validate actual endpoint data vs expected endpoint data. The ports
2156+ are used to find the matching endpoint.
2157+ """
2158+ self.log.debug('Validating endpoint data...')
2159+ self.log.debug('actual: {}'.format(repr(endpoints)))
2160+ found = False
2161+ for ep in endpoints:
2162+ self.log.debug('endpoint: {}'.format(repr(ep)))
2163+ if (admin_port in ep.adminurl and
2164+ internal_port in ep.internalurl and
2165+ public_port in ep.publicurl):
2166+ found = True
2167+ actual = {'id': ep.id,
2168+ 'region': ep.region,
2169+ 'adminurl': ep.adminurl,
2170+ 'internalurl': ep.internalurl,
2171+ 'publicurl': ep.publicurl,
2172+ 'service_id': ep.service_id}
2173+ ret = self._validate_dict_data(expected, actual)
2174+ if ret:
2175+ return 'unexpected endpoint data - {}'.format(ret)
2176+
2177+ if not found:
2178+ return 'endpoint not found'
2179+
2180+ def validate_svc_catalog_endpoint_data(self, expected, actual):
2181+ """Validate service catalog endpoint data.
2182+
2183+ Validate a list of actual service catalog endpoints vs a list of
2184+ expected service catalog endpoints.
2185+ """
2186+ self.log.debug('Validating service catalog endpoint data...')
2187+ self.log.debug('actual: {}'.format(repr(actual)))
2188+ for k, v in six.iteritems(expected):
2189+ if k in actual:
2190+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
2191+ if ret:
2192+ return self.endpoint_error(k, ret)
2193+ else:
2194+ return "endpoint {} does not exist".format(k)
2195+ return ret
2196+
2197+ def validate_tenant_data(self, expected, actual):
2198+ """Validate tenant data.
2199+
2200+ Validate a list of actual tenant data vs list of expected tenant
2201+ data.
2202+ """
2203+ self.log.debug('Validating tenant data...')
2204+ self.log.debug('actual: {}'.format(repr(actual)))
2205+ for e in expected:
2206+ found = False
2207+ for act in actual:
2208+ a = {'enabled': act.enabled, 'description': act.description,
2209+ 'name': act.name, 'id': act.id}
2210+ if e['name'] == a['name']:
2211+ found = True
2212+ ret = self._validate_dict_data(e, a)
2213+ if ret:
2214+ return "unexpected tenant data - {}".format(ret)
2215+ if not found:
2216+ return "tenant {} does not exist".format(e['name'])
2217+ return ret
2218+
2219+ def validate_role_data(self, expected, actual):
2220+ """Validate role data.
2221+
2222+ Validate a list of actual role data vs a list of expected role
2223+ data.
2224+ """
2225+ self.log.debug('Validating role data...')
2226+ self.log.debug('actual: {}'.format(repr(actual)))
2227+ for e in expected:
2228+ found = False
2229+ for act in actual:
2230+ a = {'name': act.name, 'id': act.id}
2231+ if e['name'] == a['name']:
2232+ found = True
2233+ ret = self._validate_dict_data(e, a)
2234+ if ret:
2235+ return "unexpected role data - {}".format(ret)
2236+ if not found:
2237+ return "role {} does not exist".format(e['name'])
2238+ return ret
2239+
2240+ def validate_user_data(self, expected, actual):
2241+ """Validate user data.
2242+
2243+ Validate a list of actual user data vs a list of expected user
2244+ data.
2245+ """
2246+ self.log.debug('Validating user data...')
2247+ self.log.debug('actual: {}'.format(repr(actual)))
2248+ for e in expected:
2249+ found = False
2250+ for act in actual:
2251+ a = {'enabled': act.enabled, 'name': act.name,
2252+ 'email': act.email, 'tenantId': act.tenantId,
2253+ 'id': act.id}
2254+ if e['name'] == a['name']:
2255+ found = True
2256+ ret = self._validate_dict_data(e, a)
2257+ if ret:
2258+ return "unexpected user data - {}".format(ret)
2259+ if not found:
2260+ return "user {} does not exist".format(e['name'])
2261+ return ret
2262+
2263+ def validate_flavor_data(self, expected, actual):
2264+ """Validate flavor data.
2265+
2266+ Validate a list of actual flavors vs a list of expected flavors.
2267+ """
2268+ self.log.debug('Validating flavor data...')
2269+ self.log.debug('actual: {}'.format(repr(actual)))
2270+ act = [a.name for a in actual]
2271+ return self._validate_list_data(expected, act)
2272+
2273+ def tenant_exists(self, keystone, tenant):
2274+ """Return True if tenant exists."""
2275+ self.log.debug('Checking if tenant exists ({})...'.format(tenant))
2276+ return tenant in [t.name for t in keystone.tenants.list()]
2277+
2278+ def authenticate_cinder_admin(self, keystone_sentry, username,
2279+ password, tenant):
2280+ """Authenticates admin user with cinder."""
2281+ # NOTE(beisner): cinder python client doesn't accept tokens.
2282+ service_ip = \
2283+ keystone_sentry.relation('shared-db',
2284+ 'mysql:shared-db')['private-address']
2285+ ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
2286+ return cinder_client.Client(username, password, tenant, ept)
2287+
2288+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
2289+ tenant):
2290+ """Authenticates admin user with the keystone admin endpoint."""
2291+ self.log.debug('Authenticating keystone admin...')
2292+ unit = keystone_sentry
2293+ service_ip = unit.relation('shared-db',
2294+ 'mysql:shared-db')['private-address']
2295+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
2296+ return keystone_client.Client(username=user, password=password,
2297+ tenant_name=tenant, auth_url=ep)
2298+
2299+ def authenticate_keystone_user(self, keystone, user, password, tenant):
2300+ """Authenticates a regular user with the keystone public endpoint."""
2301+ self.log.debug('Authenticating keystone user ({})...'.format(user))
2302+ ep = keystone.service_catalog.url_for(service_type='identity',
2303+ endpoint_type='publicURL')
2304+ return keystone_client.Client(username=user, password=password,
2305+ tenant_name=tenant, auth_url=ep)
2306+
2307+ def authenticate_glance_admin(self, keystone):
2308+ """Authenticates admin user with glance."""
2309+ self.log.debug('Authenticating glance admin...')
2310+ ep = keystone.service_catalog.url_for(service_type='image',
2311+ endpoint_type='adminURL')
2312+ return glance_client.Client(ep, token=keystone.auth_token)
2313+
2314+ def authenticate_heat_admin(self, keystone):
2315+ """Authenticates the admin user with heat."""
2316+ self.log.debug('Authenticating heat admin...')
2317+ ep = keystone.service_catalog.url_for(service_type='orchestration',
2318+ endpoint_type='publicURL')
2319+ return heat_client.Client(endpoint=ep, token=keystone.auth_token)
2320+
2321+ def authenticate_nova_user(self, keystone, user, password, tenant):
2322+ """Authenticates a regular user with nova-api."""
2323+ self.log.debug('Authenticating nova user ({})...'.format(user))
2324+ ep = keystone.service_catalog.url_for(service_type='identity',
2325+ endpoint_type='publicURL')
2326+ return nova_client.Client(username=user, api_key=password,
2327+ project_id=tenant, auth_url=ep)
2328+
2329+ def authenticate_swift_user(self, keystone, user, password, tenant):
2330+ """Authenticates a regular user with swift api."""
2331+ self.log.debug('Authenticating swift user ({})...'.format(user))
2332+ ep = keystone.service_catalog.url_for(service_type='identity',
2333+ endpoint_type='publicURL')
2334+ return swiftclient.Connection(authurl=ep,
2335+ user=user,
2336+ key=password,
2337+ tenant_name=tenant,
2338+ auth_version='2.0')
2339+
2340+ def create_cirros_image(self, glance, image_name):
2341+ """Download the latest cirros image and upload it to glance,
2342+ validate and return a resource pointer.
2343+
2344+ :param glance: pointer to authenticated glance connection
2345+ :param image_name: display name for new image
2346+ :returns: glance image pointer
2347+ """
2348+ self.log.debug('Creating glance cirros image '
2349+ '({})...'.format(image_name))
2350+
2351+ # Download cirros image
2352+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
2353+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
2354+ if http_proxy:
2355+ proxies = {'http': http_proxy}
2356+ opener = urllib.FancyURLopener(proxies)
2357+ else:
2358+ opener = urllib.FancyURLopener()
2359+
2360+ f = opener.open('http://download.cirros-cloud.net/version/released')
2361+ version = f.read().strip()
2362+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
2363+ local_path = os.path.join('tests', cirros_img)
2364+
2365+ if not os.path.exists(local_path):
2366+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
2367+ version, cirros_img)
2368+ opener.retrieve(cirros_url, local_path)
2369+ f.close()
2370+
2371+ # Create glance image
2372+ with open(local_path) as f:
2373+ image = glance.images.create(name=image_name, is_public=True,
2374+ disk_format='qcow2',
2375+ container_format='bare', data=f)
2376+
2377+ # Wait for image to reach active status
2378+ img_id = image.id
2379+ ret = self.resource_reaches_status(glance.images, img_id,
2380+ expected_stat='active',
2381+ msg='Image status wait')
2382+ if not ret:
2383+ msg = 'Glance image failed to reach expected state.'
2384+ amulet.raise_status(amulet.FAIL, msg=msg)
2385+
2386+ # Re-validate new image
2387+ self.log.debug('Validating image attributes...')
2388+ val_img_name = glance.images.get(img_id).name
2389+ val_img_stat = glance.images.get(img_id).status
2390+ val_img_pub = glance.images.get(img_id).is_public
2391+ val_img_cfmt = glance.images.get(img_id).container_format
2392+ val_img_dfmt = glance.images.get(img_id).disk_format
2393+ msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
2394+ 'container fmt:{} disk fmt:{}'.format(
2395+ val_img_name, val_img_pub, img_id,
2396+ val_img_stat, val_img_cfmt, val_img_dfmt))
2397+
2398+ if val_img_name == image_name and val_img_stat == 'active' \
2399+ and val_img_pub is True and val_img_cfmt == 'bare' \
2400+ and val_img_dfmt == 'qcow2':
2401+ self.log.debug(msg_attr)
2402+ else:
2403+ msg = ('Volume validation failed, {}'.format(msg_attr))
2404+ amulet.raise_status(amulet.FAIL, msg=msg)
2405+
2406+ return image
2407+
2408+ def delete_image(self, glance, image):
2409+ """Delete the specified image."""
2410+
2411+ # /!\ DEPRECATION WARNING
2412+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2413+ 'delete_resource instead of delete_image.')
2414+ self.log.debug('Deleting glance image ({})...'.format(image))
2415+ return self.delete_resource(glance.images, image, msg='glance image')
2416+
2417+ def create_instance(self, nova, image_name, instance_name, flavor):
2418+ """Create the specified instance."""
2419+ self.log.debug('Creating instance '
2420+ '({}|{}|{})'.format(instance_name, image_name, flavor))
2421+ image = nova.images.find(name=image_name)
2422+ flavor = nova.flavors.find(name=flavor)
2423+ instance = nova.servers.create(name=instance_name, image=image,
2424+ flavor=flavor)
2425+
2426+ count = 1
2427+ status = instance.status
2428+ while status != 'ACTIVE' and count < 60:
2429+ time.sleep(3)
2430+ instance = nova.servers.get(instance.id)
2431+ status = instance.status
2432+ self.log.debug('instance status: {}'.format(status))
2433+ count += 1
2434+
2435+ if status != 'ACTIVE':
2436+ self.log.error('instance creation timed out')
2437+ return None
2438+
2439+ return instance
2440+
2441+ def delete_instance(self, nova, instance):
2442+ """Delete the specified instance."""
2443+
2444+ # /!\ DEPRECATION WARNING
2445+ self.log.warn('/!\\ DEPRECATION WARNING: use '
2446+ 'delete_resource instead of delete_instance.')
2447+ self.log.debug('Deleting instance ({})...'.format(instance))
2448+ return self.delete_resource(nova.servers, instance,
2449+ msg='nova instance')
2450+
2451+ def create_or_get_keypair(self, nova, keypair_name="testkey"):
2452+ """Create a new keypair, or return pointer if it already exists."""
2453+ try:
2454+ _keypair = nova.keypairs.get(keypair_name)
2455+ self.log.debug('Keypair ({}) already exists, '
2456+ 'using it.'.format(keypair_name))
2457+ return _keypair
2458+ except:
2459+ self.log.debug('Keypair ({}) does not exist, '
2460+ 'creating it.'.format(keypair_name))
2461+
2462+ _keypair = nova.keypairs.create(name=keypair_name)
2463+ return _keypair
2464+
2465+ def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
2466+ img_id=None, src_vol_id=None, snap_id=None):
2467+ """Create cinder volume, optionally from a glance image, OR
2468+ optionally as a clone of an existing volume, OR optionally
2469+ from a snapshot. Wait for the new volume status to reach
2470+ the expected status, validate and return a resource pointer.
2471+
2472+ :param vol_name: cinder volume display name
2473+ :param vol_size: size in gigabytes
2474+ :param img_id: optional glance image id
2475+ :param src_vol_id: optional source volume id to clone
2476+ :param snap_id: optional snapshot id to use
2477+ :returns: cinder volume pointer
2478+ """
2479+ # Handle parameter input and avoid impossible combinations
2480+ if img_id and not src_vol_id and not snap_id:
2481+ # Create volume from image
2482+ self.log.debug('Creating cinder volume from glance image...')
2483+ bootable = 'true'
2484+ elif src_vol_id and not img_id and not snap_id:
2485+ # Clone an existing volume
2486+ self.log.debug('Cloning cinder volume...')
2487+ bootable = cinder.volumes.get(src_vol_id).bootable
2488+ elif snap_id and not src_vol_id and not img_id:
2489+ # Create volume from snapshot
2490+ self.log.debug('Creating cinder volume from snapshot...')
2491+ snap = cinder.volume_snapshots.find(id=snap_id)
2492+ vol_size = snap.size
2493+ snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
2494+ bootable = cinder.volumes.get(snap_vol_id).bootable
2495+ elif not img_id and not src_vol_id and not snap_id:
2496+ # Create volume
2497+ self.log.debug('Creating cinder volume...')
2498+ bootable = 'false'
2499+ else:
2500+ # Impossible combination of parameters
2501+ msg = ('Invalid method use - name:{} size:{} img_id:{} '
2502+ 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
2503+ img_id, src_vol_id,
2504+ snap_id))
2505+ amulet.raise_status(amulet.FAIL, msg=msg)
2506+
2507+ # Create new volume
2508+ try:
2509+ vol_new = cinder.volumes.create(display_name=vol_name,
2510+ imageRef=img_id,
2511+ size=vol_size,
2512+ source_volid=src_vol_id,
2513+ snapshot_id=snap_id)
2514+ vol_id = vol_new.id
2515+ except Exception as e:
2516+ msg = 'Failed to create volume: {}'.format(e)
2517+ amulet.raise_status(amulet.FAIL, msg=msg)
2518+
2519+ # Wait for volume to reach available status
2520+ ret = self.resource_reaches_status(cinder.volumes, vol_id,
2521+ expected_stat="available",
2522+ msg="Volume status wait")
2523+ if not ret:
2524+ msg = 'Cinder volume failed to reach expected state.'
2525+ amulet.raise_status(amulet.FAIL, msg=msg)
2526+
2527+ # Re-validate new volume
2528+ self.log.debug('Validating volume attributes...')
2529+ val_vol_name = cinder.volumes.get(vol_id).display_name
2530+ val_vol_boot = cinder.volumes.get(vol_id).bootable
2531+ val_vol_stat = cinder.volumes.get(vol_id).status
2532+ val_vol_size = cinder.volumes.get(vol_id).size
2533+ msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
2534+ '{} size:{}'.format(val_vol_name, vol_id,
2535+ val_vol_stat, val_vol_boot,
2536+ val_vol_size))
2537+
2538+ if val_vol_boot == bootable and val_vol_stat == 'available' \
2539+ and val_vol_name == vol_name and val_vol_size == vol_size:
2540+ self.log.debug(msg_attr)
2541+ else:
2542+ msg = ('Volume validation failed, {}'.format(msg_attr))
2543+ amulet.raise_status(amulet.FAIL, msg=msg)
2544+
2545+ return vol_new
2546+
2547+ def delete_resource(self, resource, resource_id,
2548+ msg="resource", max_wait=120):
2549+ """Delete one openstack resource, such as one instance, keypair,
2550+ image, volume, stack, etc., and confirm deletion within max wait time.
2551+
2552+ :param resource: pointer to os resource type, ex:glance_client.images
2553+ :param resource_id: unique name or id for the openstack resource
2554+ :param msg: text to identify purpose in logging
2555+ :param max_wait: maximum wait time in seconds
2556+ :returns: True if successful, otherwise False
2557+ """
2558+ self.log.debug('Deleting OpenStack resource '
2559+ '{} ({})'.format(resource_id, msg))
2560+ num_before = len(list(resource.list()))
2561+ resource.delete(resource_id)
2562+
2563+ tries = 0
2564+ num_after = len(list(resource.list()))
2565+ while num_after != (num_before - 1) and tries < (max_wait / 4):
2566+ self.log.debug('{} delete check: '
2567+ '{} [{}:{}] {}'.format(msg, tries,
2568+ num_before,
2569+ num_after,
2570+ resource_id))
2571+ time.sleep(4)
2572+ num_after = len(list(resource.list()))
2573+ tries += 1
2574+
2575+ self.log.debug('{}: expected, actual count = {}, '
2576+ '{}'.format(msg, num_before - 1, num_after))
2577+
2578+ if num_after == (num_before - 1):
2579+ return True
2580+ else:
2581+ self.log.error('{} delete timed out'.format(msg))
2582+ return False
2583+
2584+ def resource_reaches_status(self, resource, resource_id,
2585+ expected_stat='available',
2586+ msg='resource', max_wait=120):
2587+ """Wait for an openstack resources status to reach an
2588+ expected status within a specified time. Useful to confirm that
2589+ nova instances, cinder vols, snapshots, glance images, heat stacks
2590+ and other resources eventually reach the expected status.
2591+
2592+ :param resource: pointer to os resource type, ex: heat_client.stacks
2593+ :param resource_id: unique id for the openstack resource
2594+ :param expected_stat: status to expect resource to reach
2595+ :param msg: text to identify purpose in logging
2596+ :param max_wait: maximum wait time in seconds
2597+ :returns: True if successful, False if status is not reached
2598+ """
2599+
2600+ tries = 0
2601+ resource_stat = resource.get(resource_id).status
2602+ while resource_stat != expected_stat and tries < (max_wait / 4):
2603+ self.log.debug('{} status check: '
2604+ '{} [{}:{}] {}'.format(msg, tries,
2605+ resource_stat,
2606+ expected_stat,
2607+ resource_id))
2608+ time.sleep(4)
2609+ resource_stat = resource.get(resource_id).status
2610+ tries += 1
2611+
2612+ self.log.debug('{}: expected, actual status = {}, '
2613+ '{}'.format(msg, resource_stat, expected_stat))
2614+
2615+ if resource_stat == expected_stat:
2616+ return True
2617+ else:
2618+ self.log.debug('{} never reached expected status: '
2619+ '{}'.format(resource_id, expected_stat))
2620+ return False
2621+
2622+ def get_ceph_osd_id_cmd(self, index):
2623+ """Produce a shell command that will return a ceph-osd id."""
2624+ return ("`initctl list | grep 'ceph-osd ' | "
2625+ "awk 'NR=={} {{ print $2 }}' | "
2626+ "grep -o '[0-9]*'`".format(index + 1))
2627+
2628+ def get_ceph_pools(self, sentry_unit):
2629+ """Return a dict of ceph pools from a single ceph unit, with
2630+ pool name as keys, pool id as vals."""
2631+ pools = {}
2632+ cmd = 'sudo ceph osd lspools'
2633+ output, code = sentry_unit.run(cmd)
2634+ if code != 0:
2635+ msg = ('{} `{}` returned {} '
2636+ '{}'.format(sentry_unit.info['unit_name'],
2637+ cmd, code, output))
2638+ amulet.raise_status(amulet.FAIL, msg=msg)
2639+
2640+ # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
2641+ for pool in str(output).split(','):
2642+ pool_id_name = pool.split(' ')
2643+ if len(pool_id_name) == 2:
2644+ pool_id = pool_id_name[0]
2645+ pool_name = pool_id_name[1]
2646+ pools[pool_name] = int(pool_id)
2647+
2648+ self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
2649+ pools))
2650+ return pools
2651+
2652+ def get_ceph_df(self, sentry_unit):
2653+ """Return dict of ceph df json output, including ceph pool state.
2654+
2655+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2656+ :returns: Dict of ceph df output
2657+ """
2658+ cmd = 'sudo ceph df --format=json'
2659+ output, code = sentry_unit.run(cmd)
2660+ if code != 0:
2661+ msg = ('{} `{}` returned {} '
2662+ '{}'.format(sentry_unit.info['unit_name'],
2663+ cmd, code, output))
2664+ amulet.raise_status(amulet.FAIL, msg=msg)
2665+ return json.loads(output)
2666+
2667+ def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
2668+ """Take a sample of attributes of a ceph pool, returning ceph
2669+ pool name, object count and disk space used for the specified
2670+ pool ID number.
2671+
2672+ :param sentry_unit: Pointer to amulet sentry instance (juju unit)
2673+ :param pool_id: Ceph pool ID
2674+ :returns: List of pool name, object count, kb disk space used
2675+ """
2676+ df = self.get_ceph_df(sentry_unit)
2677+ pool_name = df['pools'][pool_id]['name']
2678+ obj_count = df['pools'][pool_id]['stats']['objects']
2679+ kb_used = df['pools'][pool_id]['stats']['kb_used']
2680+ self.log.debug('Ceph {} pool (ID {}): {} objects, '
2681+ '{} kb used'.format(pool_name, pool_id,
2682+ obj_count, kb_used))
2683+ return pool_name, obj_count, kb_used
2684+
2685+ def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
2686+ """Validate ceph pool samples taken over time, such as pool
2687+ object counts or pool kb used, before adding, after adding, and
2688+ after deleting items which affect those pool attributes. The
2689+ 2nd element is expected to be greater than the 1st; 3rd is expected
2690+ to be less than the 2nd.
2691+
2692+ :param samples: List containing 3 data samples
2693+ :param sample_type: String for logging and usage context
2694+ :returns: None if successful, Failure message otherwise
2695+ """
2696+ original, created, deleted = range(3)
2697+ if samples[created] <= samples[original] or \
2698+ samples[deleted] >= samples[created]:
2699+ return ('Ceph {} samples ({}) '
2700+ 'unexpected.'.format(sample_type, samples))
2701+ else:
2702+ self.log.debug('Ceph {} samples (OK): '
2703+ '{}'.format(sample_type, samples))
2704+ return None
2705+
2706+ # rabbitmq/amqp specific helpers:
2707+
2708+ def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
2709+ """Wait for rmq units extended status to show cluster readiness,
2710+ after an optional initial sleep period. Initial sleep is likely
2711+ necessary to be effective following a config change, as status
2712+ message may not instantly update to non-ready."""
2713+
2714+ if init_sleep:
2715+ time.sleep(init_sleep)
2716+
2717+ message = re.compile('^Unit is ready and clustered$')
2718+ deployment._auto_wait_for_status(message=message,
2719+ timeout=timeout,
2720+ include_only=['rabbitmq-server'])
2721+
2722+ def add_rmq_test_user(self, sentry_units,
2723+ username="testuser1", password="changeme"):
2724+ """Add a test user via the first rmq juju unit, check connection as
2725+ the new user against all sentry units.
2726+
2727+ :param sentry_units: list of sentry unit pointers
2728+ :param username: amqp user name, default to testuser1
2729+ :param password: amqp user password
2730+ :returns: None if successful. Raise on error.
2731+ """
2732+ self.log.debug('Adding rmq user ({})...'.format(username))
2733+
2734+ # Check that user does not already exist
2735+ cmd_user_list = 'rabbitmqctl list_users'
2736+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2737+ if username in output:
2738+ self.log.warning('User ({}) already exists, returning '
2739+ 'gracefully.'.format(username))
2740+ return
2741+
2742+ perms = '".*" ".*" ".*"'
2743+ cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
2744+ 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
2745+
2746+ # Add user via first unit
2747+ for cmd in cmds:
2748+ output, _ = self.run_cmd_unit(sentry_units[0], cmd)
2749+
2750+ # Check connection against the other sentry_units
2751+ self.log.debug('Checking user connect against units...')
2752+ for sentry_unit in sentry_units:
2753+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
2754+ username=username,
2755+ password=password)
2756+ connection.close()
2757+
2758+ def delete_rmq_test_user(self, sentry_units, username="testuser1"):
2759+ """Delete a rabbitmq user via the first rmq juju unit.
2760+
2761+ :param sentry_units: list of sentry unit pointers
2762+ :param username: amqp user name, default to testuser1
2763+ :param password: amqp user password
2764+ :returns: None if successful or no such user.
2765+ """
2766+ self.log.debug('Deleting rmq user ({})...'.format(username))
2767+
2768+ # Check that the user exists
2769+ cmd_user_list = 'rabbitmqctl list_users'
2770+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
2771+
2772+ if username not in output:
2773+ self.log.warning('User ({}) does not exist, returning '
2774+ 'gracefully.'.format(username))
2775+ return
2776+
2777+ # Delete the user
2778+ cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
2779+ output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
2780+
2781+ def get_rmq_cluster_status(self, sentry_unit):
2782+ """Execute rabbitmq cluster status command on a unit and return
2783+ the full output.
2784+
2785+ :param unit: sentry unit
2786+ :returns: String containing console output of cluster status command
2787+ """
2788+ cmd = 'rabbitmqctl cluster_status'
2789+ output, _ = self.run_cmd_unit(sentry_unit, cmd)
2790+ self.log.debug('{} cluster_status:\n{}'.format(
2791+ sentry_unit.info['unit_name'], output))
2792+ return str(output)
2793+
2794+ def get_rmq_cluster_running_nodes(self, sentry_unit):
2795+ """Parse rabbitmqctl cluster_status output string, return list of
2796+ running rabbitmq cluster nodes.
2797+
2798+ :param unit: sentry unit
2799+ :returns: List containing node names of running nodes
2800+ """
2801+ # NOTE(beisner): rabbitmqctl cluster_status output is not
2802+ # json-parsable, do string chop foo, then json.loads that.
2803+ str_stat = self.get_rmq_cluster_status(sentry_unit)
2804+ if 'running_nodes' in str_stat:
2805+ pos_start = str_stat.find("{running_nodes,") + 15
2806+ pos_end = str_stat.find("]},", pos_start) + 1
2807+ str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
2808+ run_nodes = json.loads(str_run_nodes)
2809+ return run_nodes
2810+ else:
2811+ return []
2812+
2813+ def validate_rmq_cluster_running_nodes(self, sentry_units):
2814+ """Check that all rmq unit hostnames are represented in the
2815+ cluster_status output of all units.
2816+
2817+ :param host_names: dict of juju unit names to host names
2818+ :param units: list of sentry unit pointers (all rmq units)
2819+ :returns: None if successful, otherwise return error message
2820+ """
2821+ host_names = self.get_unit_hostnames(sentry_units)
2822+ errors = []
2823+
2824+ # Query every unit for cluster_status running nodes
2825+ for query_unit in sentry_units:
2826+ query_unit_name = query_unit.info['unit_name']
2827+ running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
2828+
2829+ # Confirm that every unit is represented in the queried unit's
2830+ # cluster_status running nodes output.
2831+ for validate_unit in sentry_units:
2832+ val_host_name = host_names[validate_unit.info['unit_name']]
2833+ val_node_name = 'rabbit@{}'.format(val_host_name)
2834+
2835+ if val_node_name not in running_nodes:
2836+ errors.append('Cluster member check failed on {}: {} not '
2837+ 'in {}\n'.format(query_unit_name,
2838+ val_node_name,
2839+ running_nodes))
2840+ if errors:
2841+ return ''.join(errors)
2842+
    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
        """Check a single juju rmq unit for ssl and port in the config file.

        :param sentry_unit: sentry unit pointer
        :param port: optional ssl port to require; when None, only the
            presence of ssl configuration is checked
        :returns: True when ssl (and, if given, the port) is present in
            the config, False otherwise. Raises amulet FAIL on an
            unexpected combination of findings.
        """
        host = sentry_unit.info['public-address']
        unit_name = sentry_unit.info['unit_name']

        conf_file = '/etc/rabbitmq/rabbitmq.config'
        conf_contents = str(self.file_contents_safe(sentry_unit,
                                                    conf_file, max_wait=16))
        # Checks
        # NOTE(review): these are plain substring checks on the raw config
        # text, not config parsing — 'ssl' or the port number appearing in
        # any context would match.
        conf_ssl = 'ssl' in conf_contents
        conf_port = str(port) in conf_contents

        # Port explicitly checked in config
        if port and conf_port and conf_ssl:
            self.log.debug('SSL is enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif port and not conf_port and conf_ssl:
            # ssl on, but not on the requested port
            self.log.debug('SSL is enabled @{} but not on port {} '
                           '({})'.format(host, port, unit_name))
            return False
        # Port not checked (useful when checking that ssl is disabled)
        elif not port and conf_ssl:
            self.log.debug('SSL is enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif not conf_ssl:
            self.log.debug('SSL not enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return False
        else:
            # Should be unreachable given the branches above; fail loudly.
            msg = ('Unknown condition when checking SSL status @{}:{} '
                   '({})'.format(host, port, unit_name))
            amulet.raise_status(amulet.FAIL, msg)
2877+
2878+ def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
2879+ """Check that ssl is enabled on rmq juju sentry units.
2880+
2881+ :param sentry_units: list of all rmq sentry units
2882+ :param port: optional ssl port override to validate
2883+ :returns: None if successful, otherwise return error message
2884+ """
2885+ for sentry_unit in sentry_units:
2886+ if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
2887+ return ('Unexpected condition: ssl is disabled on unit '
2888+ '({})'.format(sentry_unit.info['unit_name']))
2889+ return None
2890+
2891+ def validate_rmq_ssl_disabled_units(self, sentry_units):
2892+ """Check that ssl is enabled on listed rmq juju sentry units.
2893+
2894+ :param sentry_units: list of all rmq sentry units
2895+ :returns: True if successful. Raise on error.
2896+ """
2897+ for sentry_unit in sentry_units:
2898+ if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
2899+ return ('Unexpected condition: ssl is enabled on unit '
2900+ '({})'.format(sentry_unit.info['unit_name']))
2901+ return None
2902+
2903+ def configure_rmq_ssl_on(self, sentry_units, deployment,
2904+ port=None, max_wait=60):
2905+ """Turn ssl charm config option on, with optional non-default
2906+ ssl port specification. Confirm that it is enabled on every
2907+ unit.
2908+
2909+ :param sentry_units: list of sentry units
2910+ :param deployment: amulet deployment object pointer
2911+ :param port: amqp port, use defaults if None
2912+ :param max_wait: maximum time to wait in seconds to confirm
2913+ :returns: None if successful. Raise on error.
2914+ """
2915+ self.log.debug('Setting ssl charm config option: on')
2916+
2917+ # Enable RMQ SSL
2918+ config = {'ssl': 'on'}
2919+ if port:
2920+ config['ssl_port'] = port
2921+
2922+ deployment.d.configure('rabbitmq-server', config)
2923+
2924+ # Wait for unit status
2925+ self.rmq_wait_for_cluster(deployment)
2926+
2927+ # Confirm
2928+ tries = 0
2929+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2930+ while ret and tries < (max_wait / 4):
2931+ time.sleep(4)
2932+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2933+ ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
2934+ tries += 1
2935+
2936+ if ret:
2937+ amulet.raise_status(amulet.FAIL, ret)
2938+
2939+ def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
2940+ """Turn ssl charm config option off, confirm that it is disabled
2941+ on every unit.
2942+
2943+ :param sentry_units: list of sentry units
2944+ :param deployment: amulet deployment object pointer
2945+ :param max_wait: maximum time to wait in seconds to confirm
2946+ :returns: None if successful. Raise on error.
2947+ """
2948+ self.log.debug('Setting ssl charm config option: off')
2949+
2950+ # Disable RMQ SSL
2951+ config = {'ssl': 'off'}
2952+ deployment.d.configure('rabbitmq-server', config)
2953+
2954+ # Wait for unit status
2955+ self.rmq_wait_for_cluster(deployment)
2956+
2957+ # Confirm
2958+ tries = 0
2959+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2960+ while ret and tries < (max_wait / 4):
2961+ time.sleep(4)
2962+ self.log.debug('Attempt {}: {}'.format(tries, ret))
2963+ ret = self.validate_rmq_ssl_disabled_units(sentry_units)
2964+ tries += 1
2965+
2966+ if ret:
2967+ amulet.raise_status(amulet.FAIL, ret)
2968+
2969+ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
2970+ port=None, fatal=True,
2971+ username="testuser1", password="changeme"):
2972+ """Establish and return a pika amqp connection to the rabbitmq service
2973+ running on a rmq juju unit.
2974+
2975+ :param sentry_unit: sentry unit pointer
2976+ :param ssl: boolean, default to False
2977+ :param port: amqp port, use defaults if None
2978+ :param fatal: boolean, default to True (raises on connect error)
2979+ :param username: amqp user name, default to testuser1
2980+ :param password: amqp user password
2981+ :returns: pika amqp connection pointer or None if failed and non-fatal
2982+ """
2983+ host = sentry_unit.info['public-address']
2984+ unit_name = sentry_unit.info['unit_name']
2985+
2986+ # Default port logic if port is not specified
2987+ if ssl and not port:
2988+ port = 5671
2989+ elif not ssl and not port:
2990+ port = 5672
2991+
2992+ self.log.debug('Connecting to amqp on {}:{} ({}) as '
2993+ '{}...'.format(host, port, unit_name, username))
2994+
2995+ try:
2996+ credentials = pika.PlainCredentials(username, password)
2997+ parameters = pika.ConnectionParameters(host=host, port=port,
2998+ credentials=credentials,
2999+ ssl=ssl,
3000+ connection_attempts=3,
3001+ retry_delay=5,
3002+ socket_timeout=1)
3003+ connection = pika.BlockingConnection(parameters)
3004+ assert connection.server_properties['product'] == 'RabbitMQ'
3005+ self.log.debug('Connect OK')
3006+ return connection
3007+ except Exception as e:
3008+ msg = ('amqp connection failed to {}:{} as '
3009+ '{} ({})'.format(host, port, username, str(e)))
3010+ if fatal:
3011+ amulet.raise_status(amulet.FAIL, msg)
3012+ else:
3013+ self.log.warn(msg)
3014+ return None
3015+
3016+ def publish_amqp_message_by_unit(self, sentry_unit, message,
3017+ queue="test", ssl=False,
3018+ username="testuser1",
3019+ password="changeme",
3020+ port=None):
3021+ """Publish an amqp message to a rmq juju unit.
3022+
3023+ :param sentry_unit: sentry unit pointer
3024+ :param message: amqp message string
3025+ :param queue: message queue, default to test
3026+ :param username: amqp user name, default to testuser1
3027+ :param password: amqp user password
3028+ :param ssl: boolean, default to False
3029+ :param port: amqp port, use defaults if None
3030+ :returns: None. Raises exception if publish failed.
3031+ """
3032+ self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
3033+ message))
3034+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
3035+ port=port,
3036+ username=username,
3037+ password=password)
3038+
3039+ # NOTE(beisner): extra debug here re: pika hang potential:
3040+ # https://github.com/pika/pika/issues/297
3041+ # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
3042+ self.log.debug('Defining channel...')
3043+ channel = connection.channel()
3044+ self.log.debug('Declaring queue...')
3045+ channel.queue_declare(queue=queue, auto_delete=False, durable=True)
3046+ self.log.debug('Publishing message...')
3047+ channel.basic_publish(exchange='', routing_key=queue, body=message)
3048+ self.log.debug('Closing channel...')
3049+ channel.close()
3050+ self.log.debug('Closing connection...')
3051+ connection.close()
3052+
3053+ def get_amqp_message_by_unit(self, sentry_unit, queue="test",
3054+ username="testuser1",
3055+ password="changeme",
3056+ ssl=False, port=None):
3057+ """Get an amqp message from a rmq juju unit.
3058+
3059+ :param sentry_unit: sentry unit pointer
3060+ :param queue: message queue, default to test
3061+ :param username: amqp user name, default to testuser1
3062+ :param password: amqp user password
3063+ :param ssl: boolean, default to False
3064+ :param port: amqp port, use defaults if None
3065+ :returns: amqp message body as string. Raise if get fails.
3066+ """
3067+ connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
3068+ port=port,
3069+ username=username,
3070+ password=password)
3071+ channel = connection.channel()
3072+ method_frame, _, body = channel.basic_get(queue)
3073+
3074+ if method_frame:
3075+ self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
3076+ body))
3077+ channel.basic_ack(method_frame.delivery_tag)
3078+ channel.close()
3079+ connection.close()
3080+ return body
3081+ else:
3082+ msg = 'No message retrieved.'
3083+ amulet.raise_status(amulet.FAIL, msg)
3084
3085=== added file 'charmhelpers.new/contrib/openstack/context.py'
3086--- charmhelpers.new/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
3087+++ charmhelpers.new/contrib/openstack/context.py 2016-01-30 12:38:43 +0000
3088@@ -0,0 +1,1477 @@
3089+# Copyright 2014-2015 Canonical Limited.
3090+#
3091+# This file is part of charm-helpers.
3092+#
3093+# charm-helpers is free software: you can redistribute it and/or modify
3094+# it under the terms of the GNU Lesser General Public License version 3 as
3095+# published by the Free Software Foundation.
3096+#
3097+# charm-helpers is distributed in the hope that it will be useful,
3098+# but WITHOUT ANY WARRANTY; without even the implied warranty of
3099+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3100+# GNU Lesser General Public License for more details.
3101+#
3102+# You should have received a copy of the GNU Lesser General Public License
3103+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
3104+
3105+import glob
3106+import json
3107+import os
3108+import re
3109+import time
3110+from base64 import b64decode
3111+from subprocess import check_call
3112+
3113+import six
3114+import yaml
3115+
3116+from charmhelpers.fetch import (
3117+ apt_install,
3118+ filter_installed_packages,
3119+)
3120+from charmhelpers.core.hookenv import (
3121+ config,
3122+ is_relation_made,
3123+ local_unit,
3124+ log,
3125+ relation_get,
3126+ relation_ids,
3127+ related_units,
3128+ relation_set,
3129+ unit_get,
3130+ unit_private_ip,
3131+ charm_name,
3132+ DEBUG,
3133+ INFO,
3134+ WARNING,
3135+ ERROR,
3136+)
3137+
3138+from charmhelpers.core.sysctl import create as sysctl_create
3139+from charmhelpers.core.strutils import bool_from_string
3140+
3141+from charmhelpers.core.host import (
3142+ get_bond_master,
3143+ is_phy_iface,
3144+ list_nics,
3145+ get_nic_hwaddr,
3146+ mkdir,
3147+ write_file,
3148+ pwgen,
3149+)
3150+from charmhelpers.contrib.hahelpers.cluster import (
3151+ determine_apache_port,
3152+ determine_api_port,
3153+ https,
3154+ is_clustered,
3155+)
3156+from charmhelpers.contrib.hahelpers.apache import (
3157+ get_cert,
3158+ get_ca_cert,
3159+ install_ca_cert,
3160+)
3161+from charmhelpers.contrib.openstack.neutron import (
3162+ neutron_plugin_attribute,
3163+ parse_data_port_mappings,
3164+)
3165+from charmhelpers.contrib.openstack.ip import (
3166+ resolve_address,
3167+ INTERNAL,
3168+)
3169+from charmhelpers.contrib.network.ip import (
3170+ get_address_in_network,
3171+ get_ipv4_addr,
3172+ get_ipv6_addr,
3173+ get_netmask_for_address,
3174+ format_ipv6_addr,
3175+ is_address_in_network,
3176+ is_bridge_member,
3177+)
3178+from charmhelpers.contrib.openstack.utils import get_host_ip
3179+from charmhelpers.core.unitdata import kv
3180+
3181+try:
3182+ import psutil
3183+except ImportError:
3184+ apt_install('python-psutil', fatal=True)
3185+ import psutil
3186+
3187+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
3188+ADDRESS_TYPES = ['admin', 'internal', 'public']
3189+
3190+
class OSContextError(Exception):
    """Raised when an OpenStack context cannot be generated, e.g. due to
    missing required charm configuration or malformed config flags."""
3193+
3194+
def ensure_packages(packages):
    """Install but do not upgrade required plugin packages.

    :param packages: iterable of package names to ensure are installed
    """
    missing = filter_installed_packages(packages)
    if missing:
        apt_install(missing, fatal=True)
3200+
3201+
def context_complete(ctxt):
    """Return True when every value in ctxt is populated.

    Logs the keys whose values are None or empty string and returns
    False when any are found.

    :param ctxt: context dict to inspect
    :returns: True when no value is missing, False otherwise
    """
    _missing = [k for k, v in six.iteritems(ctxt) if v is None or v == '']
    if _missing:
        log('Missing required data: %s' % ' '.join(_missing), level=INFO)
        return False
    return True
3213+
3214+
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

    1. A string in the simple format of key=value pairs, with the possibility
       of specifying multiple key value pairs within the same string. For
       example, a string in the format of 'key1=value1, key2=value2' will
       return a dict of:

           {'key1': 'value1',
            'key2': 'value2'}.

    2. A string in the above format, but supporting a comma-delimited list
       of values for the same key. For example, a string in the format of
       'key1=value1, key2=value3,value4,value5' will return a dict of:

           {'key1': 'value1',
            'key2': 'value3,value4,value5'}

    3. A string containing a colon character (:) prior to an equal
       character (=) will be treated as yaml and parsed as such. This can be
       used to specify more complex key value pairs. For example,
       a string in the format of 'key1: subkey1=value1, subkey2=value2' will
       return a dict of:

           {'key1': 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated values
    which themselves may be comma-separated list of values.

    :param config_flags: string of config flags to parse
    :returns: dict of parsed key/value flags
    :raises OSContextError: if the string is not in an expected format
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates assignment
    # for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        log("config_flags is not in expected format (key=value)", level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    for i in range(0, limit - 1):
        # Each split[i+1] fragment holds this key's value and, unless it
        # is the last fragment, the next key after its final comma.
        current = split[i]
        next = split[i + 1]
        vindex = next.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            value = next
        else:
            value = next[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                log("Invalid config value(s) at index %s" % (i), level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
3290+
3291+
class OSContextGenerator(object):
    """Base class for all context generators."""
    interfaces = []      # relation interface names this context consumes
    related = False      # True once a relation id is seen for an interface
    complete = False     # result of the last context_complete() call
    missing_data = []    # keys found empty by the last context_complete()

    def __call__(self):
        # Subclasses must implement the actual context generation.
        raise NotImplementedError

    def context_complete(self, ctxt):
        """Check for missing data for the required context data.
        Set self.missing_data if it exists and return False.
        Set self.complete if no missing data and return True.

        :param ctxt: context dict to inspect
        :returns: True when no value in ctxt is None or empty string
        """
        # Fresh start
        self.complete = False
        self.missing_data = []
        for k, v in six.iteritems(ctxt):
            if v is None or v == '':
                if k not in self.missing_data:
                    self.missing_data.append(k)

        if self.missing_data:
            self.complete = False
            log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
        else:
            self.complete = True
        return self.complete

    def get_related(self):
        """Check if any of the context interfaces have relation ids.
        Set self.related and return True if one of the interfaces
        has relation ids.

        :returns: True when any interface has at least one relation id
        """
        # Fresh start
        self.related = False
        try:
            for interface in self.interfaces:
                if relation_ids(interface):
                    self.related = True
            return self.related
        except AttributeError as e:
            # NOTE(review): passes the string 'INFO' as the log level
            # rather than the INFO constant — confirm intended.
            log("{} {}"
                "".format(self, e), 'INFO')
        return self.related
3338+
3339+
class SharedDBContext(OSContextGenerator):
    """Generate a mysql database context from the shared-db relation."""
    interfaces = ['shared-db']

    def __init__(self,
                 database=None, user=None, relation_prefix=None, ssl_dir=None):
        """Allows inspecting relation for settings prefixed with
        relation_prefix. This is useful for parsing access for multiple
        databases returned via the shared-db interface (eg, nova_password,
        quantum_password)

        :param database: database name; falls back to the 'database'
            charm config option when None
        :param user: database user name; falls back to the
            'database-user' charm config option when None
        :param relation_prefix: optional prefix for relation keys such as
            '<prefix>_password' and '<prefix>_hostname'
        :param ssl_dir: directory into which client SSL material received
            over the relation is written (see db_ssl)
        """
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user
        self.ssl_dir = ssl_dir
        self.rel_name = self.interfaces[0]

    def __call__(self):
        # Resolve database name/user from args or charm config; both are
        # mandatory for this context.
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log("Could not generate shared_db context. Missing required charm "
                "config options. (database name and user)", level=ERROR)
            raise OSContextError

        ctxt = {}

        # NOTE(jamespage) if mysql charm provides a network upon which
        # access to the database should be made, reconfigure relation
        # with the service units local address and defer execution
        access_network = relation_get('access-network')
        if access_network is not None:
            if self.relation_prefix is not None:
                hostname_key = "{}_hostname".format(self.relation_prefix)
            else:
                hostname_key = "hostname"
            access_hostname = get_address_in_network(access_network,
                                                     unit_get('private-address'))
            set_hostname = relation_get(attribute=hostname_key,
                                        unit=local_unit())
            if set_hostname != access_hostname:
                relation_set(relation_settings={hostname_key: access_hostname})
            # NOTE: returns None (not {}) whenever an access-network is
            # set, deferring context generation to a later hook run.
            return None  # Defer any further hook execution for now....

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                host = rdata.get('db_host')
                # NOTE(review): presumably wraps IPv6 addresses for URL
                # use; falls back to the raw host value otherwise.
                host = format_ipv6_addr(host) or host
                ctxt = {
                    'database_host': host,
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': rdata.get(password_setting),
                    'database_type': 'mysql'
                }
                # First related unit providing a complete data set wins.
                if self.context_complete(ctxt):
                    db_ssl(rdata, ctxt, self.ssl_dir)
                    return ctxt
        return {}
3404+
3405+
class PostgresqlDBContext(OSContextGenerator):
    """Generate a postgresql database context from the pgsql-db relation."""
    interfaces = ['pgsql-db']

    def __init__(self, database=None):
        # Database name; resolved from charm config on call when None.
        self.database = database

    def __call__(self):
        self.database = self.database or config('database')
        if self.database is None:
            log('Could not generate postgresql_db context. Missing required '
                'charm config options. (database name)', level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                ctxt = {
                    'database_host': relation_get('host', rid=rid, unit=unit),
                    'database': self.database,
                    'database_user': relation_get('user', rid=rid, unit=unit),
                    'database_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'database_type': 'postgresql',
                }
                # First related unit providing a complete data set wins.
                if self.context_complete(ctxt):
                    return ctxt

        return {}
3435+
3436+
def db_ssl(rdata, ctxt, ssl_dir):
    """Add database client SSL material from relation data to a context.

    Writes the CA cert (and, if present, client cert/key) found in *rdata*
    into *ssl_dir* and records their paths in *ctxt*.

    :param rdata: dict of relation data (base64-encoded SSL material).
    :param ctxt: context dict to update in place.
    :param ssl_dir: directory to write SSL files to; if falsy, SSL data in
                    the relation is logged and ignored.
    :returns: the (possibly updated) ctxt.
    """
    if 'ssl_ca' in rdata and ssl_dir:
        ca_path = os.path.join(ssl_dir, 'db-client.ca')
        # NOTE: b64decode() returns bytes, so write in binary mode to stay
        # compatible with both Python 2 and Python 3 (text mode raises
        # TypeError on py3).
        with open(ca_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_ca']))

        ctxt['database_ssl_ca'] = ca_path
    elif 'ssl_ca' in rdata:
        log("Charm not setup for ssl support but ssl ca found", level=INFO)
        return ctxt

    if 'ssl_cert' in rdata:
        cert_path = os.path.join(
            ssl_dir, 'db-client.cert')
        if not os.path.exists(cert_path):
            # Newly-issued certs may not be valid yet; give them a minute.
            log("Waiting 1m for ssl client cert validity", level=INFO)
            time.sleep(60)

        with open(cert_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_cert']))

        ctxt['database_ssl_cert'] = cert_path
        # A provided ssl_cert is always expected to be accompanied by
        # an ssl_key.
        key_path = os.path.join(ssl_dir, 'db-client.key')
        with open(key_path, 'wb') as fh:
            fh.write(b64decode(rdata['ssl_key']))

        ctxt['database_ssl_key'] = key_path

    return ctxt
3466+
3467+
class IdentityServiceContext(OSContextGenerator):
    """Collects Keystone endpoint details and admin credentials from the
    identity-service relation (or the relation name given at init time)."""

    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
        # service/service_user are only needed when the consuming charm
        # wants a private signing cache dir for PKI token validation.
        self.service = service
        self.service_user = service_user
        self.rel_name = rel_name
        self.interfaces = [self.rel_name]

    def __call__(self):
        """Return identity context from the first complete related unit,
        or {} when no unit has published a full set of settings."""
        log('Generating template context for ' + self.rel_name, level=DEBUG)
        ctxt = {}

        if self.service and self.service_user:
            # This is required for pki token signing if we don't want /tmp to
            # be used.
            cachedir = '/var/cache/%s' % (self.service)
            if not os.path.isdir(cachedir):
                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
                mkdir(path=cachedir, owner=self.service_user,
                      group=self.service_user, perms=0o700)

            ctxt['signing_dir'] = cachedir

        for rid in relation_ids(self.rel_name):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # Keystone may publish IPv6 addresses; wrap them in [] for
                # URL use.
                serv_host = rdata.get('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                svc_protocol = rdata.get('service_protocol') or 'http'
                auth_protocol = rdata.get('auth_protocol') or 'http'
                ctxt.update({'service_port': rdata.get('service_port'),
                             'service_host': serv_host,
                             'auth_host': auth_host,
                             'auth_port': rdata.get('auth_port'),
                             'admin_tenant_name': rdata.get('service_tenant'),
                             'admin_user': rdata.get('service_username'),
                             'admin_password': rdata.get('service_password'),
                             'service_protocol': svc_protocol,
                             'auth_protocol': auth_protocol})

                if self.context_complete(ctxt):
                    # NOTE(jamespage) this is required for >= icehouse
                    # so a missing value just indicates keystone needs
                    # upgrading
                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                    return ctxt

        return {}
3519+
3520+
class AMQPContext(OSContextGenerator):
    """Collects RabbitMQ connection details from an amqp relation.

    Handles both clustered (VIP) and active/active broker deployments and
    optionally writes out a broker-provided SSL CA certificate.
    """

    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
        # ssl_dir: where to store a broker-provided CA cert; SSL data in
        #          the relation is ignored when unset.
        # relation_prefix: prefixes the rabbit-user/rabbit-vhost config
        #                  option names for charms with multiple amqp uses.
        self.ssl_dir = ssl_dir
        self.rel_name = rel_name
        self.relation_prefix = relation_prefix
        self.interfaces = [rel_name]

    def __call__(self):
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            # NOTE(review): message says "shared_db" — looks like a
            # copy/paste from SharedDBContext; confirm before changing
            # since operators may grep logs for it.
            log('Could not generate shared_db context. Missing required charm '
                'config options: %s.' % e, level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            self.related = True
            for unit in related_units(rid):
                if relation_get('clustered', rid=rid, unit=unit):
                    # Clustered broker: connect via the published VIP.
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    # Presence of the key (any value) enables HA queues.
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if self.context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log("Charm not setup for ssl support but ssl ca "
                                "found", level=INFO)
                            break

                        # NOTE(review): b64decode() returns bytes under
                        # py3 while the file is opened in text mode —
                        # confirm py3 behaviour here.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only) and
                    len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                # Sorted for a stable, comparable host list across hooks.
                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        if not self.complete:
            return {}

        return ctxt
3619+
3620+
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)

        ctxt = {'use_syslog': str(config('use-syslog')).lower()}
        mon_hosts = []
        for relid in relation_ids('ceph'):
            for member in related_units(relid):
                # Take auth/key from the first unit that provides them.
                if not ctxt.get('auth'):
                    ctxt['auth'] = relation_get('auth', rid=relid,
                                                unit=member)
                if not ctxt.get('key'):
                    ctxt['key'] = relation_get('key', rid=relid, unit=member)
                public_addr = relation_get('ceph-public-address', rid=relid,
                                           unit=member)
                private_addr = relation_get('private-address', rid=relid,
                                            unit=member)
                # Prefer the dedicated public address when published.
                monitor = public_addr or private_addr
                mon_hosts.append(format_ipv6_addr(monitor) or monitor)

        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not self.context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
3658+
3659+
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        """
        :param singlenode_mode: if True, render a frontend even when this
                                unit has no peers.
        """
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        # Address used for the default (fallback) backend.
        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # will either be the only backend or the fallback if no acls
        # match in the frontend
        # (the previous dead "cluster_hosts[addr] = {}" assignment that was
        # immediately overwritten has been removed)
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        # Optional per-charm haproxy timeout overrides.
        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('haproxy-queue-timeout'):
            ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')

        if config('haproxy-connect-timeout'):
            ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'

        ctxt['stat_port'] = '8888'

        # Persist a generated stats password across hook invocations.
        db = kv()
        ctxt['stat_password'] = db.get('stat-password')
        if not ctxt['stat_password']:
            # unitdata's set() returns the stored value.
            ctxt['stat_password'] = db.set('stat-password',
                                           pwgen(32))
            db.flush()

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
3763+
3764+
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        """
        log('Generating template context for image-service.', level=DEBUG)
        relids = relation_ids('image-service')
        if not relids:
            return {}

        # Return the first glance API server advertised by any unit.
        for relid in relids:
            for member in related_units(relid):
                server = relation_get('glance-api-server',
                                      rid=relid, unit=member)
                if server:
                    return {'glance_api_servers': server}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
3787+
3788+
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules required for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the SSL cert and key for this service namespace.

        :param cn: optional canonical name used to suffix the filenames.
        """
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA certificate published by keystone, if any."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # NOTE: use slicing rather than lstrip() here.
                        # lstrip('ssl_key_') strips any leading characters
                        # from the set {s, l, _, k, e, y}, which would
                        # mangle cns beginning with one of those letters
                        # (e.g. 'keystone' -> 'tone').
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
        (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

            or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                # Multiple vips: pick the vip that lives in this network.
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        """Build the apache SSL vhost context (or {} when HTTPS is off)."""
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
3928+
3929+
class NeutronContext(OSContextGenerator):
    """Base context for neutron-related configuration.

    Subclasses override the plugin/network_manager properties; this class
    maps the selected plugin onto its core driver, config file, security
    group setting and local IP for templates.
    """
    interfaces = []

    @property
    def plugin(self):
        # Subclasses return the active neutron plugin name (e.g. 'ovs').
        return None

    @property
    def network_manager(self):
        # Subclasses return 'quantum' or 'neutron'.
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        # Install any packages required by the selected plugin.
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Record the active plugin so nova can pick it up."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # NOTE: open in text mode; self.plugin is a str and writing it to a
        # binary-mode ('wb') file raises TypeError under Python 3.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        """Context fragment for the openvswitch plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'ovs',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return ovs_ctxt

    def nuage_ctxt(self):
        """Context fragment for the Nuage VSP plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nuage_ctxt = {'core_plugin': driver,
                      'neutron_plugin': 'vsp',
                      'neutron_security_groups': self.neutron_security_groups,
                      'local_ip': unit_private_ip(),
                      'config': config}

        return nuage_ctxt

    def nvp_ctxt(self):
        """Context fragment for the NVP/NSX plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nvp_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'nvp',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return nvp_ctxt

    def n1kv_ctxt(self):
        """Context fragment for the Cisco N1KV plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_user_config_flags = config('n1kv-config-flags')
        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
        n1kv_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'n1kv',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': n1kv_config,
                     'vsm_ip': config('n1kv-vsm-ip'),
                     'vsm_username': config('n1kv-vsm-username'),
                     'vsm_password': config('n1kv-vsm-password'),
                     'restrict_policy_profiles': restrict_policy_profiles}

        if n1kv_user_config_flags:
            flags = config_flags_parser(n1kv_user_config_flags)
            n1kv_ctxt['user_config_flags'] = flags

        return n1kv_ctxt

    def calico_ctxt(self):
        """Context fragment for the Calico plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        calico_ctxt = {'core_plugin': driver,
                       'neutron_plugin': 'Calico',
                       'neutron_security_groups': self.neutron_security_groups,
                       'local_ip': unit_private_ip(),
                       'config': config}

        return calico_ctxt

    def neutron_ctxt(self):
        """Common fragment: network manager and neutron API URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt

    def pg_ctxt(self):
        """Context fragment for the PLUMgrid plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'plumgrid',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}
        return ovs_ctxt

    def midonet_ctxt(self):
        """Context fragment for the MidoNet plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
                                                  self.network_manager)
        mido_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'midonet',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': midonet_config}

        return mido_ctxt

    def __call__(self):
        """Dispatch on the configured plugin and return its context."""
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin in ['nvp', 'nsx']:
            ctxt.update(self.nvp_ctxt())
        elif self.plugin == 'n1kv':
            ctxt.update(self.n1kv_ctxt())
        elif self.plugin == 'Calico':
            ctxt.update(self.calico_ctxt())
        elif self.plugin == 'vsp':
            ctxt.update(self.nuage_ctxt())
        elif self.plugin == 'plumgrid':
            ctxt.update(self.pg_ctxt())
        elif self.plugin == 'midonet':
            ctxt.update(self.midonet_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            flags = config_flags_parser(alchemy_flags)
            ctxt['neutron_alchemy_flags'] = flags

        self._save_flag_file()
        return ctxt
4109+
4110+
class NeutronPortContext(OSContextGenerator):

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s)

        If hwaddress provided then returns resolved hwaddress otherwise NIC.

        :param ports: list of NIC names and/or MAC addresses.
        :returns: None for empty input, otherwise a de-duplicated list of
                  interface names (order not preserved due to set()).
        """
        if not ports:
            return None

        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
        for nic in list_nics():
            # Ignore virtual interfaces (bond masters will be identified from
            # their slaves)
            if not is_phy_iface(nic):
                continue

            _nic = get_bond_master(nic)
            if _nic:
                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
                    level=DEBUG)
                nic = _nic

            hwaddr = get_nic_hwaddr(nic)
            hwaddr_to_nic[hwaddr] = nic
            # Collect both v4 and v6 addresses; a NIC with any address is
            # considered in use and will not be resolved below.
            addresses = get_ipv4_addr(nic, fatal=False)
            addresses += get_ipv6_addr(iface=nic, fatal=False)
            hwaddr_to_ip[hwaddr] = addresses

        resolved = []
        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        for entry in ports:
            if re.match(mac_regex, entry):
                # NIC is in known NICs and does NOT have an IP address
                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                    # If the nic is part of a bridge then don't use it
                    if is_bridge_member(hwaddr_to_nic[entry]):
                        continue

                    # Entry is a MAC address for a valid interface that doesn't
                    # have an IP address assigned yet.
                    resolved.append(hwaddr_to_nic[entry])
            else:
                # If the passed entry is not a MAC address, assume it's a valid
                # interface, and that the user put it there on purpose (we can
                # trust it to be the real external network).
                resolved.append(entry)

        # Ensure no duplicates
        return list(set(resolved))
4162+
4163+
class OSConfigFlagContext(OSContextGenerator):
    """Provides support for user-defined config flags.

    Users can define a comma-separated list of key=value pairs
    in the charm configuration and apply them at any point in
    any file by using a template flag.

    Sometimes users might want config flags inserted within a
    specific section so this class allows users to specify the
    template flag name, allowing for multiple template flags
    (sections) within the same context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in template
                              file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        raw_flags = config(self._charm_flag)
        if not raw_flags:
            return {}
        return {self._template_flag: config_flags_parser(raw_flags)}
4199+
4200+
class LibvirtConfigFlagsContext(OSContextGenerator):
    """Expose user-defined libvirt-flags for extending the libvirt
    section of rendered configuration."""

    def __call__(self):
        raw_flags = config('libvirt-flags')
        if not raw_flags:
            return {}
        return {'libvirt_flags': config_flags_parser(raw_flags)}
4213+
4214+
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principle for multiple config
    files and multiple services. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json::

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principle charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as::

        ctxt = {
            ... other context ...
            'subordinate_configuration': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service : Service name key to query in any subordinate
                         data found
        :param config_file : Service's config file to query sections
        :param interface : Subordinate interface to inspect
        """
        self.config_file = config_file
        # Accept scalars as well as lists for backwards compatibility.
        if isinstance(service, list):
            self.services = service
        else:
            self.services = [service]
        if isinstance(interface, list):
            self.interfaces = interface
        else:
            self.interfaces = [interface]

    def __call__(self):
        """Merge section data exported by all subordinate units."""
        ctxt = {'sections': {}}
        rids = []
        for interface in self.interfaces:
            rids.extend(relation_ids(interface))
        for rid in rids:
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        # json.loads raises ValueError (JSONDecodeError is a
                        # subclass on py3); don't swallow unrelated errors
                        # with a bare except.
                        log('Could not parse JSON from '
                            'subordinate_configuration setting from %s'
                            % rid, level=ERROR)
                        continue

                    for service in self.services:
                        if service not in sub_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s service'
                                % (rid, service), level=INFO)
                            continue

                        # NOTE: bind the per-service data to a new name
                        # instead of reassigning sub_config; otherwise the
                        # lookup for any subsequent service in
                        # self.services would be made against this
                        # service's data rather than the full blob.
                        svc_config = sub_config[service]
                        if self.config_file not in svc_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s'
                                % (rid, self.config_file), level=INFO)
                            continue

                        file_config = svc_config[self.config_file]
                        for k, v in six.iteritems(file_config):
                            if k == 'sections':
                                # Merge per-section lists across units.
                                for section, config_list in six.iteritems(v):
                                    log("adding section '%s'" % (section),
                                        level=DEBUG)
                                    if ctxt[k].get(section):
                                        ctxt[k][section].extend(config_list)
                                    else:
                                        ctxt[k][section] = config_list
                            else:
                                ctxt[k] = v
        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
        return ctxt
4321+
4322+
class LogLevelContext(OSContextGenerator):
    """Expose the charm's debug/verbose options, defaulting unset
    values to False."""

    def __call__(self):
        debug = config('debug')
        verbose = config('verbose')
        return {
            'debug': False if debug is None else debug,
            'verbose': False if verbose is None else verbose,
        }
4333+
4334+
class SyslogContext(OSContextGenerator):
    """Expose the use-syslog charm option to templates."""

    def __call__(self):
        return {'use_syslog': config('use-syslog')}
4340+
4341+
class BindHostContext(OSContextGenerator):
    """Choose the wildcard bind address based on the IP version
    preference."""

    def __call__(self):
        if config('prefer-ipv6'):
            return {'bind_host': '::'}
        return {'bind_host': '0.0.0.0'}
4349+
4350+
class WorkerConfigContext(OSContextGenerator):
    """Compute the number of worker processes from CPU count and the
    worker-multiplier charm option."""

    @property
    def num_cpus(self):
        # NOTE: use cpu_count if present (16.04 support); older psutil
        # exposed NUM_CPUS instead.
        if hasattr(psutil, 'cpu_count'):
            return psutil.cpu_count()
        return psutil.NUM_CPUS

    def __call__(self):
        factor = config('worker-multiplier') or 0
        return {"workers": self.num_cpus * factor}
4365+
4366+
class ZeroMQContext(OSContextGenerator):
    interfaces = ['zeromq-configuration']

    def __call__(self):
        """Collect zeromq settings; the last related unit wins."""
        ctxt = {}
        if is_relation_made('zeromq-configuration', 'host'):
            for relid in relation_ids('zeromq-configuration'):
                for member in related_units(relid):
                    ctxt['zmq_nonce'] = relation_get('nonce', member, relid)
                    ctxt['zmq_host'] = relation_get('host', member, relid)
                    ctxt['zmq_redis_address'] = relation_get(
                        'zmq_redis_address', member, relid)

        return ctxt
4381+
4382+
class NotificationDriverContext(OSContextGenerator):
    """Enable notifications when an amqp relation exists."""

    def __init__(self, zmq_relation='zeromq-configuration',
                 amqp_relation='amqp'):
        """
        :param zmq_relation: Name of Zeromq relation to check
        :param amqp_relation: Name of the amqp relation to check
        """
        self.zmq_relation = zmq_relation
        self.amqp_relation = amqp_relation

    def __call__(self):
        if is_relation_made(self.amqp_relation):
            return {'notifications': "True"}
        return {'notifications': 'False'}
4399+
4400+
class SysctlContext(OSContextGenerator):
    """This context check if the 'sysctl' option exists on configuration
    then creates a file with the loaded contents"""

    def __call__(self):
        settings = config('sysctl')
        if settings:
            target = '/etc/sysctl.d/50-{0}.conf'.format(charm_name())
            sysctl_create(settings, target)
        return {'sysctl': settings}
4410+
4411+
4412+class NeutronAPIContext(OSContextGenerator):
4413+ '''
4414+ Inspects current neutron-plugin-api relation for neutron settings. Return
4415+ defaults if it is not present.
4416+ '''
4417+ interfaces = ['neutron-plugin-api']
4418+
4419+ def __call__(self):
4420+ self.neutron_defaults = {
4421+ 'l2_population': {
4422+ 'rel_key': 'l2-population',
4423+ 'default': False,
4424+ },
4425+ 'overlay_network_type': {
4426+ 'rel_key': 'overlay-network-type',
4427+ 'default': 'gre',
4428+ },
4429+ 'neutron_security_groups': {
4430+ 'rel_key': 'neutron-security-groups',
4431+ 'default': False,
4432+ },
4433+ 'network_device_mtu': {
4434+ 'rel_key': 'network-device-mtu',
4435+ 'default': None,
4436+ },
4437+ 'enable_dvr': {
4438+ 'rel_key': 'enable-dvr',
4439+ 'default': False,
4440+ },
4441+ 'enable_l3ha': {
4442+ 'rel_key': 'enable-l3ha',
4443+ 'default': False,
4444+ },
4445+ }
4446+ ctxt = self.get_neutron_options({})
4447+ for rid in relation_ids('neutron-plugin-api'):
4448+ for unit in related_units(rid):
4449+ rdata = relation_get(rid=rid, unit=unit)
4450+ if 'l2-population' in rdata:
4451+ ctxt.update(self.get_neutron_options(rdata))
4452+
4453+ return ctxt
4454+
4455+ def get_neutron_options(self, rdata):
4456+ settings = {}
4457+ for nkey in self.neutron_defaults.keys():
4458+ defv = self.neutron_defaults[nkey]['default']
4459+ rkey = self.neutron_defaults[nkey]['rel_key']
4460+ if rkey in rdata.keys():
4461+ if type(defv) is bool:
4462+ settings[nkey] = bool_from_string(rdata[rkey])
4463+ else:
4464+ settings[nkey] = rdata[rkey]
4465+ else:
4466+ settings[nkey] = defv
4467+ return settings
4468+
4469+
4470+class ExternalPortContext(NeutronPortContext):
4471+
4472+ def __call__(self):
4473+ ctxt = {}
4474+ ports = config('ext-port')
4475+ if ports:
4476+ ports = [p.strip() for p in ports.split()]
4477+ ports = self.resolve_ports(ports)
4478+ if ports:
4479+ ctxt = {"ext_port": ports[0]}
4480+ napi_settings = NeutronAPIContext()()
4481+ mtu = napi_settings.get('network_device_mtu')
4482+ if mtu:
4483+ ctxt['ext_port_mtu'] = mtu
4484+
4485+ return ctxt
4486+
4487+
4488+class DataPortContext(NeutronPortContext):
4489+
4490+ def __call__(self):
4491+ ports = config('data-port')
4492+ if ports:
4493+ # Map of {port/mac:bridge}
4494+ portmap = parse_data_port_mappings(ports)
4495+ ports = portmap.keys()
4496+ # Resolve provided ports or mac addresses and filter out those
4497+ # already attached to a bridge.
4498+ resolved = self.resolve_ports(ports)
4499+ # FIXME: is this necessary?
4500+ normalized = {get_nic_hwaddr(port): port for port in resolved
4501+ if port not in ports}
4502+ normalized.update({port: port for port in resolved
4503+ if port in ports})
4504+ if resolved:
4505+ return {normalized[port]: bridge for port, bridge in
4506+ six.iteritems(portmap) if port in normalized.keys()}
4507+
4508+ return None
4509+
4510+
4511+class PhyNICMTUContext(DataPortContext):
4512+
4513+ def __call__(self):
4514+ ctxt = {}
4515+ mappings = super(PhyNICMTUContext, self).__call__()
4516+ if mappings and mappings.keys():
4517+ ports = sorted(mappings.keys())
4518+ napi_settings = NeutronAPIContext()()
4519+ mtu = napi_settings.get('network_device_mtu')
4520+ all_ports = set()
4521+ # If any of ports is a vlan device, its underlying device must have
4522+ # mtu applied first.
4523+ for port in ports:
4524+ for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
4525+ lport = os.path.basename(lport)
4526+ all_ports.add(lport.split('_')[1])
4527+
4528+ all_ports = list(all_ports)
4529+ all_ports.extend(ports)
4530+ if mtu:
4531+ ctxt["devs"] = '\\n'.join(all_ports)
4532+ ctxt['mtu'] = mtu
4533+
4534+ return ctxt
4535+
4536+
4537+class NetworkServiceContext(OSContextGenerator):
4538+
4539+ def __init__(self, rel_name='quantum-network-service'):
4540+ self.rel_name = rel_name
4541+ self.interfaces = [rel_name]
4542+
4543+ def __call__(self):
4544+ for rid in relation_ids(self.rel_name):
4545+ for unit in related_units(rid):
4546+ rdata = relation_get(rid=rid, unit=unit)
4547+ ctxt = {
4548+ 'keystone_host': rdata.get('keystone_host'),
4549+ 'service_port': rdata.get('service_port'),
4550+ 'auth_port': rdata.get('auth_port'),
4551+ 'service_tenant': rdata.get('service_tenant'),
4552+ 'service_username': rdata.get('service_username'),
4553+ 'service_password': rdata.get('service_password'),
4554+ 'quantum_host': rdata.get('quantum_host'),
4555+ 'quantum_port': rdata.get('quantum_port'),
4556+ 'quantum_url': rdata.get('quantum_url'),
4557+ 'region': rdata.get('region'),
4558+ 'service_protocol':
4559+ rdata.get('service_protocol') or 'http',
4560+ 'auth_protocol':
4561+ rdata.get('auth_protocol') or 'http',
4562+ }
4563+ if self.context_complete(ctxt):
4564+ return ctxt
4565+ return {}
4566
4567=== added directory 'charmhelpers.new/contrib/openstack/files'
4568=== added file 'charmhelpers.new/contrib/openstack/files/check_haproxy.sh'
4569--- charmhelpers.new/contrib/openstack/files/check_haproxy.sh 1970-01-01 00:00:00 +0000
4570+++ charmhelpers.new/contrib/openstack/files/check_haproxy.sh 2016-01-30 12:38:43 +0000
4571@@ -0,0 +1,34 @@
4572+#!/bin/bash
4573+#--------------------------------------------
4574+# This file is managed by Juju
4575+#--------------------------------------------
4576+#
4577+# Copyright 2009,2012 Canonical Ltd.
4578+# Author: Tom Haddon
4579+
4580+CRITICAL=0
4581+NOTACTIVE=''
4582+LOGFILE=/var/log/nagios/check_haproxy.log
4583+AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR==1{print $4}')  # NR==1 (comparison): take first match only
4584+
4585+typeset -i N_INSTANCES=0
4586+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
4587+do
4588+ N_INSTANCES=N_INSTANCES+1
4589+ output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
4590+ if [ $? != 0 ]; then
4591+ date >> $LOGFILE
4592+ echo $output >> $LOGFILE
4593+ /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
4594+ CRITICAL=1
4595+ NOTACTIVE="${NOTACTIVE} $appserver"
4596+ fi
4597+done
4598+
4599+if [ $CRITICAL = 1 ]; then
4600+ echo "CRITICAL:${NOTACTIVE}"
4601+ exit 2
4602+fi
4603+
4604+echo "OK: All haproxy instances ($N_INSTANCES) looking good"
4605+exit 0
4606
4607=== added file 'charmhelpers.new/contrib/openstack/ip.py'
4608--- charmhelpers.new/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
4609+++ charmhelpers.new/contrib/openstack/ip.py 2016-01-30 12:38:43 +0000
4610@@ -0,0 +1,151 @@
4611+# Copyright 2014-2015 Canonical Limited.
4612+#
4613+# This file is part of charm-helpers.
4614+#
4615+# charm-helpers is free software: you can redistribute it and/or modify
4616+# it under the terms of the GNU Lesser General Public License version 3 as
4617+# published by the Free Software Foundation.
4618+#
4619+# charm-helpers is distributed in the hope that it will be useful,
4620+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4621+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4622+# GNU Lesser General Public License for more details.
4623+#
4624+# You should have received a copy of the GNU Lesser General Public License
4625+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4626+
4627+from charmhelpers.core.hookenv import (
4628+ config,
4629+ unit_get,
4630+ service_name,
4631+)
4632+from charmhelpers.contrib.network.ip import (
4633+ get_address_in_network,
4634+ is_address_in_network,
4635+ is_ipv6,
4636+ get_ipv6_addr,
4637+)
4638+from charmhelpers.contrib.hahelpers.cluster import is_clustered
4639+
4640+PUBLIC = 'public'
4641+INTERNAL = 'int'
4642+ADMIN = 'admin'
4643+
4644+ADDRESS_MAP = {
4645+ PUBLIC: {
4646+ 'config': 'os-public-network',
4647+ 'fallback': 'public-address',
4648+ 'override': 'os-public-hostname',
4649+ },
4650+ INTERNAL: {
4651+ 'config': 'os-internal-network',
4652+ 'fallback': 'private-address',
4653+ 'override': 'os-internal-hostname',
4654+ },
4655+ ADMIN: {
4656+ 'config': 'os-admin-network',
4657+ 'fallback': 'private-address',
4658+ 'override': 'os-admin-hostname',
4659+ }
4660+}
4661+
4662+
4663+def canonical_url(configs, endpoint_type=PUBLIC):
4664+ """Returns the correct HTTP URL to this host given the state of HTTPS
4665+ configuration, hacluster and charm configuration.
4666+
4667+ :param configs: OSTemplateRenderer config templating object to inspect
4668+ for a complete https context.
4669+ :param endpoint_type: str endpoint type to resolve.
4670+ :param returns: str base URL for services on the current service unit.
4671+ """
4672+ scheme = _get_scheme(configs)
4673+
4674+ address = resolve_address(endpoint_type)
4675+ if is_ipv6(address):
4676+ address = "[{}]".format(address)
4677+
4678+ return '%s://%s' % (scheme, address)
4679+
4680+
4681+def _get_scheme(configs):
4682+ """Returns the scheme to use for the url (either http or https)
4683+ depending upon whether https is in the configs value.
4684+
4685+ :param configs: OSTemplateRenderer config templating object to inspect
4686+ for a complete https context.
4687+ :returns: either 'http' or 'https' depending on whether https is
4688+ configured within the configs context.
4689+ """
4690+ scheme = 'http'
4691+ if configs and 'https' in configs.complete_contexts():
4692+ scheme = 'https'
4693+ return scheme
4694+
4695+
4696+def _get_address_override(endpoint_type=PUBLIC):
4697+ """Returns any address overrides that the user has defined based on the
4698+ endpoint type.
4699+
4700+ Note: this function allows for the service name to be inserted into the
4701+ address if the user specifies {service_name}.somehost.org.
4702+
4703+ :param endpoint_type: the type of endpoint to retrieve the override
4704+ value for.
4705+ :returns: any endpoint address or hostname that the user has overridden
4706+ or None if an override is not present.
4707+ """
4708+ override_key = ADDRESS_MAP[endpoint_type]['override']
4709+ addr_override = config(override_key)
4710+ if not addr_override:
4711+ return None
4712+ else:
4713+ return addr_override.format(service_name=service_name())
4714+
4715+
4716+def resolve_address(endpoint_type=PUBLIC):
4717+ """Return unit address depending on net config.
4718+
4719+ If unit is clustered with vip(s) and has net splits defined, return vip on
4720+ correct network. If clustered with no nets defined, return primary vip.
4721+
4722+ If not clustered, return unit address ensuring address is on configured net
4723+ split if one is configured.
4724+
4725+ :param endpoint_type: Network endpoint type
4726+ """
4727+ resolved_address = _get_address_override(endpoint_type)
4728+ if resolved_address:
4729+ return resolved_address
4730+
4731+ vips = config('vip')
4732+ if vips:
4733+ vips = vips.split()
4734+
4735+ net_type = ADDRESS_MAP[endpoint_type]['config']
4736+ net_addr = config(net_type)
4737+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
4738+ clustered = is_clustered()
4739+ if clustered:
4740+ if not net_addr:
4741+ # If no net-splits defined, we expect a single vip
4742+ resolved_address = vips[0]
4743+ else:
4744+ for vip in vips:
4745+ if is_address_in_network(net_addr, vip):
4746+ resolved_address = vip
4747+ break
4748+ else:
4749+ if config('prefer-ipv6'):
4750+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
4751+ else:
4752+ fallback_addr = unit_get(net_fallback)
4753+
4754+ resolved_address = get_address_in_network(net_addr, fallback_addr)
4755+
4756+ if resolved_address is None:
4757+ raise ValueError("Unable to resolve a suitable IP address based on "
4758+ "charm state and configuration. (net_type=%s, "
4759+ "clustered=%s)" % (net_type, clustered))
4760+
4761+ return resolved_address
4762
4763=== added file 'charmhelpers.new/contrib/openstack/neutron.py'
4764--- charmhelpers.new/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
4765+++ charmhelpers.new/contrib/openstack/neutron.py 2016-01-30 12:38:43 +0000
4766@@ -0,0 +1,378 @@
4767+# Copyright 2014-2015 Canonical Limited.
4768+#
4769+# This file is part of charm-helpers.
4770+#
4771+# charm-helpers is free software: you can redistribute it and/or modify
4772+# it under the terms of the GNU Lesser General Public License version 3 as
4773+# published by the Free Software Foundation.
4774+#
4775+# charm-helpers is distributed in the hope that it will be useful,
4776+# but WITHOUT ANY WARRANTY; without even the implied warranty of
4777+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4778+# GNU Lesser General Public License for more details.
4779+#
4780+# You should have received a copy of the GNU Lesser General Public License
4781+# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
4782+
4783+# Various utilities for dealing with Neutron and the renaming from Quantum.
4784+
4785+import six
4786+from subprocess import check_output
4787+
4788+from charmhelpers.core.hookenv import (
4789+ config,
4790+ log,
4791+ ERROR,
4792+)
4793+
4794+from charmhelpers.contrib.openstack.utils import os_release
4795+
4796+
4797+def headers_package():
4798+ """Returns the linux-headers package name matching the running kernel,
4799+ needed when building a DKMS package"""
4800+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
4801+ return 'linux-headers-%s' % kver
4802+
4803+QUANTUM_CONF_DIR = '/etc/quantum'
4804+
4805+
4806+def kernel_version():
4807+ """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
4808+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
4809+ kver = kver.split('.')
4810+ return (int(kver[0]), int(kver[1]))
4811+
4812+
4813+def determine_dkms_package():
4814+ """ Determine which DKMS package should be used based on kernel version """
4815+ # NOTE: 3.13 kernels have support for GRE and VXLAN native
4816+ if kernel_version() >= (3, 13):
4817+ return []
4818+ else:
4819+ return [headers_package(), 'openvswitch-datapath-dkms']
4820+
4821+
4822+# legacy
4823+
4824+
4825+def quantum_plugins():
4826+ from charmhelpers.contrib.openstack import context
4827+ return {
4828+ 'ovs': {
4829+ 'config': '/etc/quantum/plugins/openvswitch/'
4830+ 'ovs_quantum_plugin.ini',
4831+ 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
4832+ 'OVSQuantumPluginV2',
4833+ 'contexts': [
4834+ context.SharedDBContext(user=config('neutron-database-user'),
4835+ database=config('neutron-database'),
4836+ relation_prefix='neutron',
4837+ ssl_dir=QUANTUM_CONF_DIR)],
4838+ 'services': ['quantum-plugin-openvswitch-agent'],
4839+ 'packages': [determine_dkms_package(),
4840+ ['quantum-plugin-openvswitch-agent']],
4841+ 'server_packages': ['quantum-server',
4842+ 'quantum-plugin-openvswitch'],
4843+ 'server_services': ['quantum-server']
4844+ },
4845+ 'nvp': {
4846+ 'config': '/etc/quantum/plugins/nicira/nvp.ini',
4847+ 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
4848+ 'QuantumPlugin.NvpPluginV2',
4849+ 'contexts': [
4850+ context.SharedDBContext(user=config('neutron-database-user'),
4851+ database=config('neutron-database'),
4852+ relation_prefix='neutron',
4853+ ssl_dir=QUANTUM_CONF_DIR)],
4854+ 'services': [],
4855+ 'packages': [],
4856+ 'server_packages': ['quantum-server',
4857+ 'quantum-plugin-nicira'],
4858+ 'server_services': ['quantum-server']
4859+ }
4860+ }
4861+
4862+NEUTRON_CONF_DIR = '/etc/neutron'
4863+
4864+
4865+def neutron_plugins():
4866+ from charmhelpers.contrib.openstack import context
4867+ release = os_release('nova-common')
4868+ plugins = {
4869+ 'ovs': {
4870+ 'config': '/etc/neutron/plugins/openvswitch/'
4871+ 'ovs_neutron_plugin.ini',
4872+ 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
4873+ 'OVSNeutronPluginV2',
4874+ 'contexts': [
4875+ context.SharedDBContext(user=config('neutron-database-user'),
4876+ database=config('neutron-database'),
4877+ relation_prefix='neutron',
4878+ ssl_dir=NEUTRON_CONF_DIR)],
4879+ 'services': ['neutron-plugin-openvswitch-agent'],
4880+ 'packages': [determine_dkms_package(),
4881+ ['neutron-plugin-openvswitch-agent']],
4882+ 'server_packages': ['neutron-server',
4883+ 'neutron-plugin-openvswitch'],
4884+ 'server_services': ['neutron-server']
4885+ },
4886+ 'nvp': {
4887+ 'config': '/etc/neutron/plugins/nicira/nvp.ini',
4888+ 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
4889+ 'NeutronPlugin.NvpPluginV2',
4890+ 'contexts': [
4891+ context.SharedDBContext(user=config('neutron-database-user'),
4892+ database=config('neutron-database'),
4893+ relation_prefix='neutron',
4894+ ssl_dir=NEUTRON_CONF_DIR)],
4895+ 'services': [],
4896+ 'packages': [],
4897+ 'server_packages': ['neutron-server',
4898+ 'neutron-plugin-nicira'],
4899+ 'server_services': ['neutron-server']
4900+ },
4901+ 'nsx': {
4902+ 'config': '/etc/neutron/plugins/vmware/nsx.ini',
4903+ 'driver': 'vmware',
4904+ 'contexts': [
4905+ context.SharedDBContext(user=config('neutron-database-user'),
4906+ database=config('neutron-database'),
4907+ relation_prefix='neutron',
4908+ ssl_dir=NEUTRON_CONF_DIR)],
4909+ 'services': [],
4910+ 'packages': [],
4911+ 'server_packages': ['neutron-server',
4912+ 'neutron-plugin-vmware'],
4913+ 'server_services': ['neutron-server']
4914+ },
4915+ 'n1kv': {
4916+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
4917+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
4918+ 'contexts': [
4919+ context.SharedDBContext(user=config('neutron-database-user'),
4920+ database=config('neutron-database'),
4921+ relation_prefix='neutron',
4922+ ssl_dir=NEUTRON_CONF_DIR)],
4923+ 'services': [],
4924+ 'packages': [determine_dkms_package(),
4925+ ['neutron-plugin-cisco']],
4926+ 'server_packages': ['neutron-server',
4927+ 'neutron-plugin-cisco'],
4928+ 'server_services': ['neutron-server']
4929+ },
4930+ 'Calico': {
4931+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
4932+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
4933+ 'contexts': [
4934+ context.SharedDBContext(user=config('neutron-database-user'),
4935+ database=config('neutron-database'),
4936+ relation_prefix='neutron',
4937+ ssl_dir=NEUTRON_CONF_DIR)],
4938+ 'services': ['calico-felix',
4939+ 'bird',
4940+ 'neutron-dhcp-agent',
4941+ 'nova-api-metadata',
4942+ 'etcd'],
4943+ 'packages': [determine_dkms_package(),
4944+ ['calico-compute',
4945+ 'bird',
4946+ 'neutron-dhcp-agent',
4947+ 'nova-api-metadata',
4948+ 'etcd']],
4949+ 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
4950+ 'server_services': ['neutron-server', 'etcd']
4951+ },
4952+ 'vsp': {
4953+ 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
4954+ 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
4955+ 'contexts': [
4956+ context.SharedDBContext(user=config('neutron-database-user'),
4957+ database=config('neutron-database'),
4958+ relation_prefix='neutron',
4959+ ssl_dir=NEUTRON_CONF_DIR)],
4960+ 'services': [],
4961+ 'packages': [],
4962+ 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
4963+ 'server_services': ['neutron-server']
4964+ },
4965+ 'plumgrid': {
4966+ 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
4967+ 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
4968+ 'contexts': [
4969+ context.SharedDBContext(user=config('database-user'),
4970+ database=config('database'),
4971+ ssl_dir=NEUTRON_CONF_DIR)],
4972+ 'services': [],
4973+ 'packages': ['plumgrid-lxc',
4974+ 'iovisor-dkms'],
4975+ 'server_packages': ['neutron-server',
4976+ 'neutron-plugin-plumgrid'],
4977+ 'server_services': ['neutron-server']
4978+ },
4979+ 'midonet': {
4980+ 'config': '/etc/neutron/plugins/midonet/midonet.ini',
4981+ 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
4982+ 'contexts': [
4983+ context.SharedDBContext(user=config('neutron-database-user'),
4984+ database=config('neutron-database'),
4985+ relation_prefix='neutron',
4986+ ssl_dir=NEUTRON_CONF_DIR)],
4987+ 'services': [],
4988+ 'packages': [determine_dkms_package()],
4989+ 'server_packages': ['neutron-server',
4990+ 'python-neutron-plugin-midonet'],
4991+ 'server_services': ['neutron-server']
4992+ }
4993+ }
4994+ if release >= 'icehouse':
4995+ # NOTE: patch in ml2 plugin for icehouse onwards
4996+ plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
4997+ plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
4998+ plugins['ovs']['server_packages'] = ['neutron-server',
4999+ 'neutron-plugin-ml2']
5000+ # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches