Merge lp:~fcorrea/charms/trusty/glance/fix-pause-action into lp:~openstack-charmers-archive/charms/trusty/glance/trunk

Proposed by Fernando Correa Neto
Status: Superseded
Proposed branch: lp:~fcorrea/charms/trusty/glance/fix-pause-action
Merge into: lp:~openstack-charmers-archive/charms/trusty/glance/trunk
Diff against target: 12620 lines (+12105/-44) (has conflicts)
54 files modified
.bzrignore (+2/-0)
.testr.conf (+8/-0)
actions/actions.py (+59/-0)
actions/openstack_upgrade.py (+28/-0)
charm-helpers-hooks.yaml (+5/-0)
charmhelpers.new/cli/__init__.py (+191/-0)
charmhelpers.new/cli/benchmark.py (+36/-0)
charmhelpers.new/cli/commands.py (+32/-0)
charmhelpers.new/cli/hookenv.py (+23/-0)
charmhelpers.new/cli/host.py (+31/-0)
charmhelpers.new/cli/unitdata.py (+39/-0)
charmhelpers.new/contrib/charmsupport/nrpe.py (+396/-0)
charmhelpers.new/contrib/hahelpers/cluster.py (+316/-0)
charmhelpers.new/contrib/network/ip.py (+456/-0)
charmhelpers.new/contrib/openstack/amulet/deployment.py (+297/-0)
charmhelpers.new/contrib/openstack/amulet/utils.py (+985/-0)
charmhelpers.new/contrib/openstack/context.py (+1457/-0)
charmhelpers.new/contrib/openstack/ip.py (+151/-0)
charmhelpers.new/contrib/openstack/neutron.py (+370/-0)
charmhelpers.new/contrib/openstack/templates/ceph.conf (+21/-0)
charmhelpers.new/contrib/openstack/templating.py (+323/-0)
charmhelpers.new/contrib/openstack/utils.py (+998/-0)
charmhelpers.new/contrib/python/packages.py (+121/-0)
charmhelpers.new/contrib/storage/linux/ceph.py (+673/-0)
charmhelpers.new/core/files.py (+45/-0)
charmhelpers.new/core/hookenv.py (+944/-0)
charmhelpers.new/core/host.py (+617/-0)
charmhelpers.new/core/hugepage.py (+71/-0)
charmhelpers.new/core/kernel.py (+68/-0)
charmhelpers.new/core/services/base.py (+353/-0)
charmhelpers.new/core/services/helpers.py (+286/-0)
charmhelpers.new/core/strutils.py (+72/-0)
charmhelpers.new/core/templating.py (+75/-0)
charmhelpers.new/core/unitdata.py (+521/-0)
charmhelpers.new/fetch/__init__.py (+456/-0)
charmhelpers.new/fetch/archiveurl.py (+167/-0)
charmhelpers.new/fetch/bzrurl.py (+82/-0)
charmhelpers.new/fetch/giturl.py (+73/-0)
hooks/glance_relations.py (+19/-3)
hooks/glance_utils.py (+115/-29)
metadata.yaml (+12/-2)
requirements/requirements-precise.txt (+6/-0)
requirements/requirements-trusty.txt (+6/-0)
requirements/test-requirements.txt (+7/-0)
tests/020-basic-trusty-liberty (+11/-0)
tests/021-basic-wily-liberty (+9/-0)
tests/052-basic-trusty-kilo-git (+12/-0)
tests/basic_deployment.py (+7/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+126/-9)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+650/-1)
tests/tests.yaml (+20/-0)
tox.ini (+35/-0)
unit_tests/test_actions.py (+162/-0)
unit_tests/test_actions_openstack_upgrade.py (+60/-0)
Conflict adding file .testr.conf.  Moved existing file to .testr.conf.moved.
Conflict adding file actions/__init__.py.  Moved existing file to actions/__init__.py.moved.
Conflict adding file actions/actions.py.  Moved existing file to actions/actions.py.moved.
Conflict adding file actions/charmhelpers.  Moved existing file to actions/charmhelpers.moved.
Conflict adding file actions/hooks.  Moved existing file to actions/hooks.moved.
Conflict adding file actions/openstack-upgrade.  Moved existing file to actions/openstack-upgrade.moved.
Conflict adding file actions/openstack_upgrade.py.  Moved existing file to actions/openstack_upgrade.py.moved.
Conflict adding file actions/pause.  Moved existing file to actions/pause.moved.
Conflict adding file actions/resume.  Moved existing file to actions/resume.moved.
Text conflict in charm-helpers-hooks.yaml
Conflict adding file charmhelpers.  Moved existing file to charmhelpers.moved.
Conflict: charmhelpers.new is not a directory, but has files in it.  Created directory.
Conflict adding files to charmhelpers.new/contrib.  Created directory.
Conflict because charmhelpers.new/contrib is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/charmsupport.  Created directory.
Conflict because charmhelpers.new/contrib/charmsupport is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/hahelpers.  Created directory.
Conflict because charmhelpers.new/contrib/hahelpers is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/network.  Created directory.
Conflict because charmhelpers.new/contrib/network is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack.  Created directory.
Conflict because charmhelpers.new/contrib/openstack is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/amulet.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/amulet is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/templates.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/templates is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/python.  Created directory.
Conflict because charmhelpers.new/contrib/python is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage.  Created directory.
Conflict because charmhelpers.new/contrib/storage is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage/linux.  Created directory.
Conflict because charmhelpers.new/contrib/storage/linux is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core.  Created directory.
Conflict because charmhelpers.new/core is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core/services.  Created directory.
Conflict because charmhelpers.new/core/services is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/fetch.  Created directory.
Conflict because charmhelpers.new/fetch is not versioned, but has versioned children.  Versioned directory.
Text conflict in hooks/glance_relations.py
Text conflict in hooks/glance_utils.py
Conflict adding file hooks/install.real.  Moved existing file to hooks/install.real.moved.
Conflict adding file hooks/update-status.  Moved existing file to hooks/update-status.moved.
Text conflict in metadata.yaml
Conflict adding file requirements.  Moved existing file to requirements.moved.
Conflict adding file tests/020-basic-trusty-liberty.  Moved existing file to tests/020-basic-trusty-liberty.moved.
Conflict adding file tests/021-basic-wily-liberty.  Moved existing file to tests/021-basic-wily-liberty.moved.
Conflict adding file tests/052-basic-trusty-kilo-git.  Moved existing file to tests/052-basic-trusty-kilo-git.moved.
Text conflict in tests/basic_deployment.py
Text conflict in tests/charmhelpers/contrib/openstack/amulet/deployment.py
Text conflict in tests/charmhelpers/contrib/openstack/amulet/utils.py
Conflict adding file tests/tests.yaml.  Moved existing file to tests/tests.yaml.moved.
Conflict adding file tox.ini.  Moved existing file to tox.ini.moved.
Conflict adding file unit_tests/test_actions.py.  Moved existing file to unit_tests/test_actions.py.moved.
Conflict adding file unit_tests/test_actions_openstack_upgrade.py.  Moved existing file to unit_tests/test_actions_openstack_upgrade.py.moved.
To merge this branch: bzr merge lp:~fcorrea/charms/trusty/glance/fix-pause-action
Reviewer Review Type Date Requested Status
Landscape Pending
Review via email: mp+278498@code.launchpad.net

This proposal has been superseded by a proposal from 2015-11-24.

Description of the change

This branch changes the pause action to update the kv database instead of calling set_os_workload_status directly, which is the same pattern used in the swift charm.
This prevents the charm from immediately bouncing back to active after a 'pause' action was performed.

A follow up branch will add a bit more logic to deal with hacluster so it stops sending requests for the unit.

To post a comment you must log in.
Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #14311 glance for fcorrea mp278498
    LINT OK: passed

Build: http://10.245.162.77:8080/job/charm_lint_check/14311/

Revision history for this message
uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #13339 glance for fcorrea mp278498
    UNIT OK: passed

Build: http://10.245.162.77:8080/job/charm_unit_test/13339/

Unmerged revisions

155. By Fernando Correa Neto

- backport changes from previous branch

154. By Fernando Correa Neto

- get new charmhelpers

153. By James Page

Update maintainer

152. By David Ames

[ionutbalutoiu, r=thedac] Adds additional variables to the image-service relation. These are available only when a relation with Swift object storage is present.
In case any charm needs to generate temporary URLs from Glance with Swift backend, it needs a temp-url-key which must be posted to Swift with the glance account. (Details: http://docs.openstack.org/liberty/config-reference/content/object-storage-tempurl.html)
This is needed for OpenStack Ironic charm (http://bazaar.launchpad.net/~cloudbaseit/charms/trusty/ironic/trunk/view/head:/hooks/ironic_context.py#L76), but might also be generally useful.

151. By Corey Bryant

[ddellav,r=corey.bryant] Action managed upgrade support.

150. By James Page

Refactor to assess status after every hook execution, add update-status hook

149. By Corey Bryant

[beisner,r=corey.bryant] Add Amulet test dependencies and run unit tests with -v.

148. By David Ames

[thedac,r=gnuoy] Make messaging an optional relation for workload status

147. By David Ames

[thedac, trivial] s/message/messaging

146. By Corey Bryant

[ack,r=corey.bryant] Fix amulet tests for pause/resume actions and sync charm-helpers

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file '.bzrignore'
--- .bzrignore 2015-04-01 16:48:59 +0000
+++ .bzrignore 2015-11-24 19:47:41 +0000
@@ -1,3 +1,5 @@
1.coverage1.coverage
2bin2bin
3tags3tags
4.tox
5.testrepository
46
=== added file '.testr.conf'
--- .testr.conf 1970-01-01 00:00:00 +0000
+++ .testr.conf 2015-11-24 19:47:41 +0000
@@ -0,0 +1,8 @@
1[DEFAULT]
2test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
3 OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
4 OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
5 ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
6
7test_id_option=--load-list $IDFILE
8test_list_option=--list
09
=== renamed file '.testr.conf' => '.testr.conf.moved'
=== added file 'actions/__init__.py'
=== renamed file 'actions/__init__.py' => 'actions/__init__.py.moved'
=== added file 'actions/actions.py'
--- actions/actions.py 1970-01-01 00:00:00 +0000
+++ actions/actions.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,59 @@
1#!/usr/bin/python
2
3import sys
4import os
5
6from charmhelpers.core.host import service_pause, service_resume
7from charmhelpers.core.hookenv import action_fail, status_set
8from charmhelpers.core.unitdata import HookData, kv
9
10from hooks.glance_utils import services, assess_status
11
12
def pause(args):
    """Pause all the Glance services.

    @raises Exception if any services fail to stop
    """
    # Stop every managed service; fail loudly on the first one that does
    # not stop so the action is reported as failed instead of leaving the
    # unit half-paused.
    for svc in services():
        if not service_pause(svc):
            raise Exception("{} didn't stop cleanly.".format(svc))
    # Persist the paused flag in the unit's kv store so assess_status()
    # keeps reporting 'paused' on subsequent hook runs instead of the
    # charm bouncing straight back to active.
    with HookData()():
        kv().set('unit-paused', True)
    state, message = assess_status()
    status_set(state, message)
26
27
def resume(args):
    """Resume all the Glance services.

    @raises Exception if any services fail to start
    """
    # Start each managed service, aborting on the first failure so the
    # action is marked failed rather than silently partially resumed.
    for svc in services():
        if not service_resume(svc):
            raise Exception("{} didn't start cleanly.".format(svc))
    status_set("active", "")
38
39
# A dictionary of all the defined actions to callables (which take
# parsed arguments).
ACTIONS = {"pause": pause, "resume": resume}


def main(args):
    """Dispatch to the action named by the invoking symlink.

    The action scripts are symlinks to this file, so args[0] carries the
    action name.  Returns an error string (printed by juju and treated
    as a failure) for an unknown action; known actions report their own
    failures through action_fail().
    """
    action_name = os.path.basename(args[0])
    action = ACTIONS.get(action_name)
    if action is None:
        return "Action %s undefined" % action_name
    try:
        action(args)
    except Exception as e:
        action_fail(str(e))


if __name__ == "__main__":
    sys.exit(main(sys.argv))
060
=== renamed file 'actions/actions.py' => 'actions/actions.py.moved'
=== added symlink 'actions/charmhelpers'
=== target is u'../charmhelpers'
=== renamed symlink 'actions/charmhelpers' => 'actions/charmhelpers.moved'
=== added symlink 'actions/hooks'
=== target is u'../hooks'
=== renamed symlink 'actions/hooks' => 'actions/hooks.moved'
=== added symlink 'actions/openstack-upgrade'
=== target is u'openstack_upgrade.py'
=== renamed symlink 'actions/openstack-upgrade' => 'actions/openstack-upgrade.moved'
=== added file 'actions/openstack_upgrade.py'
--- actions/openstack_upgrade.py 1970-01-01 00:00:00 +0000
+++ actions/openstack_upgrade.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,28 @@
1#!/usr/bin/python
2
3from charmhelpers.contrib.openstack.utils import (
4 do_action_openstack_upgrade,
5)
6
7from hooks.glance_relations import (
8 config_changed,
9 CONFIGS
10)
11
12from hooks.glance_utils import do_openstack_upgrade
13
14
def openstack_upgrade():
    """Upgrade packages to config-set Openstack version.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag must be set for this
    code to run, otherwise a full service level upgrade will fire
    on config-changed."""
    upgraded = do_action_openstack_upgrade('glance-common',
                                           do_openstack_upgrade,
                                           CONFIGS)
    # Re-run config-changed so templates and relations pick up the
    # freshly installed release.
    if upgraded:
        config_changed()


if __name__ == '__main__':
    openstack_upgrade()
029
=== renamed file 'actions/openstack_upgrade.py' => 'actions/openstack_upgrade.py.moved'
=== added symlink 'actions/pause'
=== target is u'actions.py'
=== renamed symlink 'actions/pause' => 'actions/pause.moved'
=== added symlink 'actions/resume'
=== target is u'actions.py'
=== renamed symlink 'actions/resume' => 'actions/resume.moved'
=== modified file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 2015-10-22 16:09:23 +0000
+++ charm-helpers-hooks.yaml 2015-11-24 19:47:41 +0000
@@ -1,5 +1,10 @@
1<<<<<<< TREE
1branch: lp:~openstack-charmers/charm-helpers/stable2branch: lp:~openstack-charmers/charm-helpers/stable
2destination: charmhelpers3destination: charmhelpers
4=======
5branch: lp:charm-helpers
6destination: charmhelpers
7>>>>>>> MERGE-SOURCE
3include:8include:
4 - core9 - core
5 - cli10 - cli
611
=== renamed directory 'charmhelpers' => 'charmhelpers.moved'
=== renamed symlink 'hooks/charmhelpers' => 'charmhelpers.new'
=== target was u'../charmhelpers'
=== added directory 'charmhelpers.new/cli'
=== added file 'charmhelpers.new/cli/__init__.py'
--- charmhelpers.new/cli/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/__init__.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,191 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import inspect
18import argparse
19import sys
20
21from six.moves import zip
22
23import charmhelpers.core.unitdata
24
25
class OutputFormatter(object):
    """Render a subcommand's return value in one of several text formats.

    Each format name doubles as the name of a method that writes the
    given output to ``self.outfile``.  NOTE: the format methods'
    docstrings are surfaced verbatim as argparse help text.
    """

    def __init__(self, outfile=sys.stdout):
        # Order matters: each format also gets a "-<first letter>" short
        # option, so this sequence decides which format claims a letter.
        self.formats = (
            "raw",
            "json",
            "py",
            "yaml",
            "csv",
            "tab",
        )
        self.outfile = outfile

    def add_arguments(self, argument_parser):
        # Install --format plus one short/long flag per format, all in a
        # mutually exclusive group on the supplied parser.
        group = argument_parser.add_mutually_exclusive_group()
        choices = self.supported_formats
        group.add_argument("--format", metavar='FMT',
                           help="Select output format for returned data, "
                                "where FMT is one of: {}".format(choices),
                           choices=choices, default='raw')
        for name in self.formats:
            handler = getattr(self, name)
            group.add_argument("-{}".format(name[0]),
                               "--{}".format(name), action='store_const',
                               const=name, dest='format',
                               help=handler.__doc__)

    @property
    def supported_formats(self):
        return self.formats

    def raw(self, output):
        """Output data as raw string (default)"""
        if isinstance(output, (list, tuple)):
            output = '\n'.join(str(item) for item in output)
        self.outfile.write(str(output))

    def py(self, output):
        """Output data as a nicely-formatted python data structure"""
        import pprint
        pprint.pprint(output, stream=self.outfile)

    def json(self, output):
        """Output data in JSON format"""
        import json
        json.dump(output, self.outfile)

    def yaml(self, output):
        """Output data in YAML format"""
        import yaml
        yaml.safe_dump(output, self.outfile)

    def csv(self, output):
        """Output data as excel-compatible CSV"""
        import csv
        writer = csv.writer(self.outfile)
        writer.writerows(output)

    def tab(self, output):
        """Output data in excel-compatible tab-delimited format"""
        import csv
        writer = csv.writer(self.outfile, dialect=csv.excel_tab)
        writer.writerows(output)

    def format_output(self, output, fmt='raw'):
        # Dispatch to the method named after the requested format.
        getattr(self, fmt)(output)
92
93
class CommandLine(object):
    """Registry and driver for charm-helpers CLI subcommands.

    Functions register themselves with the decorators below; run()
    parses sys.argv, invokes the selected function, and renders its
    return value through the shared OutputFormatter.
    """
    argument_parser = None
    subparsers = None
    formatter = None
    exit_code = 0

    def __init__(self):
        # Lazily build the parser machinery on first construction; the
        # assignments create instance attributes shadowing the class-level
        # None placeholders above.
        if not self.argument_parser:
            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
        if not self.formatter:
            self.formatter = OutputFormatter()
            self.formatter.add_arguments(self.argument_parser)
        if not self.subparsers:
            self.subparsers = self.argument_parser.add_subparsers(help='Commands')

    def subcommand(self, command_name=None):
        """
        Decorate a function as a subcommand. Use its arguments as the
        command-line arguments"""
        def wrapper(decorated):
            cmd_name = command_name or decorated.__name__
            subparser = self.subparsers.add_parser(cmd_name,
                                                   description=decorated.__doc__)
            # Mirror the function's signature onto argparse options.
            for args, kwargs in describe_arguments(decorated):
                subparser.add_argument(*args, **kwargs)
            subparser.set_defaults(func=decorated)
            return decorated
        return wrapper

    def test_command(self, decorated):
        """
        Subcommand is a boolean test function, so bool return values should be
        converted to a 0/1 exit code.
        """
        decorated._cli_test_command = True
        return decorated

    def no_output(self, decorated):
        """
        Subcommand is not expected to return a value, so don't print a spurious None.
        """
        decorated._cli_no_output = True
        return decorated

    def subcommand_builder(self, command_name, description=None):
        """
        Decorate a function that builds a subcommand. Builders should accept a
        single argument (the subparser instance) and return the function to be
        run as the command."""
        def wrapper(decorated):
            subparser = self.subparsers.add_parser(command_name)
            func = decorated(subparser)
            subparser.set_defaults(func=func)
            subparser.description = description or func.__doc__
            # BUGFIX: return the builder; the original fell off the end,
            # silently rebinding the decorated module-level name to None.
            return decorated
        return wrapper

    def run(self):
        "Run cli, processing arguments and executing subcommands."
        arguments = self.argument_parser.parse_args()
        # getfullargspec replaces getargspec, which was removed in Python
        # 3.11; for the plain positional/default/varargs signatures the
        # CLI registers, the two report identical args and varargs.
        argspec = inspect.getfullargspec(arguments.func)
        vargs = []
        for arg in argspec.args:
            vargs.append(getattr(arguments, arg))
        if argspec.varargs:
            vargs.extend(getattr(arguments, argspec.varargs))
        output = arguments.func(*vargs)
        if getattr(arguments.func, '_cli_test_command', False):
            # Boolean test commands map truthiness to a 0/1 exit code and
            # suppress their printed output.
            self.exit_code = 0 if output else 1
            output = ''
        if getattr(arguments.func, '_cli_no_output', False):
            output = ''
        self.formatter.format_output(output, arguments.format)
        # Flush any unitdata kv writes made by the command before exit.
        if charmhelpers.core.unitdata._KV:
            charmhelpers.core.unitdata._KV.flush()


cmdline = CommandLine()
171
172
def describe_arguments(func):
    """
    Analyze a function's signature and return a data structure suitable for
    passing in as arguments to an argparse parser's add_argument() method.

    Yields (args, kwargs) tuples: parameters with defaults become
    '--name' options carrying their default, remaining parameters become
    positionals, and a *varargs parameter becomes a positional with
    nargs='*'.
    """
    # getfullargspec supersedes getargspec, which was removed in Python
    # 3.11; for positional/default/varargs signatures it is equivalent.
    argspec = inspect.getfullargspec(func)
    # we should probably raise an exception somewhere if func includes **kwargs
    if argspec.defaults:
        positional_args = argspec.args[:-len(argspec.defaults)]
        keyword_names = argspec.args[-len(argspec.defaults):]
        for arg, default in zip(keyword_names, argspec.defaults):
            yield ('--{}'.format(arg),), {'default': default}
    else:
        positional_args = argspec.args

    for arg in positional_args:
        yield (arg,), {}
    if argspec.varargs:
        yield (argspec.varargs,), {'nargs': '*'}
0192
=== added file 'charmhelpers.new/cli/benchmark.py'
--- charmhelpers.new/cli/benchmark.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/benchmark.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,36 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.contrib.benchmark import Benchmark
19
20
@cmdline.subcommand(command_name='benchmark-start')
def start():
    # Expose Benchmark.start() as the 'benchmark-start' chlp subcommand;
    # it marks the beginning of a benchmark run on this unit.
    Benchmark.start()
24
25
@cmdline.subcommand(command_name='benchmark-finish')
def finish():
    # Expose Benchmark.finish() as the 'benchmark-finish' chlp
    # subcommand; it marks the end of a benchmark run on this unit.
    Benchmark.finish()
29
30
@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
def service(subparser):
    # Builder for the 'benchmark-composite' subcommand: declare its three
    # positional arguments, then hand back the charmhelpers callable that
    # actually records the composite score.
    subparser.add_argument("value", help="The composite score.")
    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
    return Benchmark.set_composite_score
037
=== added file 'charmhelpers.new/cli/commands.py'
--- charmhelpers.new/cli/commands.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/commands.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,32 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""
18This module loads sub-modules into the python runtime so they can be
19discovered via the inspect module. In order to prevent flake8 from (rightfully)
20telling us these are unused modules, throw a ' # noqa' at the end of each import
21so that the warning is suppressed.
22"""
23
24from . import CommandLine # noqa
25
26"""
27Import the sub-modules which have decorated subcommands to register with chlp.
28"""
29from . import host # noqa
30from . import benchmark # noqa
31from . import unitdata # noqa
32from . import hookenv # noqa
033
=== added file 'charmhelpers.new/cli/hookenv.py'
--- charmhelpers.new/cli/hookenv.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/hookenv.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,23 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import hookenv
19
20
# Register selected hookenv helpers as chlp subcommands.  For
# relation_id and remote_service_name the undecorated implementation
# (._wrapped) is registered rather than the decorated callable --
# NOTE(review): presumably because hookenv wraps them in a cache
# decorator that would defeat signature introspection; confirm upstream.
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
cmdline.subcommand('service-name')(hookenv.service_name)
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
024
=== added file 'charmhelpers.new/cli/host.py'
--- charmhelpers.new/cli/host.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/host.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,31 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import host
19
20
@cmdline.subcommand()
def mounts():
    "List mounts"
    # Thin CLI wrapper over charmhelpers.core.host.mounts(); the
    # docstring above doubles as the subcommand's argparse description.
    return host.mounts()
25
26
@cmdline.subcommand_builder('service', description="Control system services")
def service(subparser):
    # Builder for the 'service' subcommand: declare its two positional
    # arguments, then return host.service(action, service_name) as the
    # callable to execute.
    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
    subparser.add_argument("service_name", help="Name of the service to control")
    return host.service
032
=== added file 'charmhelpers.new/cli/unitdata.py'
--- charmhelpers.new/cli/unitdata.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/unitdata.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,39 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import unitdata
19
20
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
    # Build nested 'unitdata get <key>' and 'unitdata set <key> <value>'
    # subcommands on the supplied subparser.
    nested = subparser.add_subparsers()
    get_cmd = nested.add_parser('get', help='Retrieve data')
    get_cmd.add_argument('key', help='Key to retrieve the value of')
    # 'get' takes no value; default it to None so _unitdata_cmd's
    # three-argument signature fits both verbs.
    get_cmd.set_defaults(action='get', value=None)
    set_cmd = nested.add_parser('set', help='Store data')
    set_cmd.add_argument('key', help='Key to set')
    set_cmd.add_argument('value', help='Value to store')
    set_cmd.set_defaults(action='set')

    def _unitdata_cmd(action, key, value):
        # Dispatch on the verb chosen above.  NOTE(review): argparse
        # delivers values as strings, so 'set' always stores a string.
        if action == 'get':
            return unitdata.kv().get(key)
        elif action == 'set':
            unitdata.kv().set(key, value)
            unitdata.kv().flush()
            return ''
    return _unitdata_cmd
040
=== added directory 'charmhelpers.new/contrib'
=== added directory 'charmhelpers.new/contrib/charmsupport'
=== added file 'charmhelpers.new/contrib/charmsupport/nrpe.py'
--- charmhelpers.new/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/charmsupport/nrpe.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,396 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""Compatibility with the nrpe-external-master charm"""
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# Matthew Wedgwood <matthew.wedgwood@canonical.com>
22
23import subprocess
24import pwd
25import grp
26import os
27import glob
28import shutil
29import re
30import shlex
31import yaml
32
33from charmhelpers.core.hookenv import (
34 config,
35 local_unit,
36 log,
37 relation_ids,
38 relation_set,
39 relations_of_type,
40)
41
42from charmhelpers.core.host import service
43
44# This module adds compatibility with the nrpe-external-master and plain nrpe
45# subordinate charms. To use it in your charm:
46#
47# 1. Update metadata.yaml
48#
49# provides:
50# (...)
51# nrpe-external-master:
52# interface: nrpe-external-master
53# scope: container
54#
55# and/or
56#
57# provides:
58# (...)
59# local-monitors:
60# interface: local-monitors
61# scope: container
62
63#
64# 2. Add the following to config.yaml
65#
66# nagios_context:
67# default: "juju"
68# type: string
69# description: |
70# Used by the nrpe subordinate charms.
71# A string that will be prepended to instance name to set the host name
72# in nagios. So for instance the hostname would be something like:
73# juju-myservice-0
74# If you're running multiple environments with the same services in them
75# this allows you to differentiate between them.
76# nagios_servicegroups:
77# default: ""
78# type: string
79# description: |
80# A comma-separated list of nagios servicegroups.
81# If left empty, the nagios_context will be used as the servicegroup
82#
83# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
84#
85# 4. Update your hooks.py with something like this:
86#
87# from charmsupport.nrpe import NRPE
88# (...)
89# def update_nrpe_config():
90# nrpe_compat = NRPE()
91# nrpe_compat.add_check(
92# shortname = "myservice",
93# description = "Check MyService",
94# check_cmd = "check_http -w 2 -c 10 http://localhost"
95# )
96# nrpe_compat.add_check(
97# "myservice_other",
98# "Check for widget failures",
99# check_cmd = "/srv/myapp/scripts/widget_check"
100# )
101# nrpe_compat.write()
102#
103# def config_changed():
104# (...)
105# update_nrpe_config()
106#
107# def nrpe_external_master_relation_changed():
108# update_nrpe_config()
109#
110# def local_monitors_relation_changed():
111# update_nrpe_config()
112#
113# 5. ln -s hooks.py nrpe-external-master-relation-changed
114# ln -s hooks.py local-monitors-relation-changed
115
116
class CheckException(Exception):
    """Raised when an NRPE check definition is invalid (e.g. bad shortname)."""
    pass
119
120
class Check(object):
    """A single NRPE check: a shortname, a description and a command.

    Instances render both the nrpe command definition (under nrpe_confdir)
    and, when the export dir exists, a nagios service definition.
    """

    # Shortnames may only contain alphanumerics, '-' and '_'.
    shortname_re = '[A-Za-z0-9-_]+$'
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use active-service
    host_name {nagios_hostname}
    service_description {nagios_hostname}[{shortname}] """
                        """{description}
    check_command check_nrpe!{command}
    servicegroups {nagios_servicegroup}
}}
""")

    def __init__(self, shortname, description, check_cmd):
        """Validate the shortname and resolve check_cmd to a plugin path.

        :param shortname: check identifier, must match shortname_re
        :param description: human-readable description for nagios
        :param check_cmd: command line whose first word is a plugin name
        :raises CheckException: when shortname fails validation
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)

    def _get_check_filename(self):
        # Path of the nrpe command definition for this check.
        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))

    def _get_service_filename(self, hostname):
        # Path of the exported nagios service definition for this check.
        return os.path.join(NRPE.nagios_exportdir,
                            'service__{}_{}.cfg'.format(hostname, self.command))

    def _locate_cmd(self, check_cmd):
        """Resolve the plugin named by check_cmd against the nagios plugin
        directories; returns '' (and logs) when the plugin is not found."""
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    # Re-attach any arguments after the resolved binary.
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def _remove_service_files(self):
        # Delete every exported service definition for this command.
        if not os.path.exists(NRPE.nagios_exportdir):
            return
        for f in os.listdir(NRPE.nagios_exportdir):
            if f.endswith('_{}.cfg'.format(self.command)):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

    def remove(self, hostname):
        """Remove this check's config files.

        NOTE(review): 'hostname' is accepted but unused here; service
        files are matched by command-name suffix instead.
        """
        nrpe_check_file = self._get_check_filename()
        if os.path.exists(nrpe_check_file):
            os.remove(nrpe_check_file)
        self._remove_service_files()

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the nrpe command file and, when the export dir is
        accessible, the nagios service definition."""
        nrpe_check_file = self._get_check_filename()
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Render service_template into the nagios export dir, replacing
        any previous definitions for this command."""
        self._remove_service_files()

        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = self._get_service_filename(hostname)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        # NOTE(review): subprocess.call is given a single string; if
        # check_cmd carries arguments this fails without shell=True —
        # confirm intended usage.
        subprocess.call(self.check_cmd)
218
219
class NRPE(object):
    """Aggregates NRPE Check objects for this unit.

    Checks are queued with add_check()/remove_check(); write() renders
    their config files, restarts the nrpe server and publishes monitor
    definitions on any nrpe-related relations.
    """
    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'

    def __init__(self, hostname=None):
        """Derive nagios context, servicegroups and hostname from config.

        :param hostname: explicit nagios hostname; when None it is taken
            from the nrpe relation or built from context + unit name.
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            # Fall back to the context when no servicegroups configured.
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []

    def add_check(self, *args, **kwargs):
        """Queue a Check (same arguments as Check.__init__)."""
        self.checks.append(Check(*args, **kwargs))

    def remove_check(self, *args, **kwargs):
        """Remove an existing check's config files; 'shortname' required.

        :raises ValueError: when no shortname is supplied.
        """
        if kwargs.get('shortname') is None:
            raise ValueError('shortname of check must be specified')

        # Use sensible defaults if they're not specified - these are not
        # actually used during removal, but they're required for constructing
        # the Check object; check_disk is chosen because it's part of the
        # nagios-plugins-basic package.
        if kwargs.get('check_cmd') is None:
            kwargs['check_cmd'] = 'check_disk'
        if kwargs.get('description') is None:
            kwargs['description'] = ''

        check = Check(*args, **kwargs)
        check.remove(self.hostname)

    def write(self):
        """Write all queued checks, restart nrpe and publish monitors."""
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # pwd.getpwnam/grp.getgrnam raise KeyError when the nagios
            # user/group is absent.  The previous bare 'except:' also
            # swallowed unrelated errors such as KeyboardInterrupt.
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
290
291
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_host_context

    :param str relation_name: Name of relation nrpe sub joined to
    """
    for rel in relations_of_type(relation_name):
        if 'nagios_hostname' in rel:
            # The unit may advertise nagios_hostname without also setting
            # nagios_host_context; .get avoids a KeyError in that case
            # (the old rel['nagios_host_context'] lookup could raise).
            return rel.get('nagios_host_context')
301
302
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    """
    candidates = (rel['nagios_hostname']
                  for rel in relations_of_type(relation_name)
                  if 'nagios_hostname' in rel)
    return next(candidates, None)
312
313
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    unit = local_unit()
    return "%s:%s" % (host_context, unit) if host_context else unit
326
327
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add checks for each service in list.

    Upstart-managed services are checked via check_upstart_job; SysV
    services get a cron job that records their status to a file which
    check_status_file.py then inspects.

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc
        if os.path.exists(upstart_init):
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            # Context manager ensures the handle is closed even if the
            # write fails (the previous open/write/close leaked on error).
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
362
363
def copy_nrpe_checks():
    """
    Copy the charm's bundled NRPE check scripts into the local nagios
    plugin directory, creating it if necessary.
    """
    plugins_dir = '/usr/local/lib/nagios/plugins'
    source_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
                              'charmhelpers', 'contrib', 'openstack',
                              'files')

    if not os.path.exists(plugins_dir):
        os.makedirs(plugins_dir)
    for src in glob.glob(os.path.join(source_dir, "check_*")):
        if not os.path.isfile(src):
            continue
        shutil.copy2(src, os.path.join(plugins_dir, os.path.basename(src)))
380
381
def add_haproxy_checks(nrpe, unit_name):
    """
    Register the standard haproxy NRPE checks.

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    standard_checks = (
        ('haproxy_servers', 'Check HAProxy {%s}', 'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}',
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description, check_cmd in standard_checks:
        nrpe.add_check(shortname=shortname,
                       description=description % unit_name,
                       check_cmd=check_cmd)
0397
=== added directory 'charmhelpers.new/contrib/hahelpers'
=== added file 'charmhelpers.new/contrib/hahelpers/cluster.py'
--- charmhelpers.new/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/hahelpers/cluster.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,316 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17#
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# James Page <james.page@ubuntu.com>
22# Adam Gandelman <adamg@ubuntu.com>
23#
24
25"""
26Helpers for clustering and determining "cluster leadership" and other
27clustering-related helpers.
28"""
29
30import subprocess
31import os
32
33from socket import gethostname as get_unit_hostname
34
35import six
36
37from charmhelpers.core.hookenv import (
38 log,
39 relation_ids,
40 related_units as relation_list,
41 relation_get,
42 config as config_get,
43 INFO,
44 ERROR,
45 WARNING,
46 unit_get,
47 is_leader as juju_is_leader
48)
49from charmhelpers.core.decorators import (
50 retry_on_exception,
51)
52from charmhelpers.core.strutils import (
53 bool_from_string,
54)
55
# Sentinel resource name meaning "the pacemaker Designated Controller";
# is_crm_leader() special-cases it to query DC status instead of a resource.
DC_RESOURCE_NAME = 'DC'
57
58
class HAIncompleteConfig(Exception):
    """Raised when required hacluster settings are missing from config."""
    pass
61
62
class CRMResourceNotFound(Exception):
    """Raised when 'crm' reports the queried resource is not running."""
    pass
65
66
class CRMDCNotFound(Exception):
    """Raised when the pacemaker Designated Controller cannot be determined."""
    pass
69
70
def is_elected_leader(resource):
    """Return True if the charm executing this is the elected cluster leader.

    Leadership is determined by, in order of preference:
    1. Juju's native leader election, when the running Juju supports it.
    2. The corosync CRM, when the unit is part of a corosync cluster.
    3. Otherwise, "the alive unit with the lowest unit number" — i.e. the
       oldest surviving unit.
    """
    try:
        return juju_is_leader()
    except NotImplementedError:
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        if is_crm_leader(resource):
            return True
        log('Deferring action to CRM leader.', level=INFO)
        return False

    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
101
102
def is_clustered():
    """Return True if any unit on the 'ha' relation reports 'clustered'."""
    for rid in (relation_ids('ha') or []):
        for peer in (relation_list(rid) or []):
            if relation_get('clustered', rid=rid, unit=peer):
                return True
    return False
112
113
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller
    """
    try:
        raw = subprocess.check_output(['crm', 'status'],
                                      stderr=subprocess.STDOUT)
        if not isinstance(raw, six.text_type):
            raw = six.text_type(raw, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in raw.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')

    return False
137
138
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()

    try:
        output = subprocess.check_output(['crm', 'resource', 'show', resource],
                                         stderr=subprocess.STDOUT)
        if not isinstance(output, six.text_type):
            output = six.text_type(output, "utf-8")
    except subprocess.CalledProcessError:
        output = None

    if output:
        if get_unit_hostname() in output:
            return True
        if "resource %s is NOT running" % (resource) in output:
            raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False
166
167
def is_leader(resource):
    """Deprecated alias kept for callers; defer to is_crm_leader()."""
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)
172
173
def peer_units(peer_relation="cluster"):
    """Return a list of all units joined to the given peer relation."""
    return [unit
            for r_id in (relation_ids(peer_relation) or [])
            for unit in (relation_list(r_id) or [])]
180
181
def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    return {unit: relation_get(addr_key, rid=r_id, unit=unit)
            for r_id in relation_ids(peer_relation)
            for unit in relation_list(r_id)}
189
190
def oldest_peer(peers):
    """Determines who the oldest peer is by comparing unit numbers."""
    # Our own number comes from the JUJU_UNIT_NAME env var ('service/N').
    my_number = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= my_number for peer in peers)
199
200
def eligible_leader(resource):
    """Deprecated alias kept for callers; defer to is_elected_leader()."""
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)
205
206
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            required = (
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            )
            # NOTE: works around (LP: #1203241)
            if all(value not in (None, '') for value in required):
                return True
    return False
230
231
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    shift = 0
    # haproxy is in front of the API either in single-node mode or when
    # peers/cluster are present.
    if singlenode_mode or peer_units() or is_clustered():
        shift += 10
    # An HTTPS reverse proxy shifts the port once more.
    if https():
        shift += 10
    return public_port - shift
251
252
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    behind_haproxy = singlenode_mode or peer_units() or is_clustered()
    return public_port - (10 if behind_haproxy else 0)
270
271
def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

    ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)

    # Previously a list comprehension was (ab)used purely for its side
    # effect of appending; build the list of unset keys directly instead.
    missing = [s for s, v in conf.items() if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf
296
297
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs : OSTemplateRenderer: A config tempating object to inspect for
               a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
0317
=== added directory 'charmhelpers.new/contrib/network'
=== added file 'charmhelpers.new/contrib/network/ip.py'
--- charmhelpers.new/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/network/ip.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,456 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import re
19import subprocess
20import six
21import socket
22
23from functools import partial
24
25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install, apt_update
27from charmhelpers.core.hookenv import (
28 log,
29 WARNING,
30)
31
# netifaces/netaddr are not shipped in the base image; install them via
# apt on first import so the helpers below can rely on their presence.
try:
    import netifaces
except ImportError:
    apt_update(fatal=True)
    apt_install('python-netifaces', fatal=True)
    import netifaces

try:
    import netaddr
except ImportError:
    apt_update(fatal=True)
    apt_install('python-netaddr', fatal=True)
    import netaddr
45
46
def _validate_cidr(network):
    """Raise ValueError unless ``network`` parses as CIDR notation."""
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        msg = ("Network (%s) is not in CIDR presentation format" % network)
        raise ValueError(msg)
53
54
def no_ip_found_error_out(network):
    """Unconditionally raise ValueError reporting no IP found in network."""
    raise ValueError("No IP address found in network: %s" % network)
58
59
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    """
    if network is None:
        if fallback is not None:
            return fallback
        if fatal:
            no_ip_found_error_out(network)
        return None

    _validate_cidr(network)
    target = netaddr.IPNetwork(network)
    for nic in netifaces.interfaces():
        nic_addresses = netifaces.ifaddresses(nic)
        if target.version == 4 and netifaces.AF_INET in nic_addresses:
            # Only the first IPv4 entry of each interface is considered.
            entry = nic_addresses[netifaces.AF_INET][0]
            candidate = netaddr.IPNetwork("%s/%s" % (entry['addr'],
                                                     entry['netmask']))
            if candidate in target:
                return str(candidate.ip)

        if target.version == 6 and netifaces.AF_INET6 in nic_addresses:
            for entry in nic_addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80::/10) addresses.
                if entry['addr'].startswith('fe80'):
                    continue
                candidate = netaddr.IPNetwork("%s/%s" % (entry['addr'],
                                                         entry['netmask']))
                if candidate in target:
                    return str(candidate.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None
104
105
def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        return netaddr.IPAddress(address).version == 6
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False
115
116
def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    :raises ValueError: when either argument fails to parse.
    """
    try:
        network = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    # Collapsed the former 'if x: return True else: return False'.
    return address in network
143
144
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            # Only the first IPv4 entry per interface is considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Link-local (fe80::/10) entries are skipped.
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            # For IPv6 return the prefix length, not a mask.
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None
184
185
# Convenience wrappers over _get_for_address: resolve, for a bindable
# address, the owning interface name or its configured netmask.
get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')
190
191
def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    return "[%s]" % address if is_ipv6(address) else None
202
203
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any."""
    # Accept device paths such as '/dev/eth0' and reduce them to 'eth0'.
    iface = iface.split('/')[-1]

    exc_list = exc_list or []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    known = netifaces.interfaces()
    if inc_aliases:
        # Include alias interfaces such as 'eth0:1'.
        ifaces = sorted(candidate for candidate in known
                        if candidate == iface or
                        candidate.split(':')[0] == iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
    else:
        if iface not in known:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            return []
        ifaces = [iface]

    addresses = []
    for nic in ifaces:
        entries = netifaces.ifaddresses(nic).get(inet_num, [])
        for entry in entries:
            if 'addr' in entry and entry['addr'] not in exc_list:
                addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
253
254
# IPv4 flavour of get_iface_addr().
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
256
257
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    ll_suffix = re.compile("(.+)%.*")
    for nic in netifaces.interfaces():
        for family_entries in netifaces.ifaddresses(nic).values():
            for entry in family_entries:
                candidate = entry['addr']
                # Strip a link-local scope suffix ('%iface') if present.
                scoped = re.match(ll_suffix, candidate)
                if scoped:
                    candidate = scoped.group(1)

                if candidate == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, nic))
                    return nic

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
278
279
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    # Local import keeps the module's import block untouched.
    from functools import wraps

    @wraps(f)  # preserve the wrapped function's name/docstring
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer
292
293
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            # Link-local addresses carry an EUI-64 suffix and a '%iface'
            # scope; capture both and treat everything else as global.
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            # NOTE(review): eui_64_mac is only bound when a
                            # link-local address was seen above; with
                            # dynamic_only=True and no link-local entry this
                            # line can raise NameError — confirm upstream.
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
352
353
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    pattern = "%s/*/bridge" % vnic_dir
    return [path.replace(vnic_dir, '').split('/')[1]
            for path in glob.glob(pattern)]
358
359
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    pattern = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [entry.split('/')[-1] for entry in glob.glob(pattern)]
364
365
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
373
374
def is_ip(address):
    """
    Returns True if address is a valid IP address.
    """
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
385
386
def ns_query(address):
    """Resolve ``address`` via DNS and return the first answer as a string.

    Hostnames are resolved as A records, dns.name.Name objects as PTR;
    anything else yields None.
    """
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    answers = dns.resolver.query(address, rtype)
    return str(answers[0]) if answers else None
405
406
def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.

    Falls back to the system resolver when DNS gives no answer; returns
    ``fallback`` when all resolution attempts fail.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except (socket.error, UnicodeError):
            # Narrowed from a bare 'except:' which also swallowed
            # SystemExit/KeyboardInterrupt; gethostbyname raises
            # socket.gaierror (a socket.error) or UnicodeError for
            # malformed names.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
424
425
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    :param address: IP address or hostname.
    :param fqdn: when True return the full name (minus any trailing '.');
        otherwise only the first label.
    :returns: resolved name, or None when reverse resolution fails.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install("python-dnspython")
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            try:
                result = socket.gethostbyaddr(address)[0]
            except (socket.error, socket.herror, socket.gaierror):
                # Narrowed from a bare 'except:': reverse lookup failures
                # surface as socket errors; anything else should propagate.
                return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
0457
=== added directory 'charmhelpers.new/contrib/openstack'
=== added directory 'charmhelpers.new/contrib/openstack/amulet'
=== added file 'charmhelpers.new/contrib/openstack/amulet/deployment.py'
--- charmhelpers.new/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/amulet/deployment.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,297 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import logging
18import re
19import sys
20import six
21from collections import OrderedDict
22from charmhelpers.contrib.amulet.deployment import (
23 AmuletDeployment
24)
25
26DEBUG = logging.DEBUG
27ERROR = logging.ERROR
28
29
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy (e.g. 'trusty')
        :param openstack: openstack-origin value (e.g. 'cloud:trusty-kilo')
        :param source: 'source' config value for non-origin charms
        :param stable: deploy from stable branches when True, next otherwise
        :param log_level: logging level for the deployment logger
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresonding
        stable or next branches for the other_services."""

        self.log.info('OpenStackAmuletDeployment: determine branch locations')

        # Charms outside the lp:~openstack-charmers namespace
        base_charms = ['mysql', 'mongodb', 'nrpe']

        # Force these charms to current series even when using an older series.
        # ie. Use trusty/nrpe even when series is precise, as the P charm
        # does not possess the necessary external master config and hooks.
        force_series_current = ['nrpe']

        if self.series in ['precise', 'trusty']:
            base_series = self.series
        else:
            base_series = self.current_next

        for svc in other_services:
            # Fix: use the forced series only for this service instead of
            # reassigning base_series, which leaked the forced series to
            # every subsequent service in the loop.
            if svc['name'] in force_series_current:
                svc_series = self.current_next
            else:
                svc_series = base_series
            # If a location has been explicitly set, use it
            if svc.get('location'):
                continue
            if self.stable:
                temp = 'lp:charms/{}/{}'
                svc['location'] = temp.format(svc_series,
                                              svc['name'])
            else:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}/{}'
                    svc['location'] = temp.format(svc_series,
                                                  svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])

        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        self.log.info('OpenStackAmuletDeployment: adding services')

        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        # NOTE: this aliases (and so mutates) the caller's other_services
        # list by appending this_service — preserved for compatibility.
        services = other_services
        services.append(this_service)

        # Charms which should use the source config option
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        self.log.info('OpenStackAmuletDeployment: configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _auto_wait_for_status(self, message=None, exclude_services=None,
                              include_only=None, timeout=1800):
        """Wait for all units to have a specific extended status, except
        for any defined as excluded.  Unless specified via message, any
        status containing any case of 'ready' will be considered a match.

        Examples of message usage:

          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)

          Wait for all units to reach this status (exact match):
              message = re.compile('^Unit is ready and clustered$')

          Wait for all units to reach any one of these (exact match):
              message = re.compile('Unit is ready|OK|Ready')

          Wait for at least one unit to reach this status (exact match):
              message = {'ready'}

        See Amulet's sentry.wait_for_messages() for message usage detail.
        https://github.com/juju/amulet/blob/master/amulet/sentry.py

        :param message: Expected status match
        :param exclude_services: List of juju service names to ignore,
            not to be used in conjuction with include_only.
        :param include_only: List of juju service names to exclusively check,
            not to be used in conjuction with exclude_services.
        :param timeout: Maximum time in seconds to wait for status match
        :returns: None.  Raises if timeout is hit.
        """
        self.log.info('Waiting for extended status on units...')

        all_services = self.d.services.keys()

        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')

        if message:
            # Fix: re._pattern_type was a private alias removed in
            # Python 3.7 (re.Pattern replaced it); duck-type on the
            # .pattern attribute that compiled regexes expose instead.
            if hasattr(message, 'pattern'):
                match = message.pattern
            else:
                match = message

            self.log.debug('Custom extended status wait match: '
                           '{}'.format(match))
        else:
            self.log.debug('Default extended status wait match: contains '
                           'READY (case-insensitive)')
            message = re.compile('.*ready.*', re.IGNORECASE)

        if exclude_services:
            self.log.debug('Excluding services from extended status match: '
                           '{}'.format(exclude_services))
        else:
            exclude_services = []

        if include_only:
            services = include_only
        else:
            services = list(set(all_services) - set(exclude_services))

        self.log.debug('Waiting up to {}s for extended status on services: '
                       '{}'.format(timeout, services))
        service_messages = {service: message for service in services}
        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
        self.log.info('OK')

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
         self.wily_liberty) = range(12)

        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo,
            ('wily', None): self.wily_liberty}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
            ('wily', 'liberty'),
        ])
        if self.openstack:
            # e.g. 'cloud:trusty-kilo/updates' -> 'kilo'
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools in a ceph + cinder + glance
        test scenario, based on OpenStack release and whether ceph radosgw
        is flagged as present or not."""

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            pools = [
                'rbd',
                'cinder',
                'glance'
            ]
        else:
            # Juno or earlier
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
0298
=== added file 'charmhelpers.new/contrib/openstack/amulet/utils.py'
--- charmhelpers.new/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/amulet/utils.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,985 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import amulet
18import json
19import logging
20import os
21import re
22import six
23import time
24import urllib
25
26import cinderclient.v1.client as cinder_client
27import glanceclient.v1.client as glance_client
28import heatclient.v1.client as heat_client
29import keystoneclient.v2_0 as keystone_client
30import novaclient.v1_1.client as nova_client
31import pika
32import swiftclient
33
34from charmhelpers.contrib.amulet.utils import (
35 AmuletUtils
36)
37
38DEBUG = logging.DEBUG
39ERROR = logging.ERROR
40
41
42class OpenStackAmuletUtils(AmuletUtils):
43 """OpenStack amulet utilities.
44
45 This class inherits from AmuletUtils and has additional support
46 that is specifically for use by OpenStack charm tests.
47 """
48
    def __init__(self, log_level=ERROR):
        """Initialize the OpenStack amulet utilities.

        :param log_level: logging level forwarded to the AmuletUtils
                          base class (default: logging.ERROR).
        """
        super(OpenStackAmuletUtils, self).__init__(log_level)
52
53 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
54 public_port, expected):
55 """Validate endpoint data.
56
57 Validate actual endpoint data vs expected endpoint data. The ports
58 are used to find the matching endpoint.
59 """
60 self.log.debug('Validating endpoint data...')
61 self.log.debug('actual: {}'.format(repr(endpoints)))
62 found = False
63 for ep in endpoints:
64 self.log.debug('endpoint: {}'.format(repr(ep)))
65 if (admin_port in ep.adminurl and
66 internal_port in ep.internalurl and
67 public_port in ep.publicurl):
68 found = True
69 actual = {'id': ep.id,
70 'region': ep.region,
71 'adminurl': ep.adminurl,
72 'internalurl': ep.internalurl,
73 'publicurl': ep.publicurl,
74 'service_id': ep.service_id}
75 ret = self._validate_dict_data(expected, actual)
76 if ret:
77 return 'unexpected endpoint data - {}'.format(ret)
78
79 if not found:
80 return 'endpoint not found'
81
82 def validate_svc_catalog_endpoint_data(self, expected, actual):
83 """Validate service catalog endpoint data.
84
85 Validate a list of actual service catalog endpoints vs a list of
86 expected service catalog endpoints.
87 """
88 self.log.debug('Validating service catalog endpoint data...')
89 self.log.debug('actual: {}'.format(repr(actual)))
90 for k, v in six.iteritems(expected):
91 if k in actual:
92 ret = self._validate_dict_data(expected[k][0], actual[k][0])
93 if ret:
94 return self.endpoint_error(k, ret)
95 else:
96 return "endpoint {} does not exist".format(k)
97 return ret
98
99 def validate_tenant_data(self, expected, actual):
100 """Validate tenant data.
101
102 Validate a list of actual tenant data vs list of expected tenant
103 data.
104 """
105 self.log.debug('Validating tenant data...')
106 self.log.debug('actual: {}'.format(repr(actual)))
107 for e in expected:
108 found = False
109 for act in actual:
110 a = {'enabled': act.enabled, 'description': act.description,
111 'name': act.name, 'id': act.id}
112 if e['name'] == a['name']:
113 found = True
114 ret = self._validate_dict_data(e, a)
115 if ret:
116 return "unexpected tenant data - {}".format(ret)
117 if not found:
118 return "tenant {} does not exist".format(e['name'])
119 return ret
120
121 def validate_role_data(self, expected, actual):
122 """Validate role data.
123
124 Validate a list of actual role data vs a list of expected role
125 data.
126 """
127 self.log.debug('Validating role data...')
128 self.log.debug('actual: {}'.format(repr(actual)))
129 for e in expected:
130 found = False
131 for act in actual:
132 a = {'name': act.name, 'id': act.id}
133 if e['name'] == a['name']:
134 found = True
135 ret = self._validate_dict_data(e, a)
136 if ret:
137 return "unexpected role data - {}".format(ret)
138 if not found:
139 return "role {} does not exist".format(e['name'])
140 return ret
141
142 def validate_user_data(self, expected, actual):
143 """Validate user data.
144
145 Validate a list of actual user data vs a list of expected user
146 data.
147 """
148 self.log.debug('Validating user data...')
149 self.log.debug('actual: {}'.format(repr(actual)))
150 for e in expected:
151 found = False
152 for act in actual:
153 a = {'enabled': act.enabled, 'name': act.name,
154 'email': act.email, 'tenantId': act.tenantId,
155 'id': act.id}
156 if e['name'] == a['name']:
157 found = True
158 ret = self._validate_dict_data(e, a)
159 if ret:
160 return "unexpected user data - {}".format(ret)
161 if not found:
162 return "user {} does not exist".format(e['name'])
163 return ret
164
165 def validate_flavor_data(self, expected, actual):
166 """Validate flavor data.
167
168 Validate a list of actual flavors vs a list of expected flavors.
169 """
170 self.log.debug('Validating flavor data...')
171 self.log.debug('actual: {}'.format(repr(actual)))
172 act = [a.name for a in actual]
173 return self._validate_list_data(expected, act)
174
175 def tenant_exists(self, keystone, tenant):
176 """Return True if tenant exists."""
177 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
178 return tenant in [t.name for t in keystone.tenants.list()]
179
180 def authenticate_cinder_admin(self, keystone_sentry, username,
181 password, tenant):
182 """Authenticates admin user with cinder."""
183 # NOTE(beisner): cinder python client doesn't accept tokens.
184 service_ip = \
185 keystone_sentry.relation('shared-db',
186 'mysql:shared-db')['private-address']
187 ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
188 return cinder_client.Client(username, password, tenant, ept)
189
190 def authenticate_keystone_admin(self, keystone_sentry, user, password,
191 tenant):
192 """Authenticates admin user with the keystone admin endpoint."""
193 self.log.debug('Authenticating keystone admin...')
194 unit = keystone_sentry
195 service_ip = unit.relation('shared-db',
196 'mysql:shared-db')['private-address']
197 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
198 return keystone_client.Client(username=user, password=password,
199 tenant_name=tenant, auth_url=ep)
200
201 def authenticate_keystone_user(self, keystone, user, password, tenant):
202 """Authenticates a regular user with the keystone public endpoint."""
203 self.log.debug('Authenticating keystone user ({})...'.format(user))
204 ep = keystone.service_catalog.url_for(service_type='identity',
205 endpoint_type='publicURL')
206 return keystone_client.Client(username=user, password=password,
207 tenant_name=tenant, auth_url=ep)
208
209 def authenticate_glance_admin(self, keystone):
210 """Authenticates admin user with glance."""
211 self.log.debug('Authenticating glance admin...')
212 ep = keystone.service_catalog.url_for(service_type='image',
213 endpoint_type='adminURL')
214 return glance_client.Client(ep, token=keystone.auth_token)
215
216 def authenticate_heat_admin(self, keystone):
217 """Authenticates the admin user with heat."""
218 self.log.debug('Authenticating heat admin...')
219 ep = keystone.service_catalog.url_for(service_type='orchestration',
220 endpoint_type='publicURL')
221 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
222
223 def authenticate_nova_user(self, keystone, user, password, tenant):
224 """Authenticates a regular user with nova-api."""
225 self.log.debug('Authenticating nova user ({})...'.format(user))
226 ep = keystone.service_catalog.url_for(service_type='identity',
227 endpoint_type='publicURL')
228 return nova_client.Client(username=user, api_key=password,
229 project_id=tenant, auth_url=ep)
230
231 def authenticate_swift_user(self, keystone, user, password, tenant):
232 """Authenticates a regular user with swift api."""
233 self.log.debug('Authenticating swift user ({})...'.format(user))
234 ep = keystone.service_catalog.url_for(service_type='identity',
235 endpoint_type='publicURL')
236 return swiftclient.Connection(authurl=ep,
237 user=user,
238 key=password,
239 tenant_name=tenant,
240 auth_version='2.0')
241
242 def create_cirros_image(self, glance, image_name):
243 """Download the latest cirros image and upload it to glance,
244 validate and return a resource pointer.
245
246 :param glance: pointer to authenticated glance connection
247 :param image_name: display name for new image
248 :returns: glance image pointer
249 """
250 self.log.debug('Creating glance cirros image '
251 '({})...'.format(image_name))
252
253 # Download cirros image
254 http_proxy = os.getenv('AMULET_HTTP_PROXY')
255 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
256 if http_proxy:
257 proxies = {'http': http_proxy}
258 opener = urllib.FancyURLopener(proxies)
259 else:
260 opener = urllib.FancyURLopener()
261
262 f = opener.open('http://download.cirros-cloud.net/version/released')
263 version = f.read().strip()
264 cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
265 local_path = os.path.join('tests', cirros_img)
266
267 if not os.path.exists(local_path):
268 cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
269 version, cirros_img)
270 opener.retrieve(cirros_url, local_path)
271 f.close()
272
273 # Create glance image
274 with open(local_path) as f:
275 image = glance.images.create(name=image_name, is_public=True,
276 disk_format='qcow2',
277 container_format='bare', data=f)
278
279 # Wait for image to reach active status
280 img_id = image.id
281 ret = self.resource_reaches_status(glance.images, img_id,
282 expected_stat='active',
283 msg='Image status wait')
284 if not ret:
285 msg = 'Glance image failed to reach expected state.'
286 amulet.raise_status(amulet.FAIL, msg=msg)
287
288 # Re-validate new image
289 self.log.debug('Validating image attributes...')
290 val_img_name = glance.images.get(img_id).name
291 val_img_stat = glance.images.get(img_id).status
292 val_img_pub = glance.images.get(img_id).is_public
293 val_img_cfmt = glance.images.get(img_id).container_format
294 val_img_dfmt = glance.images.get(img_id).disk_format
295 msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
296 'container fmt:{} disk fmt:{}'.format(
297 val_img_name, val_img_pub, img_id,
298 val_img_stat, val_img_cfmt, val_img_dfmt))
299
300 if val_img_name == image_name and val_img_stat == 'active' \
301 and val_img_pub is True and val_img_cfmt == 'bare' \
302 and val_img_dfmt == 'qcow2':
303 self.log.debug(msg_attr)
304 else:
305 msg = ('Volume validation failed, {}'.format(msg_attr))
306 amulet.raise_status(amulet.FAIL, msg=msg)
307
308 return image
309
310 def delete_image(self, glance, image):
311 """Delete the specified image."""
312
313 # /!\ DEPRECATION WARNING
314 self.log.warn('/!\\ DEPRECATION WARNING: use '
315 'delete_resource instead of delete_image.')
316 self.log.debug('Deleting glance image ({})...'.format(image))
317 return self.delete_resource(glance.images, image, msg='glance image')
318
319 def create_instance(self, nova, image_name, instance_name, flavor):
320 """Create the specified instance."""
321 self.log.debug('Creating instance '
322 '({}|{}|{})'.format(instance_name, image_name, flavor))
323 image = nova.images.find(name=image_name)
324 flavor = nova.flavors.find(name=flavor)
325 instance = nova.servers.create(name=instance_name, image=image,
326 flavor=flavor)
327
328 count = 1
329 status = instance.status
330 while status != 'ACTIVE' and count < 60:
331 time.sleep(3)
332 instance = nova.servers.get(instance.id)
333 status = instance.status
334 self.log.debug('instance status: {}'.format(status))
335 count += 1
336
337 if status != 'ACTIVE':
338 self.log.error('instance creation timed out')
339 return None
340
341 return instance
342
343 def delete_instance(self, nova, instance):
344 """Delete the specified instance."""
345
346 # /!\ DEPRECATION WARNING
347 self.log.warn('/!\\ DEPRECATION WARNING: use '
348 'delete_resource instead of delete_instance.')
349 self.log.debug('Deleting instance ({})...'.format(instance))
350 return self.delete_resource(nova.servers, instance,
351 msg='nova instance')
352
353 def create_or_get_keypair(self, nova, keypair_name="testkey"):
354 """Create a new keypair, or return pointer if it already exists."""
355 try:
356 _keypair = nova.keypairs.get(keypair_name)
357 self.log.debug('Keypair ({}) already exists, '
358 'using it.'.format(keypair_name))
359 return _keypair
360 except:
361 self.log.debug('Keypair ({}) does not exist, '
362 'creating it.'.format(keypair_name))
363
364 _keypair = nova.keypairs.create(name=keypair_name)
365 return _keypair
366
    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                             img_id=None, src_vol_id=None, snap_id=None):
        """Create cinder volume, optionally from a glance image, OR
        optionally as a clone of an existing volume, OR optionally
        from a snapshot.  Wait for the new volume status to reach
        the expected status, validate and return a resource pointer.

        At most one of img_id, src_vol_id and snap_id may be given;
        supplying more than one aborts the test run via amulet.

        :param vol_name: cinder volume display name
        :param vol_size: size in gigabytes
        :param img_id: optional glance image id
        :param src_vol_id: optional source volume id to clone
        :param snap_id: optional snapshot id to use
        :returns: cinder volume pointer
        """
        # Handle parameter input and avoid impossible combinations
        if img_id and not src_vol_id and not snap_id:
            # Create volume from image
            self.log.debug('Creating cinder volume from glance image...')
            bootable = 'true'
        elif src_vol_id and not img_id and not snap_id:
            # Clone an existing volume; inherit its bootable flag
            self.log.debug('Cloning cinder volume...')
            bootable = cinder.volumes.get(src_vol_id).bootable
        elif snap_id and not src_vol_id and not img_id:
            # Create volume from snapshot; size and bootable flag come
            # from the snapshot's source volume, overriding vol_size.
            self.log.debug('Creating cinder volume from snapshot...')
            snap = cinder.volume_snapshots.find(id=snap_id)
            vol_size = snap.size
            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
            bootable = cinder.volumes.get(snap_vol_id).bootable
        elif not img_id and not src_vol_id and not snap_id:
            # Create volume
            self.log.debug('Creating cinder volume...')
            bootable = 'false'
        else:
            # Impossible combination of parameters
            msg = ('Invalid method use - name:{} size:{} img_id:{} '
                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
                                                     img_id, src_vol_id,
                                                     snap_id))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create new volume
        try:
            vol_new = cinder.volumes.create(display_name=vol_name,
                                            imageRef=img_id,
                                            size=vol_size,
                                            source_volid=src_vol_id,
                                            snapshot_id=snap_id)
            vol_id = vol_new.id
        except Exception as e:
            # amulet.raise_status raises, so vol_id is always bound below.
            msg = 'Failed to create volume: {}'.format(e)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Wait for volume to reach available status
        ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                           expected_stat="available",
                                           msg="Volume status wait")
        if not ret:
            msg = 'Cinder volume failed to reach expected state.'
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Re-validate new volume (each attribute access is a fresh API get)
        self.log.debug('Validating volume attributes...')
        val_vol_name = cinder.volumes.get(vol_id).display_name
        val_vol_boot = cinder.volumes.get(vol_id).bootable
        val_vol_stat = cinder.volumes.get(vol_id).status
        val_vol_size = cinder.volumes.get(vol_id).size
        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                    '{} size:{}'.format(val_vol_name, vol_id,
                                        val_vol_stat, val_vol_boot,
                                        val_vol_size))

        if val_vol_boot == bootable and val_vol_stat == 'available' \
                and val_vol_name == vol_name and val_vol_size == vol_size:
            self.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            amulet.raise_status(amulet.FAIL, msg=msg)

        return vol_new
448
449 def delete_resource(self, resource, resource_id,
450 msg="resource", max_wait=120):
451 """Delete one openstack resource, such as one instance, keypair,
452 image, volume, stack, etc., and confirm deletion within max wait time.
453
454 :param resource: pointer to os resource type, ex:glance_client.images
455 :param resource_id: unique name or id for the openstack resource
456 :param msg: text to identify purpose in logging
457 :param max_wait: maximum wait time in seconds
458 :returns: True if successful, otherwise False
459 """
460 self.log.debug('Deleting OpenStack resource '
461 '{} ({})'.format(resource_id, msg))
462 num_before = len(list(resource.list()))
463 resource.delete(resource_id)
464
465 tries = 0
466 num_after = len(list(resource.list()))
467 while num_after != (num_before - 1) and tries < (max_wait / 4):
468 self.log.debug('{} delete check: '
469 '{} [{}:{}] {}'.format(msg, tries,
470 num_before,
471 num_after,
472 resource_id))
473 time.sleep(4)
474 num_after = len(list(resource.list()))
475 tries += 1
476
477 self.log.debug('{}: expected, actual count = {}, '
478 '{}'.format(msg, num_before - 1, num_after))
479
480 if num_after == (num_before - 1):
481 return True
482 else:
483 self.log.error('{} delete timed out'.format(msg))
484 return False
485
486 def resource_reaches_status(self, resource, resource_id,
487 expected_stat='available',
488 msg='resource', max_wait=120):
489 """Wait for an openstack resources status to reach an
490 expected status within a specified time. Useful to confirm that
491 nova instances, cinder vols, snapshots, glance images, heat stacks
492 and other resources eventually reach the expected status.
493
494 :param resource: pointer to os resource type, ex: heat_client.stacks
495 :param resource_id: unique id for the openstack resource
496 :param expected_stat: status to expect resource to reach
497 :param msg: text to identify purpose in logging
498 :param max_wait: maximum wait time in seconds
499 :returns: True if successful, False if status is not reached
500 """
501
502 tries = 0
503 resource_stat = resource.get(resource_id).status
504 while resource_stat != expected_stat and tries < (max_wait / 4):
505 self.log.debug('{} status check: '
506 '{} [{}:{}] {}'.format(msg, tries,
507 resource_stat,
508 expected_stat,
509 resource_id))
510 time.sleep(4)
511 resource_stat = resource.get(resource_id).status
512 tries += 1
513
514 self.log.debug('{}: expected, actual status = {}, '
515 '{}'.format(msg, resource_stat, expected_stat))
516
517 if resource_stat == expected_stat:
518 return True
519 else:
520 self.log.debug('{} never reached expected status: '
521 '{}'.format(resource_id, expected_stat))
522 return False
523
524 def get_ceph_osd_id_cmd(self, index):
525 """Produce a shell command that will return a ceph-osd id."""
526 return ("`initctl list | grep 'ceph-osd ' | "
527 "awk 'NR=={} {{ print $2 }}' | "
528 "grep -o '[0-9]*'`".format(index + 1))
529
530 def get_ceph_pools(self, sentry_unit):
531 """Return a dict of ceph pools from a single ceph unit, with
532 pool name as keys, pool id as vals."""
533 pools = {}
534 cmd = 'sudo ceph osd lspools'
535 output, code = sentry_unit.run(cmd)
536 if code != 0:
537 msg = ('{} `{}` returned {} '
538 '{}'.format(sentry_unit.info['unit_name'],
539 cmd, code, output))
540 amulet.raise_status(amulet.FAIL, msg=msg)
541
542 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
543 for pool in str(output).split(','):
544 pool_id_name = pool.split(' ')
545 if len(pool_id_name) == 2:
546 pool_id = pool_id_name[0]
547 pool_name = pool_id_name[1]
548 pools[pool_name] = int(pool_id)
549
550 self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
551 pools))
552 return pools
553
554 def get_ceph_df(self, sentry_unit):
555 """Return dict of ceph df json output, including ceph pool state.
556
557 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
558 :returns: Dict of ceph df output
559 """
560 cmd = 'sudo ceph df --format=json'
561 output, code = sentry_unit.run(cmd)
562 if code != 0:
563 msg = ('{} `{}` returned {} '
564 '{}'.format(sentry_unit.info['unit_name'],
565 cmd, code, output))
566 amulet.raise_status(amulet.FAIL, msg=msg)
567 return json.loads(output)
568
569 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
570 """Take a sample of attributes of a ceph pool, returning ceph
571 pool name, object count and disk space used for the specified
572 pool ID number.
573
574 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
575 :param pool_id: Ceph pool ID
576 :returns: List of pool name, object count, kb disk space used
577 """
578 df = self.get_ceph_df(sentry_unit)
579 pool_name = df['pools'][pool_id]['name']
580 obj_count = df['pools'][pool_id]['stats']['objects']
581 kb_used = df['pools'][pool_id]['stats']['kb_used']
582 self.log.debug('Ceph {} pool (ID {}): {} objects, '
583 '{} kb used'.format(pool_name, pool_id,
584 obj_count, kb_used))
585 return pool_name, obj_count, kb_used
586
587 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
588 """Validate ceph pool samples taken over time, such as pool
589 object counts or pool kb used, before adding, after adding, and
590 after deleting items which affect those pool attributes. The
591 2nd element is expected to be greater than the 1st; 3rd is expected
592 to be less than the 2nd.
593
594 :param samples: List containing 3 data samples
595 :param sample_type: String for logging and usage context
596 :returns: None if successful, Failure message otherwise
597 """
598 original, created, deleted = range(3)
599 if samples[created] <= samples[original] or \
600 samples[deleted] >= samples[created]:
601 return ('Ceph {} samples ({}) '
602 'unexpected.'.format(sample_type, samples))
603 else:
604 self.log.debug('Ceph {} samples (OK): '
605 '{}'.format(sample_type, samples))
606 return None
607
608 # rabbitmq/amqp specific helpers:
609
610 def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
611 """Wait for rmq units extended status to show cluster readiness,
612 after an optional initial sleep period. Initial sleep is likely
613 necessary to be effective following a config change, as status
614 message may not instantly update to non-ready."""
615
616 if init_sleep:
617 time.sleep(init_sleep)
618
619 message = re.compile('^Unit is ready and clustered$')
620 deployment._auto_wait_for_status(message=message,
621 timeout=timeout,
622 include_only=['rabbitmq-server'])
623
624 def add_rmq_test_user(self, sentry_units,
625 username="testuser1", password="changeme"):
626 """Add a test user via the first rmq juju unit, check connection as
627 the new user against all sentry units.
628
629 :param sentry_units: list of sentry unit pointers
630 :param username: amqp user name, default to testuser1
631 :param password: amqp user password
632 :returns: None if successful. Raise on error.
633 """
634 self.log.debug('Adding rmq user ({})...'.format(username))
635
636 # Check that user does not already exist
637 cmd_user_list = 'rabbitmqctl list_users'
638 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
639 if username in output:
640 self.log.warning('User ({}) already exists, returning '
641 'gracefully.'.format(username))
642 return
643
644 perms = '".*" ".*" ".*"'
645 cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
646 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
647
648 # Add user via first unit
649 for cmd in cmds:
650 output, _ = self.run_cmd_unit(sentry_units[0], cmd)
651
652 # Check connection against the other sentry_units
653 self.log.debug('Checking user connect against units...')
654 for sentry_unit in sentry_units:
655 connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
656 username=username,
657 password=password)
658 connection.close()
659
660 def delete_rmq_test_user(self, sentry_units, username="testuser1"):
661 """Delete a rabbitmq user via the first rmq juju unit.
662
663 :param sentry_units: list of sentry unit pointers
664 :param username: amqp user name, default to testuser1
665 :param password: amqp user password
666 :returns: None if successful or no such user.
667 """
668 self.log.debug('Deleting rmq user ({})...'.format(username))
669
670 # Check that the user exists
671 cmd_user_list = 'rabbitmqctl list_users'
672 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
673
674 if username not in output:
675 self.log.warning('User ({}) does not exist, returning '
676 'gracefully.'.format(username))
677 return
678
679 # Delete the user
680 cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
681 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
682
683 def get_rmq_cluster_status(self, sentry_unit):
684 """Execute rabbitmq cluster status command on a unit and return
685 the full output.
686
687 :param unit: sentry unit
688 :returns: String containing console output of cluster status command
689 """
690 cmd = 'rabbitmqctl cluster_status'
691 output, _ = self.run_cmd_unit(sentry_unit, cmd)
692 self.log.debug('{} cluster_status:\n{}'.format(
693 sentry_unit.info['unit_name'], output))
694 return str(output)
695
    def get_rmq_cluster_running_nodes(self, sentry_unit):
        """Parse rabbitmqctl cluster_status output string, return list of
        running rabbitmq cluster nodes.

        :param sentry_unit: sentry unit
        :returns: List containing node names of running nodes, or an empty
            list if no running_nodes section was found in the output.
        """
        # NOTE(beisner): rabbitmqctl cluster_status output is not
        # json-parsable, do string chop foo, then json.loads that.
        str_stat = self.get_rmq_cluster_status(sentry_unit)
        if 'running_nodes' in str_stat:
            # Slice the erlang list out from between '{running_nodes,' and
            # ']},' (the +15 skips over len("{running_nodes,")), then swap
            # single quotes for double quotes so the fragment parses as a
            # json array of node name strings.
            pos_start = str_stat.find("{running_nodes,") + 15
            pos_end = str_stat.find("]},", pos_start) + 1
            str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
            run_nodes = json.loads(str_run_nodes)
            return run_nodes
        else:
            return []
714
    def validate_rmq_cluster_running_nodes(self, sentry_units):
        """Check that all rmq unit hostnames are represented in the
        cluster_status output of all units.

        :param sentry_units: list of sentry unit pointers (all rmq units)
        :returns: None if successful, otherwise return error message
        """
        host_names = self.get_unit_hostnames(sentry_units)
        errors = []

        # Query every unit for cluster_status running nodes
        for query_unit in sentry_units:
            query_unit_name = query_unit.info['unit_name']
            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)

            # Confirm that every unit is represented in the queried unit's
            # cluster_status running nodes output.
            for validate_unit in sentry_units:
                val_host_name = host_names[validate_unit.info['unit_name']]
                val_node_name = 'rabbit@{}'.format(val_host_name)

                if val_node_name not in running_nodes:
                    errors.append('Cluster member check failed on {}: {} not '
                                  'in {}\n'.format(query_unit_name,
                                                   val_node_name,
                                                   running_nodes))
        if errors:
            return ''.join(errors)
        # Implicitly returns None when every node was seen by every unit.
744
    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
        """Check a single juju rmq unit for ssl and port in the config file.

        :param sentry_unit: sentry unit pointer
        :param port: optional port; when given, the port number must also
            appear in the config file for ssl to count as enabled.
        :returns: True/False; raises amulet FAIL on an unclassifiable state.
        """
        host = sentry_unit.info['public-address']
        unit_name = sentry_unit.info['unit_name']

        conf_file = '/etc/rabbitmq/rabbitmq.config'
        conf_contents = str(self.file_contents_safe(sentry_unit,
                                                    conf_file, max_wait=16))
        # Checks
        # NOTE(review): plain substring checks against the whole config
        # file, not a parse -- a commented-out 'ssl' or an unrelated
        # occurrence of the port digits would also match.
        conf_ssl = 'ssl' in conf_contents
        conf_port = str(port) in conf_contents

        # Port explicitly checked in config
        if port and conf_port and conf_ssl:
            self.log.debug('SSL is enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif port and not conf_port and conf_ssl:
            self.log.debug('SSL is enabled @{} but not on port {} '
                           '({})'.format(host, port, unit_name))
            return False
        # Port not checked (useful when checking that ssl is disabled)
        elif not port and conf_ssl:
            self.log.debug('SSL is enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return True
        elif not conf_ssl:
            self.log.debug('SSL not enabled @{}:{} '
                           '({})'.format(host, port, unit_name))
            return False
        else:
            msg = ('Unknown condition when checking SSL status @{}:{} '
                   '({})'.format(host, port, unit_name))
            amulet.raise_status(amulet.FAIL, msg)
779
780 def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
781 """Check that ssl is enabled on rmq juju sentry units.
782
783 :param sentry_units: list of all rmq sentry units
784 :param port: optional ssl port override to validate
785 :returns: None if successful, otherwise return error message
786 """
787 for sentry_unit in sentry_units:
788 if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
789 return ('Unexpected condition: ssl is disabled on unit '
790 '({})'.format(sentry_unit.info['unit_name']))
791 return None
792
    def validate_rmq_ssl_disabled_units(self, sentry_units):
        """Check that ssl is disabled on the listed rmq juju sentry units.

        :param sentry_units: list of all rmq sentry units
        :returns: None if successful, otherwise an error message string.
        """
        for sentry_unit in sentry_units:
            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
                return ('Unexpected condition: ssl is enabled on unit '
                        '({})'.format(sentry_unit.info['unit_name']))
        return None
804
805 def configure_rmq_ssl_on(self, sentry_units, deployment,
806 port=None, max_wait=60):
807 """Turn ssl charm config option on, with optional non-default
808 ssl port specification. Confirm that it is enabled on every
809 unit.
810
811 :param sentry_units: list of sentry units
812 :param deployment: amulet deployment object pointer
813 :param port: amqp port, use defaults if None
814 :param max_wait: maximum time to wait in seconds to confirm
815 :returns: None if successful. Raise on error.
816 """
817 self.log.debug('Setting ssl charm config option: on')
818
819 # Enable RMQ SSL
820 config = {'ssl': 'on'}
821 if port:
822 config['ssl_port'] = port
823
824 deployment.d.configure('rabbitmq-server', config)
825
826 # Wait for unit status
827 self.rmq_wait_for_cluster(deployment)
828
829 # Confirm
830 tries = 0
831 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
832 while ret and tries < (max_wait / 4):
833 time.sleep(4)
834 self.log.debug('Attempt {}: {}'.format(tries, ret))
835 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
836 tries += 1
837
838 if ret:
839 amulet.raise_status(amulet.FAIL, ret)
840
841 def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
842 """Turn ssl charm config option off, confirm that it is disabled
843 on every unit.
844
845 :param sentry_units: list of sentry units
846 :param deployment: amulet deployment object pointer
847 :param max_wait: maximum time to wait in seconds to confirm
848 :returns: None if successful. Raise on error.
849 """
850 self.log.debug('Setting ssl charm config option: off')
851
852 # Disable RMQ SSL
853 config = {'ssl': 'off'}
854 deployment.d.configure('rabbitmq-server', config)
855
856 # Wait for unit status
857 self.rmq_wait_for_cluster(deployment)
858
859 # Confirm
860 tries = 0
861 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
862 while ret and tries < (max_wait / 4):
863 time.sleep(4)
864 self.log.debug('Attempt {}: {}'.format(tries, ret))
865 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
866 tries += 1
867
868 if ret:
869 amulet.raise_status(amulet.FAIL, ret)
870
    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
                             port=None, fatal=True,
                             username="testuser1", password="changeme"):
        """Establish and return a pika amqp connection to the rabbitmq service
        running on a rmq juju unit.

        :param sentry_unit: sentry unit pointer
        :param ssl: boolean, default to False
        :param port: amqp port, use defaults if None
        :param fatal: boolean, default to True (raises on connect error)
        :param username: amqp user name, default to testuser1
        :param password: amqp user password
        :returns: pika amqp connection pointer or None if failed and non-fatal
        """
        host = sentry_unit.info['public-address']
        unit_name = sentry_unit.info['unit_name']

        # Default port logic if port is not specified
        # (5671 with ssl, 5672 without).
        if ssl and not port:
            port = 5671
        elif not ssl and not port:
            port = 5672

        self.log.debug('Connecting to amqp on {}:{} ({}) as '
                       '{}...'.format(host, port, unit_name, username))

        try:
            credentials = pika.PlainCredentials(username, password)
            parameters = pika.ConnectionParameters(host=host, port=port,
                                                   credentials=credentials,
                                                   ssl=ssl,
                                                   connection_attempts=3,
                                                   retry_delay=5,
                                                   socket_timeout=1)
            connection = pika.BlockingConnection(parameters)
            # Sanity check that we really reached a rabbitmq broker.
            assert connection.server_properties['product'] == 'RabbitMQ'
            self.log.debug('Connect OK')
            return connection
        except Exception as e:
            # Broad catch is deliberate: any connect failure is either
            # fatal (amulet FAIL) or logged and signalled with None.
            msg = ('amqp connection failed to {}:{} as '
                   '{} ({})'.format(host, port, username, str(e)))
            if fatal:
                amulet.raise_status(amulet.FAIL, msg)
            else:
                self.log.warn(msg)
                return None
917
    def publish_amqp_message_by_unit(self, sentry_unit, message,
                                     queue="test", ssl=False,
                                     username="testuser1",
                                     password="changeme",
                                     port=None):
        """Publish an amqp message to a rmq juju unit.

        :param sentry_unit: sentry unit pointer
        :param message: amqp message string
        :param queue: message queue, default to test
        :param username: amqp user name, default to testuser1
        :param password: amqp user password
        :param ssl: boolean, default to False
        :param port: amqp port, use defaults if None
        :returns: None. Raises exception if publish failed.
        """
        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
                                                                    message))
        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                               port=port,
                                               username=username,
                                               password=password)

        # NOTE(beisner): extra debug here re: pika hang potential:
        # https://github.com/pika/pika/issues/297
        # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
        self.log.debug('Defining channel...')
        channel = connection.channel()
        self.log.debug('Declaring queue...')
        # Durable, non-auto-delete queue so the message survives until a
        # later consumer retrieves it.
        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
        self.log.debug('Publishing message...')
        channel.basic_publish(exchange='', routing_key=queue, body=message)
        self.log.debug('Closing channel...')
        channel.close()
        self.log.debug('Closing connection...')
        connection.close()
954
955 def get_amqp_message_by_unit(self, sentry_unit, queue="test",
956 username="testuser1",
957 password="changeme",
958 ssl=False, port=None):
959 """Get an amqp message from a rmq juju unit.
960
961 :param sentry_unit: sentry unit pointer
962 :param queue: message queue, default to test
963 :param username: amqp user name, default to testuser1
964 :param password: amqp user password
965 :param ssl: boolean, default to False
966 :param port: amqp port, use defaults if None
967 :returns: amqp message body as string. Raise if get fails.
968 """
969 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
970 port=port,
971 username=username,
972 password=password)
973 channel = connection.channel()
974 method_frame, _, body = channel.basic_get(queue)
975
976 if method_frame:
977 self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
978 body))
979 channel.basic_ack(method_frame.delivery_tag)
980 channel.close()
981 connection.close()
982 return body
983 else:
984 msg = 'No message retrieved.'
985 amulet.raise_status(amulet.FAIL, msg)
0986
=== added file 'charmhelpers.new/contrib/openstack/context.py'
--- charmhelpers.new/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/context.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,1457 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import json
19import os
20import re
21import time
22from base64 import b64decode
23from subprocess import check_call
24
25import six
26import yaml
27
28from charmhelpers.fetch import (
29 apt_install,
30 filter_installed_packages,
31)
32from charmhelpers.core.hookenv import (
33 config,
34 is_relation_made,
35 local_unit,
36 log,
37 relation_get,
38 relation_ids,
39 related_units,
40 relation_set,
41 unit_get,
42 unit_private_ip,
43 charm_name,
44 DEBUG,
45 INFO,
46 WARNING,
47 ERROR,
48)
49
50from charmhelpers.core.sysctl import create as sysctl_create
51from charmhelpers.core.strutils import bool_from_string
52
53from charmhelpers.core.host import (
54 get_bond_master,
55 is_phy_iface,
56 list_nics,
57 get_nic_hwaddr,
58 mkdir,
59 write_file,
60)
61from charmhelpers.contrib.hahelpers.cluster import (
62 determine_apache_port,
63 determine_api_port,
64 https,
65 is_clustered,
66)
67from charmhelpers.contrib.hahelpers.apache import (
68 get_cert,
69 get_ca_cert,
70 install_ca_cert,
71)
72from charmhelpers.contrib.openstack.neutron import (
73 neutron_plugin_attribute,
74 parse_data_port_mappings,
75)
76from charmhelpers.contrib.openstack.ip import (
77 resolve_address,
78 INTERNAL,
79)
80from charmhelpers.contrib.network.ip import (
81 get_address_in_network,
82 get_ipv4_addr,
83 get_ipv6_addr,
84 get_netmask_for_address,
85 format_ipv6_addr,
86 is_address_in_network,
87 is_bridge_member,
88)
89from charmhelpers.contrib.openstack.utils import get_host_ip
90CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
91ADDRESS_TYPES = ['admin', 'internal', 'public']
92
93
class OSContextError(Exception):
    """Raised when a context cannot be generated, typically because
    required charm config options are missing or malformed."""
    pass
96
97
def ensure_packages(packages):
    """Install but do not upgrade required plugin packages."""
    missing = filter_installed_packages(packages)
    if missing:
        apt_install(missing, fatal=True)
103
104
def context_complete(ctxt):
    """Return True when no value in *ctxt* is None or the empty string;
    otherwise log the missing keys and return False."""
    missing = [key for key, value in six.iteritems(ctxt)
               if value is None or value == '']
    if missing:
        log('Missing required data: %s' % ' '.join(missing), level=INFO)
        return False
    return True
116
117
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

    1. A string in the simple format of key=value pairs, with the possibility
       of specifying multiple key value pairs within the same string. For
       example, a string in the format of 'key1=value1, key2=value2' will
       return a dict of:

           {'key1': 'value1',
            'key2': 'value2'}.

    2. A string in the above format, but supporting a comma-delimited list
       of values for the same key. For example, a string in the format of
       'key1=value1, key2=value3,value4,value5' will return a dict of:

           {'key1', 'value1',
            'key2', 'value2,value3,value4'}

    3. A string containing a colon character (:) prior to an equal
       character (=) will be treated as yaml and parsed as such. This can be
       used to specify more complex key value pairs. For example,
       a string in the format of 'key1: subkey1=value1, subkey2=value2' will
       return a dict of:

           {'key1', 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated values
    which themselves may be comma-separated list of values.
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates assignment
    # for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        log("config_flags is not in expected format (key=value)", level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    for i in range(0, limit - 1):
        current = split[i]
        next = split[i + 1]
        # The value is everything in the next token up to its last comma
        # (the fragment after that comma is the following key) -- except
        # for the final pair, where the whole token is the value.
        vindex = next.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            value = next
        else:
            value = next[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                log("Invalid config value(s) at index %s" % (i), level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
193
194
class OSContextGenerator(object):
    """Base class for all context generators."""
    # Relation interface names this context depends on.
    interfaces = []
    # True once at least one interface in self.interfaces has relation ids.
    related = False
    # True once context_complete() found no missing data.
    complete = False
    # Keys found to be None/empty on the last context_complete() call.
    missing_data = []

    def __call__(self):
        raise NotImplementedError

    def context_complete(self, ctxt):
        """Check for missing data for the required context data.
        Set self.missing_data if it exists and return False.
        Set self.complete if no missing data and return True.
        """
        # Fresh start
        self.complete = False
        self.missing_data = []
        for k, v in six.iteritems(ctxt):
            if v is None or v == '':
                if k not in self.missing_data:
                    self.missing_data.append(k)

        if self.missing_data:
            self.complete = False
            log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
        else:
            self.complete = True
        return self.complete

    def get_related(self):
        """Check if any of the context interfaces have relation ids.
        Set self.related and return True if one of the interfaces
        has relation ids.
        """
        # Fresh start
        self.related = False
        try:
            for interface in self.interfaces:
                if relation_ids(interface):
                    self.related = True
                    return self.related
        except AttributeError as e:
            # NOTE(review): tolerates subclasses with an unset/odd
            # `interfaces`; logs and falls through to the default False.
            log("{} {}"
                "".format(self, e), 'INFO')
        return self.related
241
242
class SharedDBContext(OSContextGenerator):
    """Context generator for the shared-db (mysql) relation."""
    interfaces = ['shared-db']

    def __init__(self,
                 database=None, user=None, relation_prefix=None, ssl_dir=None):
        """Allows inspecting relation for settings prefixed with
        relation_prefix. This is useful for parsing access for multiple
        databases returned via the shared-db interface (eg, nova_password,
        quantum_password)
        """
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user
        self.ssl_dir = ssl_dir
        self.rel_name = self.interfaces[0]

    def __call__(self):
        # Charm config supplies defaults when not given to the constructor.
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log("Could not generate shared_db context. Missing required charm "
                "config options. (database name and user)", level=ERROR)
            raise OSContextError

        ctxt = {}

        # NOTE(jamespage) if mysql charm provides a network upon which
        # access to the database should be made, reconfigure relation
        # with the service units local address and defer execution
        access_network = relation_get('access-network')
        if access_network is not None:
            if self.relation_prefix is not None:
                hostname_key = "{}_hostname".format(self.relation_prefix)
            else:
                hostname_key = "hostname"
            access_hostname = get_address_in_network(access_network,
                                                     unit_get('private-address'))
            set_hostname = relation_get(attribute=hostname_key,
                                        unit=local_unit())
            if set_hostname != access_hostname:
                relation_set(relation_settings={hostname_key: access_hostname})
                return None  # Defer any further hook execution for now....

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        # Use the first related unit that yields a complete context.
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                host = rdata.get('db_host')
                host = format_ipv6_addr(host) or host
                ctxt = {
                    'database_host': host,
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': rdata.get(password_setting),
                    'database_type': 'mysql'
                }
                if self.context_complete(ctxt):
                    # Augment with SSL material published on the relation.
                    db_ssl(rdata, ctxt, self.ssl_dir)
                    return ctxt
        return {}
307
308
class PostgresqlDBContext(OSContextGenerator):
    """Context generator for the pgsql-db relation."""
    interfaces = ['pgsql-db']

    def __init__(self, database=None):
        self.database = database

    def __call__(self):
        # Fall back to the charm config option when no database was given.
        self.database = self.database or config('database')
        if self.database is None:
            log('Could not generate postgresql_db context. Missing required '
                'charm config options. (database name)', level=ERROR)
            raise OSContextError

        ctxt = {}
        # Use the first related unit that yields a complete context.
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                ctxt = {
                    'database_host': relation_get('host', rid=rid, unit=unit),
                    'database': self.database,
                    'database_user': relation_get('user', rid=rid, unit=unit),
                    'database_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'database_type': 'postgresql',
                }
                if self.context_complete(ctxt):
                    return ctxt

        return {}
338
339
def db_ssl(rdata, ctxt, ssl_dir):
    """Write SSL material published in database relation data to *ssl_dir*
    and record the resulting file paths in *ctxt*.

    :param rdata: relation data dict from the database unit (values are
        base64-encoded PEM blobs).
    :param ctxt: database context dict to augment; mutated and returned.
    :param ssl_dir: directory to write SSL files into.  If falsy while a CA
        is published, SSL data is ignored and ctxt returned unchanged.
    :returns: ctxt
    """
    if 'ssl_ca' in rdata and ssl_dir:
        ca_path = os.path.join(ssl_dir, 'db-client.ca')
        with open(ca_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_ca']))

        ctxt['database_ssl_ca'] = ca_path
    elif 'ssl_ca' in rdata:
        log("Charm not setup for ssl support but ssl ca found", level=INFO)
        return ctxt

    if 'ssl_cert' in rdata:
        cert_path = os.path.join(
            ssl_dir, 'db-client.cert')
        if not os.path.exists(cert_path):
            # First sighting of the cert: allow for clock skew between
            # units before the cert becomes valid.
            log("Waiting 1m for ssl client cert validity", level=INFO)
            time.sleep(60)

        with open(cert_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_cert']))

        ctxt['database_ssl_cert'] = cert_path
        # NOTE(review): assumes 'ssl_key' always accompanies 'ssl_cert';
        # a cert without a key would raise KeyError here -- confirm against
        # the database charm's relation contract.
        key_path = os.path.join(ssl_dir, 'db-client.key')
        with open(key_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_key']))

        ctxt['database_ssl_key'] = key_path

    return ctxt
369
370
class IdentityServiceContext(OSContextGenerator):
    """Context generator for the identity-service (keystone) relation:
    resolves auth/service endpoints and admin credentials."""

    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
        # service/service_user drive creation of a per-service token
        # signing cache dir owned by the service user.
        self.service = service
        self.service_user = service_user
        self.rel_name = rel_name
        self.interfaces = [self.rel_name]

    def __call__(self):
        log('Generating template context for ' + self.rel_name, level=DEBUG)
        ctxt = {}

        if self.service and self.service_user:
            # This is required for pki token signing if we don't want /tmp to
            # be used.
            cachedir = '/var/cache/%s' % (self.service)
            if not os.path.isdir(cachedir):
                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
                mkdir(path=cachedir, owner=self.service_user,
                      group=self.service_user, perms=0o700)

            ctxt['signing_dir'] = cachedir

        # Use the first related unit that yields a complete context.
        for rid in relation_ids(self.rel_name):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                serv_host = rdata.get('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                svc_protocol = rdata.get('service_protocol') or 'http'
                auth_protocol = rdata.get('auth_protocol') or 'http'
                ctxt.update({'service_port': rdata.get('service_port'),
                             'service_host': serv_host,
                             'auth_host': auth_host,
                             'auth_port': rdata.get('auth_port'),
                             'admin_tenant_name': rdata.get('service_tenant'),
                             'admin_user': rdata.get('service_username'),
                             'admin_password': rdata.get('service_password'),
                             'service_protocol': svc_protocol,
                             'auth_protocol': auth_protocol})

                if self.context_complete(ctxt):
                    # NOTE(jamespage) this is required for >= icehouse
                    # so a missing value just indicates keystone needs
                    # upgrading
                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                    return ctxt

        return {}
422
423
class AMQPContext(OSContextGenerator):
    """Context generator for the amqp (rabbitmq) relation: resolves the
    rabbitmq host(s), credentials, vhost and optional SSL settings."""

    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
        """
        :param ssl_dir: directory to write the broker CA cert into when the
            relation publishes one; if None, SSL CA data is ignored.
        :param rel_name: relation name to inspect (default 'amqp').
        :param relation_prefix: optional prefix for the charm config option
            names (eg '<prefix>-rabbit-user').
        """
        self.ssl_dir = ssl_dir
        self.rel_name = rel_name
        self.relation_prefix = relation_prefix
        self.interfaces = [rel_name]

    def __call__(self):
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            # Fix: previous message incorrectly said "shared_db context".
            log('Could not generate amqp context. Missing required charm '
                'config options: %s.' % e, level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            self.related = True
            for unit in related_units(rid):
                if relation_get('clustered', rid=rid, unit=unit):
                    # Clustered rabbitmq: address the VIP, not the unit.
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if self.context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log("Charm not setup for ssl support but ssl ca "
                                "found", level=INFO)
                            break

                        # Persist the CA so clients can verify the broker.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only) and
                    len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        if not self.complete:
            return {}

        return ctxt
522
523
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        mon_hosts = []
        ctxt = {
            'use_syslog': str(config('use-syslog')).lower()
        }
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                # auth/key are taken from the first unit that provides them.
                if not ctxt.get('auth'):
                    ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
                if not ctxt.get('key'):
                    ctxt['key'] = relation_get('key', rid=rid, unit=unit)
                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address', rid=rid,
                                              unit=unit)
                # Prefer a dedicated ceph public address when published.
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        # Sorted for a stable rendered config across hook invocations.
        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not self.context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
561
562
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        # singlenode_mode enables haproxy even without peers (e.g. to front
        # a single local backend).
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                # BUGFIX: only gather peer addresses for this network when
                # the local unit actually has an address on it. Previously
                # this loop also ran with laddr=None, and a peer publishing
                # a '<type>-address' raised KeyError on cluster_hosts[None].
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get(
                            '{}-address'.format(addr_type),
                            rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # will either be the only backend or the fallback if no acls
        # match in the frontend
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
653
654
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        """
        log('Generating template context for image-service.', level=DEBUG)
        relation_list = relation_ids('image-service')
        if not relation_list:
            return {}

        # Return the first API server address any related unit advertises.
        for relid in relation_list:
            for member in related_units(relid):
                server = relation_get('glance-api-server',
                                      rid=relid, unit=member)
                if server:
                    return {'glance_api_servers': server}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
677
678
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules required for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the service certificate and key under /etc/apache2/ssl/.

        :param cn: optional canonical name used to suffix the filenames,
                   so multiple certs can coexist per namespace.
        """
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        # Cert/key arrive base64-encoded over the relation/config.
        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA certificate, if one has been provided."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # BUGFIX: previously k.lstrip('ssl_key_'), but
                        # lstrip() strips a *character set*, not a prefix,
                        # so CNs beginning with any of 's l k e y _' were
                        # mangled. Slice the prefix off instead.
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
        (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

            or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                # Multiple vips require explicit net-splits so each vip can
                # be matched to its network.
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
818
819
class NeutronContext(OSContextGenerator):
    """Base context generator for neutron/quantum plugin configuration.

    Subclasses supply ``plugin`` and ``network_manager``; the per-plugin
    ``*_ctxt`` helpers map those onto the settings used by templates.
    """
    interfaces = []

    @property
    def plugin(self):
        # Subclasses return the plugin name (e.g. 'ovs', 'n1kv').
        return None

    @property
    def network_manager(self):
        # Subclasses return 'quantum' or 'neutron'.
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        # 'packages' is a list of package lists.
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Persist the active plugin name for other hooks/charms to read."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # BUGFIX: open in text mode. self.plugin is a str, and writing a
        # str to a binary-mode ('wb') file raises TypeError on Python 3.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'ovs',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return ovs_ctxt

    def nuage_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nuage_ctxt = {'core_plugin': driver,
                      'neutron_plugin': 'vsp',
                      'neutron_security_groups': self.neutron_security_groups,
                      'local_ip': unit_private_ip(),
                      'config': config}

        return nuage_ctxt

    def nvp_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nvp_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'nvp',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return nvp_ctxt

    def n1kv_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_user_config_flags = config('n1kv-config-flags')
        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
        n1kv_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'n1kv',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': n1kv_config,
                     'vsm_ip': config('n1kv-vsm-ip'),
                     'vsm_username': config('n1kv-vsm-username'),
                     'vsm_password': config('n1kv-vsm-password'),
                     'restrict_policy_profiles': restrict_policy_profiles}

        if n1kv_user_config_flags:
            flags = config_flags_parser(n1kv_user_config_flags)
            n1kv_ctxt['user_config_flags'] = flags

        return n1kv_ctxt

    def calico_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        calico_ctxt = {'core_plugin': driver,
                       'neutron_plugin': 'Calico',
                       'neutron_security_groups': self.neutron_security_groups,
                       'local_ip': unit_private_ip(),
                       'config': config}

        return calico_ctxt

    def neutron_ctxt(self):
        """Common settings: manager name and the neutron API URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt

    def pg_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'plumgrid',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}
        return ovs_ctxt

    def midonet_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
                                                  self.network_manager)
        mido_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'midonet',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': midonet_config}

        return mido_ctxt

    def __call__(self):
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        # Merge in the plugin-specific settings.
        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin in ['nvp', 'nsx']:
            ctxt.update(self.nvp_ctxt())
        elif self.plugin == 'n1kv':
            ctxt.update(self.n1kv_ctxt())
        elif self.plugin == 'Calico':
            ctxt.update(self.calico_ctxt())
        elif self.plugin == 'vsp':
            ctxt.update(self.nuage_ctxt())
        elif self.plugin == 'plumgrid':
            ctxt.update(self.pg_ctxt())
        elif self.plugin == 'midonet':
            ctxt.update(self.midonet_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            flags = config_flags_parser(alchemy_flags)
            ctxt['neutron_alchemy_flags'] = flags

        self._save_flag_file()
        return ctxt
999
1000
class NeutronPortContext(OSContextGenerator):
    # Base class for contexts that map user-supplied port/MAC lists onto
    # actual network interfaces.

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s)

        If hwaddress provided then returns resolved hwaddress otherwise NIC.

        :param ports: list of NIC names and/or MAC addresses to resolve.
        :returns: de-duplicated list of NIC names, or None if ports is empty.
        """
        if not ports:
            return None

        # Build MAC -> NIC and MAC -> [addresses] maps for all physical NICs.
        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
        for nic in list_nics():
            # Ignore virtual interfaces (bond masters will be identified from
            # their slaves)
            if not is_phy_iface(nic):
                continue

            # A bond slave is represented by its bond master.
            _nic = get_bond_master(nic)
            if _nic:
                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
                    level=DEBUG)
                nic = _nic

            hwaddr = get_nic_hwaddr(nic)
            hwaddr_to_nic[hwaddr] = nic
            addresses = get_ipv4_addr(nic, fatal=False)
            addresses += get_ipv6_addr(iface=nic, fatal=False)
            hwaddr_to_ip[hwaddr] = addresses

        resolved = []
        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        for entry in ports:
            if re.match(mac_regex, entry):
                # NIC is in known NICs and does NOT have an IP address
                if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                    # If the nic is part of a bridge then don't use it
                    if is_bridge_member(hwaddr_to_nic[entry]):
                        continue

                    # Entry is a MAC address for a valid interface that doesn't
                    # have an IP address assigned yet.
                    resolved.append(hwaddr_to_nic[entry])
            else:
                # If the passed entry is not a MAC address, assume it's a valid
                # interface, and that the user put it there on purpose (we can
                # trust it to be the real external network).
                resolved.append(entry)

        # Ensure no duplicates
        return list(set(resolved))
1052
1053
class OSConfigFlagContext(OSContextGenerator):
    """Provides support for user-defined config flags.

    Users can define a comma-seperated list of key=value pairs
    in the charm configuration and apply them at any point in
    any file by using a template flag.

    Sometimes users might want config flags inserted within a
    specific section so this class allows users to specify the
    template flag name, allowing for multiple template flags
    (sections) within the same context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in template
                              file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        raw_flags = config(self._charm_flag)
        if not raw_flags:
            return {}

        return {self._template_flag: config_flags_parser(raw_flags)}
1089
1090
class LibvirtConfigFlagsContext(OSContextGenerator):
    """
    This context provides support for extending
    the libvirt section through user-defined flags.
    """
    def __call__(self):
        flags = config('libvirt-flags')
        if not flags:
            return {}
        return {'libvirt_flags': config_flags_parser(flags)}
1103
1104
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principle for multiple config
    files and multiple services. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json::

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principle charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as::

        ctxt = {
            ... other context ...
            'subordinate_configuration': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service    : Service name key to query in any subordinate
                            data found
        :param config_file : Service's config file to query sections
        :param interface  : Subordinate interface to inspect
        """
        self.config_file = config_file
        if isinstance(service, list):
            self.services = service
        else:
            self.services = [service]
        if isinstance(interface, list):
            self.interfaces = interface
        else:
            self.interfaces = [interface]

    def __call__(self):
        ctxt = {'sections': {}}
        rids = []
        for interface in self.interfaces:
            rids.extend(relation_ids(interface))
        for rid in rids:
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        sub_config = json.loads(sub_config)
                    # BUGFIX: catch only the JSON parse error; the previous
                    # bare 'except:' swallowed every exception, including
                    # KeyboardInterrupt/SystemExit and genuine bugs.
                    except ValueError:
                        log('Could not parse JSON from '
                            'subordinate_configuration setting from %s'
                            % rid, level=ERROR)
                        continue

                    for service in self.services:
                        if service not in sub_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s service'
                                % (rid, service), level=INFO)
                            continue

                        # BUGFIX: narrow into a per-service local instead of
                        # rebinding sub_config; the old code overwrote the
                        # parsed blob on the first iteration, breaking every
                        # subsequent service lookup.
                        svc_config = sub_config[service]
                        if self.config_file not in svc_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s'
                                % (rid, self.config_file), level=INFO)
                            continue

                        svc_config = svc_config[self.config_file]
                        for k, v in six.iteritems(svc_config):
                            if k == 'sections':
                                for section, config_list in six.iteritems(v):
                                    log("adding section '%s'" % (section),
                                        level=DEBUG)
                                    if ctxt[k].get(section):
                                        # Merge with sections contributed by
                                        # other subordinate units.
                                        ctxt[k][section].extend(config_list)
                                    else:
                                        ctxt[k][section] = config_list
                            else:
                                ctxt[k] = v
        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
        return ctxt
1211
1212
class LogLevelContext(OSContextGenerator):
    """Expose the 'debug' and 'verbose' charm options, defaulting unset
    values to False."""

    def __call__(self):
        debug = config('debug')
        verbose = config('verbose')
        return {
            'debug': False if debug is None else debug,
            'verbose': False if verbose is None else verbose,
        }
1223
1224
class SyslogContext(OSContextGenerator):
    """Expose the 'use-syslog' charm option to templates."""

    def __call__(self):
        return {'use_syslog': config('use-syslog')}
1230
1231
class BindHostContext(OSContextGenerator):
    """Select the wildcard bind address based on the IPv6 preference."""

    def __call__(self):
        # Bind to the IPv6 wildcard when the charm prefers IPv6.
        if config('prefer-ipv6'):
            return {'bind_host': '::'}
        return {'bind_host': '0.0.0.0'}
1239
1240
class WorkerConfigContext(OSContextGenerator):
    """Derive the worker-process count from the CPU count and the
    'worker-multiplier' charm option."""

    @property
    def num_cpus(self):
        # psutil may not be installed yet; pull it in on first use.
        try:
            from psutil import NUM_CPUS
        except ImportError:
            apt_install('python-psutil', fatal=True)
            from psutil import NUM_CPUS

        return NUM_CPUS

    def __call__(self):
        scale = config('worker-multiplier') or 0
        return {"workers": self.num_cpus * scale}
1257
1258
class ZeroMQContext(OSContextGenerator):
    interfaces = ['zeromq-configuration']

    def __call__(self):
        settings = {}
        if is_relation_made('zeromq-configuration', 'host'):
            # Last related unit wins for each key.
            for relid in relation_ids('zeromq-configuration'):
                for member in related_units(relid):
                    settings['zmq_nonce'] = relation_get('nonce', member,
                                                         relid)
                    settings['zmq_host'] = relation_get('host', member, relid)
                    settings['zmq_redis_address'] = relation_get(
                        'zmq_redis_address', member, relid)

        return settings
1273
1274
class NotificationDriverContext(OSContextGenerator):
    """Enable notifications when an AMQP relation is present."""

    def __init__(self, zmq_relation='zeromq-configuration',
                 amqp_relation='amqp'):
        """
        :param zmq_relation: Name of Zeromq relation to check
        """
        self.zmq_relation = zmq_relation
        self.amqp_relation = amqp_relation

    def __call__(self):
        # Templates expect string booleans.
        enabled = is_relation_made(self.amqp_relation)
        return {'notifications': 'True' if enabled else 'False'}
1291
1292
class SysctlContext(OSContextGenerator):
    """This context check if the 'sysctl' option exists on configuration
    then creates a file with the loaded contents"""
    def __call__(self):
        settings = config('sysctl')
        if settings:
            target = '/etc/sysctl.d/50-{0}.conf'.format(charm_name())
            sysctl_create(settings, target)
        return {'sysctl': settings}
1302
1303
class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __call__(self):
        # Map of context key -> relation key + default used when the
        # relation is absent or does not provide the key.
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # 'l2-population' acts as a marker that the remote end has
                # populated its settings.
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        """Map relation data onto context keys, applying defaults and
        coercing boolean-valued settings.

        :param rdata: dict of relation data (may be empty).
        :returns: dict of context settings.
        """
        settings = {}
        for nkey in self.neutron_defaults.keys():
            defv = self.neutron_defaults[nkey]['default']
            rkey = self.neutron_defaults[nkey]['rel_key']
            if rkey in rdata:
                # Relation data is string-typed; coerce booleans explicitly.
                # (isinstance replaces the non-idiomatic 'type(defv) is bool')
                if isinstance(defv, bool):
                    settings[nkey] = bool_from_string(rdata[rkey])
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv
        return settings
1360
1361
class ExternalPortContext(NeutronPortContext):
    """Resolve the configured 'ext-port' to a usable NIC name."""

    def __call__(self):
        raw = config('ext-port')
        if not raw:
            return {}

        candidates = [p.strip() for p in raw.split()]
        resolved = self.resolve_ports(candidates)
        if not resolved:
            return {}

        ctxt = {"ext_port": resolved[0]}
        # Propagate an MTU override if the neutron API advertises one.
        mtu = NeutronAPIContext()().get('network_device_mtu')
        if mtu:
            ctxt['ext_port_mtu'] = mtu

        return ctxt
1378
1379
class DataPortContext(NeutronPortContext):
    """Resolve configured data-port mappings to {nic: bridge}."""

    def __call__(self):
        raw = config('data-port')
        if raw:
            # Map of {port/mac:bridge}
            portmap = parse_data_port_mappings(raw)
            requested = portmap.keys()
            # Resolve provided ports or mac addresses and filter out those
            # already attached to a bridge.
            resolved = self.resolve_ports(requested)
            # FIXME: is this necessary?
            by_name = {get_nic_hwaddr(p): p for p in resolved
                       if p not in requested}
            by_name.update({p: p for p in resolved if p in requested})
            if resolved:
                return {by_name[p]: bridge for p, bridge in
                        six.iteritems(portmap) if p in by_name}

        return None
1401
1402
class PhyNICMTUContext(DataPortContext):
    """Apply a configured MTU to the resolved data-port devices."""

    def __call__(self):
        ctxt = {}
        mappings = super(PhyNICMTUContext, self).__call__()
        if mappings and mappings.keys():
            devices = sorted(mappings.keys())
            mtu = NeutronAPIContext()().get('network_device_mtu')

            # If any of ports is a vlan device, its underlying device must
            # have mtu applied first.
            underlying = set()
            for dev in devices:
                pattern = "/sys/class/net/%s/lower_*" % dev
                for link in glob.glob(pattern):
                    underlying.add(os.path.basename(link).split('_')[1])

            ordered = list(underlying)
            ordered.extend(devices)
            if mtu:
                ctxt["devs"] = '\\n'.join(ordered)
                ctxt['mtu'] = mtu

        return ctxt
1427
1428
class NetworkServiceContext(OSContextGenerator):
    """Collect keystone/quantum endpoint data from the network-service
    relation."""

    def __init__(self, rel_name='quantum-network-service'):
        self.rel_name = rel_name
        self.interfaces = [rel_name]

    def __call__(self):
        # Return the first related unit that provides a complete context.
        for relid in relation_ids(self.rel_name):
            for member in related_units(relid):
                rdata = relation_get(rid=relid, unit=member)
                ctxt = {
                    'keystone_host': rdata.get('keystone_host'),
                    'service_port': rdata.get('service_port'),
                    'auth_port': rdata.get('auth_port'),
                    'service_tenant': rdata.get('service_tenant'),
                    'service_username': rdata.get('service_username'),
                    'service_password': rdata.get('service_password'),
                    'quantum_host': rdata.get('quantum_host'),
                    'quantum_port': rdata.get('quantum_port'),
                    'quantum_url': rdata.get('quantum_url'),
                    'region': rdata.get('region'),
                    'service_protocol':
                    rdata.get('service_protocol') or 'http',
                    'auth_protocol':
                    rdata.get('auth_protocol') or 'http',
                }
                if self.context_complete(ctxt):
                    return ctxt
        return {}
01458
=== added file 'charmhelpers.new/contrib/openstack/ip.py'
--- charmhelpers.new/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/ip.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,151 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from charmhelpers.core.hookenv import (
18 config,
19 unit_get,
20 service_name,
21)
22from charmhelpers.contrib.network.ip import (
23 get_address_in_network,
24 is_address_in_network,
25 is_ipv6,
26 get_ipv6_addr,
27)
28from charmhelpers.contrib.hahelpers.cluster import is_clustered
29
# Endpoint type identifiers used as keys into ADDRESS_MAP.
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

# Per-endpoint-type lookup table:
#   config   - charm option naming the network/CIDR for this endpoint type
#   fallback - unit_get() key used when no network override is configured
#   override - charm option giving an explicit hostname/address override
ADDRESS_MAP = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address',
        'override': 'os-public-hostname',
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address',
        'override': 'os-internal-hostname',
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address',
        'override': 'os-admin-hostname',
    }
}
51
52
def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :param returns: str base URL for services on the current service unit.
    """
    host = resolve_address(endpoint_type)
    # IPv6 literals must be bracketed inside a URL.
    if is_ipv6(host):
        host = "[{}]".format(host)

    return '%s://%s' % (_get_scheme(configs), host)
69
70
71def _get_scheme(configs):
72 """Returns the scheme to use for the url (either http or https)
73 depending upon whether https is in the configs value.
74
75 :param configs: OSTemplateRenderer config templating object to inspect
76 for a complete https context.
77 :returns: either 'http' or 'https' depending on whether https is
78 configured within the configs context.
79 """
80 scheme = 'http'
81 if configs and 'https' in configs.complete_contexts():
82 scheme = 'https'
83 return scheme
84
85
def _get_address_override(endpoint_type=PUBLIC):
    """Returns any address overrides that the user has defined based on the
    endpoint type.

    Note: this function allows for the service name to be inserted into the
    address if the user specifies {service_name}.somehost.org.

    :param endpoint_type: the type of endpoint to retrieve the override
                          value for.
    :returns: any endpoint address or hostname that the user has overridden
              or None if an override is not present.
    """
    override = config(ADDRESS_MAP[endpoint_type]['override'])
    if override:
        # Allow '{service_name}' placeholders in the override value.
        return override.format(service_name=service_name())
    return None
104
105
def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoint type
    """
    # A user-supplied hostname/address override always wins.
    resolved = _get_address_override(endpoint_type)
    if resolved:
        return resolved

    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    clustered = is_clustered()

    if clustered:
        if net_addr:
            # Net-splits configured: pick the vip on this endpoint's network.
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved = vip
                    break
        else:
            # If no net-splits defined, we expect a single vip
            resolved = vips[0]
    else:
        if config('prefer-ipv6'):
            fallback = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback = unit_get(ADDRESS_MAP[endpoint_type]['fallback'])

        resolved = get_address_in_network(net_addr, fallback)

    if resolved is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved
0152
=== added file 'charmhelpers.new/contrib/openstack/neutron.py'
--- charmhelpers.new/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/neutron.py 2015-11-24 19:47:41 +0000
@@ -0,0 +1,370 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
 17# Various utilities for dealing with Neutron and the renaming from Quantum.
18
19import six
20from subprocess import check_output
21
22from charmhelpers.core.hookenv import (
23 config,
24 log,
25 ERROR,
26)
27
28from charmhelpers.contrib.openstack.utils import os_release
29
30
def headers_package():
    """Return the linux-headers package name for the running kernel.

    Needed so DKMS modules can be built against the current kernel.
    """
    running_kernel = check_output(['uname', '-r']).decode('UTF-8').strip()
    return 'linux-headers-{}'.format(running_kernel)
36
37QUANTUM_CONF_DIR = '/etc/quantum'
38
39
def kernel_version():
    """Return the running kernel's (major, minor) version as an int tuple,
    e.g. (3, 13)."""
    release = check_output(['uname', '-r']).decode('UTF-8').strip()
    major, minor = release.split('.')[:2]
    return (int(major), int(minor))
45
46
def determine_dkms_package():
    """Return the list of DKMS packages required for the running kernel.

    Kernels >= 3.13 ship GRE and VXLAN support natively, so no DKMS
    package is needed there.
    """
    if kernel_version() < (3, 13):
        return ['openvswitch-datapath-dkms']
    return []
54
55
56# legacy
57
58
def quantum_plugins():
    """Return metadata for the legacy Quantum plugins (pre-Neutron rename).

    Maps plugin short-name to its config file, core driver class, template
    contexts, and the agent/server packages and services it requires.
    """
    from charmhelpers.contrib.openstack import context

    def _db_context():
        # Each plugin entry gets its own SharedDBContext instance.
        return context.SharedDBContext(user=config('neutron-database-user'),
                                       database=config('neutron-database'),
                                       relation_prefix='neutron',
                                       ssl_dir=QUANTUM_CONF_DIR)

    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [_db_context()],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['quantum-server',
                                'quantum-plugin-openvswitch'],
            'server_services': ['quantum-server'],
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'contexts': [_db_context()],
            'services': [],
            'packages': [],
            'server_packages': ['quantum-server',
                                'quantum-plugin-nicira'],
            'server_services': ['quantum-server'],
        },
    }
95
96NEUTRON_CONF_DIR = '/etc/neutron'
97
98
def neutron_plugins():
    """Return metadata for each supported Neutron core plugin.

    Maps plugin short-name to its config file path, core driver class,
    template contexts, and the packages/services needed on agent and
    server nodes. Entries for 'ovs' and 'nvp' are patched at the end for
    icehouse onwards.
    """
    from charmhelpers.contrib.openstack import context
    # Release decides whether the legacy per-plugin layout or the ml2/nsx
    # layout applies (see the patch-up at the bottom).
    release = os_release('nova-common')
    plugins = {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-openvswitch-agent']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server']
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-nicira'],
            'server_services': ['neutron-server']
        },
        'nsx': {
            'config': '/etc/neutron/plugins/vmware/nsx.ini',
            'driver': 'vmware',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-vmware'],
            'server_services': ['neutron-server']
        },
        'n1kv': {
            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-cisco']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-cisco'],
            'server_services': ['neutron-server']
        },
        'Calico': {
            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['calico-felix',
                         'bird',
                         'neutron-dhcp-agent',
                         'nova-api-metadata',
                         'etcd'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['calico-compute',
                          'bird',
                          'neutron-dhcp-agent',
                          'nova-api-metadata',
                          'etcd']],
            'server_packages': ['neutron-server', 'calico-control', 'etcd'],
            'server_services': ['neutron-server', 'etcd']
        },
        'vsp': {
            'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
            'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
            'server_services': ['neutron-server']
        },
        'plumgrid': {
            'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
            'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
            # NOTE: plumgrid uses the plain 'database*' config keys and no
            # relation prefix, unlike every other plugin here.
            'contexts': [
                context.SharedDBContext(user=config('database-user'),
                                        database=config('database'),
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': ['plumgrid-lxc',
                         'iovisor-dkms'],
            'server_packages': ['neutron-server',
                                'neutron-plugin-plumgrid'],
            'server_services': ['neutron-server']
        },
        'midonet': {
            'config': '/etc/neutron/plugins/midonet/midonet.ini',
            'driver': 'midonet.neutron.plugin.MidonetPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [[headers_package()] + determine_dkms_package()],
            'server_packages': ['neutron-server',
                                'python-neutron-plugin-midonet'],
            'server_services': ['neutron-server']
        }
    }
    # String comparison is deliberate: OpenStack release codenames are
    # alphabetically ordered, so '>=' matches icehouse and later.
    if release >= 'icehouse':
        # NOTE: patch in ml2 plugin for icehouse onwards
        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        plugins['ovs']['server_packages'] = ['neutron-server',
                                             'neutron-plugin-ml2']
        # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
        plugins['nvp'] = plugins['nsx']
    return plugins
237
238
def neutron_plugin_attribute(plugin, attr, net_manager=None):
    """Look up an attribute of a network plugin.

    :param plugin: plugin short-name, e.g. 'ovs'.
    :param attr: attribute key, e.g. 'packages' or 'server_services'.
    :param net_manager: optional override for network_manager(); must be
                        'quantum' or 'neutron' to resolve a plugin map.
    :returns: the attribute value, or None if the plugin does not define
              that attribute.
    :raises Exception: if the manager or plugin is unrecognised.
    """
    manager = net_manager or network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        # Carry the reason on the exception as well as the log so callers
        # that only see the traceback still know what went wrong.
        msg = "Network manager '%s' does not support plugins." % (manager)
        log(msg, level=ERROR)
        raise Exception(msg)

    try:
        _plugin = plugins[plugin]
    except KeyError:
        msg = 'Unrecognised plugin for %s: %s' % (manager, plugin)
        log(msg, level=ERROR)
        raise Exception(msg)

    try:
        return _plugin[attr]
    except KeyError:
        return None
260
261
def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (eg, deploying H with network-manager=quantum,
    upgrading from G).

    :returns: the configured network-manager value, normalised to 'quantum'
              or 'neutron' per the installed OpenStack release.
    :raises Exception: if neutron networking is requested on Essex.
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ['quantum', 'neutron']:
        return manager

    if release in ['essex']:
        # E does not support neutron; carry the reason on the exception as
        # well as the log.
        msg = 'Neutron networking not supported in Essex.'
        log(msg, level=ERROR)
        raise Exception(msg)
    elif release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    else:
        # ensure accurate naming for all releases post-H
        return 'neutron'
284
285
286def parse_mappings(mappings, key_rvalue=False):
287 """By default mappings are lvalue keyed.
288
289 If key_rvalue is True, the mapping will be reversed to allow multiple
290 configs for the same lvalue.
291 """
292 parsed = {}
293 if mappings:
294 mappings = mappings.split()
295 for m in mappings:
296 p = m.partition(':')
297
298 if key_rvalue:
299 key_index = 2
300 val_index = 0
301 # if there is no rvalue skip to next
302 if not p[1]:
303 continue
304 else:
305 key_index = 0
306 val_index = 2
307
308 key = p[key_index].strip()
309 parsed[key] = p[val_index].strip()
310
311 return parsed
312
313
def parse_bridge_mappings(mappings):
    """Parse bridge mappings.

    Mappings must be a space-delimited list of provider:bridge pairs.

    :returns: dict of the form {provider: bridge}.
    """
    # Providers are the lvalues, so the default lvalue-keyed parse applies.
    return parse_mappings(mappings)
322
323
def parse_data_port_mappings(mappings, default_bridge='br-data'):
    """Parse data port mappings.

    Mappings must be a space-delimited list of bridge:port.

    :param mappings: space-delimited 'bridge:port' pairs; ports may be mac
                     addresses or interface names.
    :param default_bridge: bridge used when config provides a bare port
                           (backwards-compatibility path).
    :returns: dict of the form {port: bridge}.
    :raises Exception: if the same port is configured on more than one
                       bridge.
    """

    # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
    # proposed for <port> since it may be a mac address which will differ
    # across units this allowing first-known-good to be chosen.
    _mappings = parse_mappings(mappings, key_rvalue=True)
    if not _mappings or list(_mappings.values()) == ['']:
        if not mappings:
            return {}

        # For backwards-compatibility we need to support port-only provided in
        # config.
        _mappings = {mappings.split()[0]: default_bridge}

    # Duplicate detection must use the raw config: _mappings is a dict keyed
    # on port, so duplicate ports have already been collapsed there and a
    # check over its keys could never fire (the previous check was dead
    # code).
    ports = [entry.partition(':')[2].strip()
             for entry in mappings.split() if entry.partition(':')[1]]
    if len(set(ports)) != len(ports):
        raise Exception("It is not allowed to have the same port configured "
                        "on more than one bridge")

    return _mappings
351
352
353def parse_vlan_range_mappings(mappings):
354 """Parse vlan range mappings.
355
356 Mappings must be a space-delimited list of provider:start:end mappings.
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches