Merge lp:~james-page/charms/trusty/swift-storage/xenial into lp:~openstack-charmers-archive/charms/trusty/swift-storage/trunk

Proposed by James Page
Status: Superseded
Proposed branch: lp:~james-page/charms/trusty/swift-storage/xenial
Merge into: lp:~openstack-charmers-archive/charms/trusty/swift-storage/trunk
Diff against target: 13142 lines (+12701/-18) (has conflicts)
59 files modified
.testr.conf (+8/-0)
actions.yaml (+6/-0)
actions/actions.py (+108/-0)
actions/openstack_upgrade.py (+34/-0)
charm-helpers-hooks.yaml (+5/-0)
charmhelpers.new/cli/__init__.py (+191/-0)
charmhelpers.new/cli/benchmark.py (+36/-0)
charmhelpers.new/cli/commands.py (+32/-0)
charmhelpers.new/cli/hookenv.py (+23/-0)
charmhelpers.new/cli/host.py (+31/-0)
charmhelpers.new/cli/unitdata.py (+39/-0)
charmhelpers.new/contrib/charmsupport/nrpe.py (+398/-0)
charmhelpers.new/contrib/hahelpers/cluster.py (+316/-0)
charmhelpers.new/contrib/network/ip.py (+458/-0)
charmhelpers.new/contrib/openstack/amulet/deployment.py (+302/-0)
charmhelpers.new/contrib/openstack/amulet/utils.py (+985/-0)
charmhelpers.new/contrib/openstack/context.py (+1477/-0)
charmhelpers.new/contrib/openstack/files/check_haproxy.sh (+34/-0)
charmhelpers.new/contrib/openstack/ip.py (+151/-0)
charmhelpers.new/contrib/openstack/neutron.py (+378/-0)
charmhelpers.new/contrib/openstack/templates/ceph.conf (+21/-0)
charmhelpers.new/contrib/openstack/templates/haproxy.cfg (+66/-0)
charmhelpers.new/contrib/openstack/templating.py (+323/-0)
charmhelpers.new/contrib/openstack/utils.py (+1044/-0)
charmhelpers.new/contrib/python/packages.py (+130/-0)
charmhelpers.new/contrib/storage/linux/ceph.py (+1039/-0)
charmhelpers.new/contrib/storage/linux/loopback.py (+88/-0)
charmhelpers.new/contrib/storage/linux/utils.py (+71/-0)
charmhelpers.new/core/files.py (+45/-0)
charmhelpers.new/core/hookenv.py (+978/-0)
charmhelpers.new/core/host.py (+673/-0)
charmhelpers.new/core/hugepage.py (+71/-0)
charmhelpers.new/core/kernel.py (+68/-0)
charmhelpers.new/core/services/base.py (+353/-0)
charmhelpers.new/core/services/helpers.py (+292/-0)
charmhelpers.new/core/strutils.py (+72/-0)
charmhelpers.new/core/templating.py (+81/-0)
charmhelpers.new/core/unitdata.py (+521/-0)
charmhelpers.new/fetch/__init__.py (+464/-0)
charmhelpers.new/fetch/archiveurl.py (+167/-0)
charmhelpers.new/fetch/bzrurl.py (+68/-0)
charmhelpers.new/fetch/giturl.py (+70/-0)
lib/misc_utils.py (+110/-0)
lib/swift_storage_context.py (+96/-0)
lib/swift_storage_utils.py (+372/-0)
requirements.txt (+11/-0)
test-requirements.txt (+8/-0)
tests/018-basic-trusty-liberty (+11/-0)
tests/019-basic-trusty-mitaka (+11/-0)
tests/019-basic-vivid-kilo (+0/-9)
tests/020-basic-wily-liberty (+9/-0)
tests/021-basic-xenial-mitaka (+9/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+9/-0)
tests/tests.yaml (+21/-0)
tox.ini (+29/-0)
unit_tests/test_actions.py (+220/-0)
unit_tests/test_actions_openstack_upgrade.py (+58/-0)
unit_tests/test_swift_storage_context.py (+1/-5)
unit_tests/test_swift_storage_utils.py (+9/-4)
Conflict adding file .testr.conf.  Moved existing file to .testr.conf.moved.
Path conflict: <deleted> / lib/misc_utils.py
Path conflict: <deleted> / lib/swift_storage_context.py
Path conflict: <deleted> / lib/swift_storage_utils.py
Conflict adding file actions.  Moved existing file to actions.moved.
Conflict adding file actions.yaml.  Moved existing file to actions.yaml.moved.
Text conflict in charm-helpers-hooks.yaml
Conflict adding file charmhelpers.  Moved existing file to charmhelpers.moved.
Conflict: charmhelpers.new is not a directory, but has files in it.  Created directory.
Conflict adding files to charmhelpers.new/contrib.  Created directory.
Conflict because charmhelpers.new/contrib is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/charmsupport.  Created directory.
Conflict because charmhelpers.new/contrib/charmsupport is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/hahelpers.  Created directory.
Conflict because charmhelpers.new/contrib/hahelpers is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/network.  Created directory.
Conflict because charmhelpers.new/contrib/network is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack.  Created directory.
Conflict because charmhelpers.new/contrib/openstack is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/amulet.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/amulet is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/files.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/files is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/openstack/templates.  Created directory.
Conflict because charmhelpers.new/contrib/openstack/templates is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/python.  Created directory.
Conflict because charmhelpers.new/contrib/python is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage.  Created directory.
Conflict because charmhelpers.new/contrib/storage is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/contrib/storage/linux.  Created directory.
Conflict because charmhelpers.new/contrib/storage/linux is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core.  Created directory.
Conflict because charmhelpers.new/core is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/core/services.  Created directory.
Conflict because charmhelpers.new/core/services is not versioned, but has versioned children.  Versioned directory.
Conflict adding files to charmhelpers.new/fetch.  Created directory.
Conflict because charmhelpers.new/fetch is not versioned, but has versioned children.  Versioned directory.
Conflict adding file hooks/install.real.  Moved existing file to hooks/install.real.moved.
Conflict adding file hooks/lib.  Moved existing file to hooks/lib.moved.
Conflict adding file lib.  Moved existing file to lib.moved.
Conflict adding file requirements.txt.  Moved existing file to requirements.txt.moved.
Conflict adding file test-requirements.txt.  Moved existing file to test-requirements.txt.moved.
Conflict adding file tests/018-basic-trusty-liberty.  Moved existing file to tests/018-basic-trusty-liberty.moved.
Conflict adding file tests/019-basic-trusty-mitaka.  Moved existing file to tests/019-basic-trusty-mitaka.moved.
Conflict adding file tests/020-basic-wily-liberty.  Moved existing file to tests/020-basic-wily-liberty.moved.
Conflict adding file tests/021-basic-xenial-mitaka.  Moved existing file to tests/021-basic-xenial-mitaka.moved.
Text conflict in tests/charmhelpers/contrib/openstack/amulet/deployment.py
Conflict adding file tests/setup.  Moved existing file to tests/setup.moved.
Conflict adding file tests/tests.yaml.  Moved existing file to tests/tests.yaml.moved.
Conflict adding file tox.ini.  Moved existing file to tox.ini.moved.
Conflict adding file unit_tests/test_actions.py.  Moved existing file to unit_tests/test_actions.py.moved.
Conflict adding file unit_tests/test_actions_openstack_upgrade.py.  Moved existing file to unit_tests/test_actions_openstack_upgrade.py.moved.
To merge this branch: bzr merge lp:~james-page/charms/trusty/swift-storage/xenial
Reviewer Review Type Date Requested Status
OpenStack Charmers Pending
Review via email: mp+284514@code.launchpad.net

This proposal has been superseded by a proposal from 2016-01-30.

Description of the change

Resync helpers, fix up xenial support.

To post a comment you must log in.
104. By James Page

Tidy lint

105. By James Page

Baseline

106. By James Page

Enable xenial mitaka tests

Unmerged revisions

106. By James Page

Enable xenial mitaka tests

105. By James Page

Baseline

104. By James Page

Tidy lint

103. By James Page

Resync helpers, refactor code to use cpu calcs from charmhelpers

102. By James Page

Fix liberty/mitaka typo from previous test definition update batch.

101. By Corey Bryant

[corey.bryant, r=jamespage] Sync charm-helpers.

100. By Liam Young

Update test combo definitions, remove Vivid deprecated release tests, update bundletester testplan yaml, update tests README.

99. By Liam Young

[corey.bryant, r=gnuoy] Charmhelper sync

98. By James Page

Resync helpers

97. By Corey Bryant

[corey.bryant,r=trivial] Sync charm-helpers.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== added file '.testr.conf'
--- .testr.conf 1970-01-01 00:00:00 +0000
+++ .testr.conf 2016-01-30 12:38:43 +0000
@@ -0,0 +1,8 @@
1[DEFAULT]
2test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
3 OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
4 OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
5 ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
6
7test_id_option=--load-list $IDFILE
8test_list_option=--list
09
=== renamed file '.testr.conf' => '.testr.conf.moved'
=== added directory 'actions'
=== renamed directory 'actions' => 'actions.moved'
=== added file 'actions.yaml'
--- actions.yaml 1970-01-01 00:00:00 +0000
+++ actions.yaml 2016-01-30 12:38:43 +0000
@@ -0,0 +1,6 @@
1pause:
2 description: Pause the swift-storage unit. This action will stop Swift services.
3resume:
4 description: Resume the swift-storage unit. This action will start Swift services.
5openstack-upgrade:
6 description: Perform openstack upgrades. Config option action-managed-upgrade must be set to True.
07
=== renamed file 'actions.yaml' => 'actions.yaml.moved'
=== added file 'actions/__init__.py'
=== added file 'actions/actions.py'
--- actions/actions.py 1970-01-01 00:00:00 +0000
+++ actions/actions.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,108 @@
1#!/usr/bin/python
2
3import argparse
4import os
5import sys
6import yaml
7
8from charmhelpers.core.host import service_pause, service_resume
9from charmhelpers.core.hookenv import action_fail
10from charmhelpers.core.unitdata import HookData, kv
11from charmhelpers.contrib.openstack.utils import (
12 get_os_codename_package,
13 set_os_workload_status,
14)
15from lib.swift_storage_utils import (
16 assess_status,
17 REQUIRED_INTERFACES,
18 SWIFT_SVCS,
19)
20from hooks.swift_storage_hooks import (
21 CONFIGS,
22)
23
24
def _get_services():
    """Return the list of swift services this unit should (un)pause."""
    services = list(SWIFT_SVCS)
    # swift-container-sync only exists from Icehouse onwards; drop it on
    # older OpenStack releases (codename ordering is alphabetical).
    if get_os_codename_package("swift-container") < "icehouse":
        services.remove("swift-container-sync")
    return services
32
33
def get_action_parser(actions_yaml_path, action_name,
                      get_services=_get_services):
    """Make an argparse.ArgumentParser seeded from actions.yaml definitions.

    :param actions_yaml_path: path to the charm's actions.yaml file.
    :param action_name: name of the action whose description seeds the
        parser's description text.
    :param get_services: callable returning the default --services list.
    :returns: configured argparse.ArgumentParser.
    """
    with open(actions_yaml_path) as fh:
        # safe_load: plain yaml.load without a Loader can construct
        # arbitrary Python objects and is deprecated in PyYAML.
        doc = yaml.safe_load(fh)[action_name]["description"]
    parser = argparse.ArgumentParser(description=doc)
    parser.add_argument("--services", default=get_services())
    # TODO: Add arguments for params defined in the actions.yaml
    return parser
43
44
def pause(args):
    """Stop every swift service listed in ``args.services``.

    @raises Exception if any services fail to stop
    """
    for svc in args.services:
        if not service_pause(svc):
            raise Exception("{} didn't stop cleanly.".format(svc))
    # Record the paused state in the unit kv store so other hooks see it.
    with HookData()():
        kv().set('unit-paused', True)
    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                           charm_func=assess_status)
58
59
def resume(args):
    """Start every swift service listed in ``args.services``.

    @raises Exception if any services fail to start
    """
    for svc in args.services:
        if not service_resume(svc):
            raise Exception("{} didn't start cleanly.".format(svc))
    # Clear the paused flag so other hooks see the unit as active again.
    with HookData()():
        kv().set('unit-paused', False)
    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                           charm_func=assess_status)
73
74
# Map of action names to handlers; each handler takes the parsed arguments.
ACTIONS = {"pause": pause, "resume": resume}


def main(argv):
    """Dispatch to the action named by the invoking file.

    :param argv: command-line arguments for the action's parser.
    :returns: an error string if the action is unknown, else None.
    """
    action_name = _get_action_name()
    parser = get_action_parser(_get_actions_yaml_path(), action_name)
    args = parser.parse_args(argv)
    action = ACTIONS.get(action_name)
    if action is None:
        return "Action %s undefined" % action_name
    try:
        action(args)
    except Exception as e:
        # Surface the failure to juju rather than crashing the hook.
        action_fail(str(e))
94
95
96def _get_action_name():
97 """Return the name of the action."""
98 return os.path.basename(__file__)
99
100
101def _get_actions_yaml_path():
102 """Return the path to actions.yaml"""
103 cwd = os.path.dirname(__file__)
104 return os.path.join(cwd, "..", "actions.yaml")
105
106
107if __name__ == "__main__":
108 sys.exit(main(sys.argv[1:]))
0109
=== added symlink 'actions/charmhelpers'
=== target is u'../charmhelpers/'
=== added symlink 'actions/hooks'
=== target is u'../hooks'
=== added symlink 'actions/lib'
=== target is u'../lib'
=== added symlink 'actions/openstack-upgrade'
=== target is u'openstack_upgrade.py'
=== added file 'actions/openstack_upgrade.py'
--- actions/openstack_upgrade.py 1970-01-01 00:00:00 +0000
+++ actions/openstack_upgrade.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,34 @@
1#!/usr/bin/python
2import sys
3
4sys.path.append('hooks/')
5
6from charmhelpers.contrib.openstack.utils import (
7 do_action_openstack_upgrade,
8)
9
10from swift_storage_hooks import (
11 config_changed,
12 CONFIGS,
13)
14
15from lib.swift_storage_utils import (
16 do_openstack_upgrade,
17)
18
19
def openstack_upgrade():
    """Upgrade packages to config-set Openstack version.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag must be set for this
    code to run, otherwise a full service level upgrade will fire
    on config-changed."""
    upgraded = do_action_openstack_upgrade('swift',
                                           do_openstack_upgrade,
                                           CONFIGS)
    # Re-render configs/services only when an upgrade actually happened.
    if upgraded:
        config_changed()


if __name__ == '__main__':
    openstack_upgrade()
035
=== added symlink 'actions/pause'
=== target is u'actions.py'
=== added symlink 'actions/resume'
=== target is u'actions.py'
=== modified file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 2015-10-22 16:09:29 +0000
+++ charm-helpers-hooks.yaml 2016-01-30 12:38:43 +0000
@@ -1,5 +1,10 @@
1<<<<<<< TREE
1branch: lp:~openstack-charmers/charm-helpers/stable2branch: lp:~openstack-charmers/charm-helpers/stable
2destination: charmhelpers3destination: charmhelpers
4=======
5branch: lp:charm-helpers
6destination: charmhelpers
7>>>>>>> MERGE-SOURCE
3include:8include:
4 - core9 - core
5 - cli10 - cli
611
=== renamed directory 'charmhelpers' => 'charmhelpers.moved'
=== renamed symlink 'hooks/charmhelpers' => 'charmhelpers.new'
=== target was u'../charmhelpers/'
=== added directory 'charmhelpers.new/cli'
=== added file 'charmhelpers.new/cli/__init__.py'
--- charmhelpers.new/cli/__init__.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/__init__.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,191 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import inspect
18import argparse
19import sys
20
21from six.moves import zip
22
23import charmhelpers.core.unitdata
24
25
class OutputFormatter(object):
    """Write a Python value to ``outfile`` in one of several formats.

    Each supported format has a writer method of the same name;
    ``format_output`` dispatches to it by name.
    """

    def __init__(self, outfile=sys.stdout):
        # Ordered names of supported formats; each maps to a method below
        # and the order drives the flags registered by add_arguments().
        self.formats = (
            "raw",
            "json",
            "py",
            "yaml",
            "csv",
            "tab",
        )
        self.outfile = outfile

    def add_arguments(self, argument_parser):
        """Register mutually-exclusive format-selection flags on the parser."""
        group = argument_parser.add_mutually_exclusive_group()
        choices = self.supported_formats
        group.add_argument(
            "--format", metavar='FMT',
            help="Select output format for returned data, "
                 "where FMT is one of: {}".format(choices),
            choices=choices, default='raw')
        # Each format also gets a short/long flag pair built from its name
        # (e.g. -r/--raw) whose help text is the writer method's docstring.
        for name in self.formats:
            handler = getattr(self, name)
            group.add_argument(
                "-{}".format(name[0]), "--{}".format(name),
                action='store_const', const=name, dest='format',
                help=handler.__doc__)

    @property
    def supported_formats(self):
        return self.formats

    def raw(self, output):
        """Output data as raw string (default)"""
        if isinstance(output, (list, tuple)):
            output = '\n'.join(str(item) for item in output)
        self.outfile.write(str(output))

    def py(self, output):
        """Output data as a nicely-formatted python data structure"""
        from pprint import pprint
        pprint(output, stream=self.outfile)

    def json(self, output):
        """Output data in JSON format"""
        from json import dump
        dump(output, self.outfile)

    def yaml(self, output):
        """Output data in YAML format"""
        from yaml import safe_dump
        safe_dump(output, self.outfile)

    def csv(self, output):
        """Output data as excel-compatible CSV"""
        from csv import writer
        writer(self.outfile).writerows(output)

    def tab(self, output):
        """Output data in excel-compatible tab-delimited format"""
        import csv
        csv.writer(self.outfile, dialect=csv.excel_tab).writerows(output)

    def format_output(self, output, fmt='raw'):
        """Dispatch ``output`` to the writer method named by ``fmt``."""
        getattr(self, fmt)(output)
92
93
class CommandLine(object):
    """Registry that turns decorated functions into argparse subcommands
    for the charm-helpers command-line tool."""

    # Class-level defaults.  __init__ assigns instance attributes that
    # shadow these on each instance.
    argument_parser = None
    subparsers = None
    formatter = None
    exit_code = 0  # set by test_command subcommands, read after run()

    def __init__(self):
        if not self.argument_parser:
            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
        if not self.formatter:
            self.formatter = OutputFormatter()
            # Expose the formatter's --format/-r/-j/... flags on the parser.
            self.formatter.add_arguments(self.argument_parser)
        if not self.subparsers:
            self.subparsers = self.argument_parser.add_subparsers(help='Commands')

    def subcommand(self, command_name=None):
        """
        Decorate a function as a subcommand. Use its arguments as the
        command-line arguments"""
        def wrapper(decorated):
            # Default the subcommand name to the function's own name.
            cmd_name = command_name or decorated.__name__
            subparser = self.subparsers.add_parser(cmd_name,
                                                   description=decorated.__doc__)
            # Mirror the function's signature onto argparse arguments.
            for args, kwargs in describe_arguments(decorated):
                subparser.add_argument(*args, **kwargs)
            subparser.set_defaults(func=decorated)
            return decorated
        return wrapper

    def test_command(self, decorated):
        """
        Subcommand is a boolean test function, so bool return values should be
        converted to a 0/1 exit code.
        """
        decorated._cli_test_command = True
        return decorated

    def no_output(self, decorated):
        """
        Subcommand is not expected to return a value, so don't print a spurious None.
        """
        decorated._cli_no_output = True
        return decorated

    def subcommand_builder(self, command_name, description=None):
        """
        Decorate a function that builds a subcommand. Builders should accept a
        single argument (the subparser instance) and return the function to be
        run as the command."""
        def wrapper(decorated):
            subparser = self.subparsers.add_parser(command_name)
            func = decorated(subparser)
            subparser.set_defaults(func=func)
            subparser.description = description or func.__doc__
        # NOTE(review): wrapper does not return the decorated function, so
        # the decorated builder's module-level name is rebound to None.
        # This matches the code as written; confirm upstream intent before
        # "fixing" it.
        return wrapper

    def run(self):
        "Run cli, processing arguments and executing subcommands."
        arguments = self.argument_parser.parse_args()
        # Map the target function's declared parameters onto the parsed
        # argparse attributes of the same names.
        # NOTE(review): inspect.getargspec() was removed in Python 3.11;
        # this code predates that and assumes Python 2 / early Python 3.
        argspec = inspect.getargspec(arguments.func)
        vargs = []
        for arg in argspec.args:
            vargs.append(getattr(arguments, arg))
        if argspec.varargs:
            vargs.extend(getattr(arguments, argspec.varargs))
        output = arguments.func(*vargs)
        if getattr(arguments.func, '_cli_test_command', False):
            # Boolean test subcommand: fold result into exit code, print nothing.
            self.exit_code = 0 if output else 1
            output = ''
        if getattr(arguments.func, '_cli_no_output', False):
            output = ''
        self.formatter.format_output(output, arguments.format)
        # Flush any pending unitdata kv changes a subcommand made.
        if charmhelpers.core.unitdata._KV:
            charmhelpers.core.unitdata._KV.flush()


# Module-level instance used by the cli submodules to register commands.
cmdline = CommandLine()
171
172
def describe_arguments(func):
    """
    Analyze a function's signature and yield (args, kwargs) tuples suitable
    for passing to an argparse parser's add_argument() method.

    Parameters with defaults become ``--name`` options carrying the default;
    other parameters become positionals; ``*varargs`` becomes a ``nargs='*'``
    positional.
    """
    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() when available (Python 3) and fall back on Python 2.
    spec_fn = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    argspec = spec_fn(func)
    # we should probably raise an exception somewhere if func includes **kwargs
    if argspec.defaults:
        positional_args = argspec.args[:-len(argspec.defaults)]
        keyword_names = argspec.args[-len(argspec.defaults):]
        for arg, default in zip(keyword_names, argspec.defaults):
            yield ('--{}'.format(arg),), {'default': default}
    else:
        positional_args = argspec.args

    for arg in positional_args:
        yield (arg,), {}
    if argspec.varargs:
        yield (argspec.varargs,), {'nargs': '*'}
0192
=== added file 'charmhelpers.new/cli/benchmark.py'
--- charmhelpers.new/cli/benchmark.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/benchmark.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,36 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.contrib.benchmark import Benchmark
19
20
# Registered as the 'benchmark-start' subcommand: marks the start of a
# benchmark run via the charmhelpers Benchmark helper.  (Comment, not a
# docstring, because the docstring would become the argparse help text.)
@cmdline.subcommand(command_name='benchmark-start')
def start():
    Benchmark.start()
24
25
# Registered as the 'benchmark-finish' subcommand: marks the end of a
# benchmark run via the charmhelpers Benchmark helper.
@cmdline.subcommand(command_name='benchmark-finish')
def finish():
    Benchmark.finish()
29
30
# Builder for the 'benchmark-composite' subcommand: the returned
# Benchmark.set_composite_score callable receives (value, units, direction)
# from the parsed positional arguments.
@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
def service(subparser):
    subparser.add_argument("value", help="The composite score.")
    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
    return Benchmark.set_composite_score
037
=== added file 'charmhelpers.new/cli/commands.py'
--- charmhelpers.new/cli/commands.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/commands.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,32 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""
18This module loads sub-modules into the python runtime so they can be
19discovered via the inspect module. In order to prevent flake8 from (rightfully)
20telling us these are unused modules, throw a ' # noqa' at the end of each import
21so that the warning is suppressed.
22"""
23
24from . import CommandLine # noqa
25
26"""
27Import the sub-modules which have decorated subcommands to register with chlp.
28"""
29from . import host # noqa
30from . import benchmark # noqa
31from . import unitdata # noqa
32from . import hookenv # noqa
033
=== added file 'charmhelpers.new/cli/hookenv.py'
--- charmhelpers.new/cli/hookenv.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/hookenv.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,23 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import hookenv
19
20
# Expose selected hookenv helpers as chlp subcommands.
# NOTE(review): relation_id and remote_service_name are registered via
# ._wrapped — presumably to bypass a caching/decorator wrapper in
# charmhelpers.core.hookenv; confirm against that module.
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
cmdline.subcommand('service-name')(hookenv.service_name)
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
024
=== added file 'charmhelpers.new/cli/host.py'
--- charmhelpers.new/cli/host.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/host.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,31 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import host
19
20
# Registered as the 'mounts' chlp subcommand; returns host.mounts() so the
# CLI formatter can render the mount list.
@cmdline.subcommand()
def mounts():
    "List mounts"
    return host.mounts()
25
26
# Builder for the 'service' subcommand: the returned host.service callable
# receives (action, service_name) from the parsed positional arguments.
@cmdline.subcommand_builder('service', description="Control system services")
def service(subparser):
    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
    subparser.add_argument("service_name", help="Name of the service to control")
    return host.service
032
=== added file 'charmhelpers.new/cli/unitdata.py'
--- charmhelpers.new/cli/unitdata.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/cli/unitdata.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,39 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from . import cmdline
18from charmhelpers.core import unitdata
19
20
# Builder for the 'unitdata' subcommand, with nested get/set sub-actions
# backed by the charmhelpers unitdata kv store.
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
    nested = subparser.add_subparsers()
    get_cmd = nested.add_parser('get', help='Retrieve data')
    get_cmd.add_argument('key', help='Key to retrieve the value of')
    # 'get' takes no value argument; default it so _unitdata_cmd's
    # three-parameter signature works for both sub-actions.
    get_cmd.set_defaults(action='get', value=None)
    set_cmd = nested.add_parser('set', help='Store data')
    set_cmd.add_argument('key', help='Key to set')
    set_cmd.add_argument('value', help='Value to store')
    set_cmd.set_defaults(action='set')

    def _unitdata_cmd(action, key, value):
        # Dispatch on the sub-action chosen above; 'set' flushes
        # immediately so the value persists, and returns '' to suppress
        # spurious output.
        if action == 'get':
            return unitdata.kv().get(key)
        elif action == 'set':
            unitdata.kv().set(key, value)
            unitdata.kv().flush()
            return ''
    return _unitdata_cmd
040
=== added directory 'charmhelpers.new/contrib'
=== added directory 'charmhelpers.new/contrib/charmsupport'
=== added file 'charmhelpers.new/contrib/charmsupport/nrpe.py'
--- charmhelpers.new/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/charmsupport/nrpe.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,398 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17"""Compatibility with the nrpe-external-master charm"""
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# Matthew Wedgwood <matthew.wedgwood@canonical.com>
22
23import subprocess
24import pwd
25import grp
26import os
27import glob
28import shutil
29import re
30import shlex
31import yaml
32
33from charmhelpers.core.hookenv import (
34 config,
35 local_unit,
36 log,
37 relation_ids,
38 relation_set,
39 relations_of_type,
40)
41
42from charmhelpers.core.host import service
43
44# This module adds compatibility with the nrpe-external-master and plain nrpe
45# subordinate charms. To use it in your charm:
46#
47# 1. Update metadata.yaml
48#
49# provides:
50# (...)
51# nrpe-external-master:
52# interface: nrpe-external-master
53# scope: container
54#
55# and/or
56#
57# provides:
58# (...)
59# local-monitors:
60# interface: local-monitors
61# scope: container
62
63#
64# 2. Add the following to config.yaml
65#
66# nagios_context:
67# default: "juju"
68# type: string
69# description: |
70# Used by the nrpe subordinate charms.
71# A string that will be prepended to instance name to set the host name
72# in nagios. So for instance the hostname would be something like:
73# juju-myservice-0
74# If you're running multiple environments with the same services in them
75# this allows you to differentiate between them.
76# nagios_servicegroups:
77# default: ""
78# type: string
79# description: |
80# A comma-separated list of nagios servicegroups.
81# If left empty, the nagios_context will be used as the servicegroup
82#
83# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
84#
85# 4. Update your hooks.py with something like this:
86#
87# from charmsupport.nrpe import NRPE
88# (...)
89# def update_nrpe_config():
90# nrpe_compat = NRPE()
91# nrpe_compat.add_check(
92# shortname = "myservice",
93# description = "Check MyService",
94# check_cmd = "check_http -w 2 -c 10 http://localhost"
95# )
96# nrpe_compat.add_check(
97# "myservice_other",
98# "Check for widget failures",
99# check_cmd = "/srv/myapp/scripts/widget_check"
100# )
101# nrpe_compat.write()
102#
103# def config_changed():
104# (...)
105# update_nrpe_config()
106#
107# def nrpe_external_master_relation_changed():
108# update_nrpe_config()
109#
110# def local_monitors_relation_changed():
111# update_nrpe_config()
112#
113# 5. ln -s hooks.py nrpe-external-master-relation-changed
114# ln -s hooks.py local-monitors-relation-changed
115
116
class CheckException(Exception):
    """Raised when an NRPE check is misconfigured (e.g. invalid shortname)."""
119
120
class Check(object):
    """A single NRPE check: its nrpe command definition plus the matching
    Nagios service definition exported for the nagios master."""

    # Shortnames end up in filenames and Nagios object names, so restrict
    # them to a conservative character set.
    shortname_re = '[A-Za-z0-9-_]+$'
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use active-service
    host_name {nagios_hostname}
    service_description {nagios_hostname}[{shortname}] """
                        """{description}
    check_command check_nrpe!{command}
    servicegroups {nagios_servicegroup}
}}
""")

    def __init__(self, shortname, description, check_cmd):
        """Validate the shortname and resolve check_cmd to a full path.

        :raises CheckException: if shortname contains illegal characters.
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)

    def _get_check_filename(self):
        # Path of the nrpe command definition file for this check.
        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))

    def _get_service_filename(self, hostname):
        # Path of the exported Nagios service definition for this check.
        return os.path.join(NRPE.nagios_exportdir,
                            'service__{}_{}.cfg'.format(hostname, self.command))

    def _locate_cmd(self, check_cmd):
        """Resolve the plugin named in check_cmd against the standard
        Nagios plugin directories; return '' if it cannot be found."""
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    # Re-attach any arguments after the resolved plugin path.
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def _remove_service_files(self):
        # Delete every exported service file generated for this command.
        if not os.path.exists(NRPE.nagios_exportdir):
            return
        for f in os.listdir(NRPE.nagios_exportdir):
            if f.endswith('_{}.cfg'.format(self.command)):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

    def remove(self, hostname):
        """Remove this check's nrpe config and exported service files."""
        nrpe_check_file = self._get_check_filename()
        if os.path.exists(nrpe_check_file):
            os.remove(nrpe_check_file)
        self._remove_service_files()

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the nrpe command definition, and the Nagios service
        export when the export directory is accessible."""
        nrpe_check_file = self._get_check_filename()
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Render and write the Nagios service definition, replacing any
        previous exports for this command."""
        self._remove_service_files()

        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = self._get_service_filename(hostname)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        # NOTE(review): check_cmd is a single string that may contain
        # arguments; subprocess.call() without shell=True treats the whole
        # string as one program name — confirm args-bearing commands are
        # expected to work here.
        subprocess.call(self.check_cmd)
218
219
class NRPE(object):
    """Aggregate NRPE checks and publish them for nagios.

    Checks are queued via add_check() and materialised by write(), which
    renders their config, restarts the nrpe server and advertises the
    monitors over the local-monitors / nrpe-external-master relations.
    """

    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'

    def __init__(self, hostname=None):
        """Initialise from charm config ('nagios_context',
        'nagios_servicegroups'), taking the hostname from the argument,
        the nrpe subordinate relation, or context + unit name in that
        order of preference."""
        super(NRPE, self).__init__()
        self.config = config()
        self.nagios_context = self.config['nagios_context']
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            # Fall back to the context when the servicegroup is unset/empty.
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            nagios_hostname = get_nagios_hostname()
            if nagios_hostname:
                self.hostname = nagios_hostname
            else:
                self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []

    def add_check(self, *args, **kwargs):
        """Queue a Check (same signature as Check.__init__)."""
        self.checks.append(Check(*args, **kwargs))

    def remove_check(self, *args, **kwargs):
        """Remove a previously-written check; 'shortname' is required.

        :raises ValueError: if no shortname keyword is supplied.
        """
        if kwargs.get('shortname') is None:
            raise ValueError('shortname of check must be specified')

        # Use sensible defaults if they're not specified - these are not
        # actually used during removal, but they're required for constructing
        # the Check object; check_disk is chosen because it's part of the
        # nagios-plugins-basic package.
        if kwargs.get('check_cmd') is None:
            kwargs['check_cmd'] = 'check_disk'
        if kwargs.get('description') is None:
            kwargs['description'] = ''

        check = Check(*args, **kwargs)
        check.remove(self.hostname)

    def write(self):
        """Write all queued checks, restart nrpe, and publish the monitors
        to any related monitoring relations."""
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # Bug fix: this was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt; getpwnam/getgrnam raise
            # KeyError when the user/group does not exist.
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
290
291
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_host_context

    :param str relation_name: Name of relation nrpe sub joined to
    """
    contexts = (rel['nagios_host_context']
                for rel in relations_of_type(relation_name)
                if 'nagios_host_context' in rel)
    return next(contexts, None)
301
302
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    """
    hostnames = (rel['nagios_hostname']
                 for rel in relations_of_type(relation_name)
                 if 'nagios_hostname' in rel)
    return next(hostnames, None)
312
313
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    host_context = get_nagios_hostcontext(relation_name)
    if not host_context:
        return local_unit()
    return "%s:%s" % (host_context, local_unit())
326
327
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add checks for each service in list.

    Upstart-managed services get a check_upstart_job check; sysv-init
    services get a cron job that records their status into a file read
    by check_status_file.py.

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc
        if os.path.exists(upstart_init):
            # Don't add a check for these services from neutron-gateway
            if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
                nrpe.add_check(
                    shortname=svc,
                    description='process check {%s}' % unit_name,
                    check_cmd='check_upstart_job %s' % svc
                )
        elif os.path.exists(sysv_init):
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            # Idiom fix: use a context manager so the cron file handle is
            # closed even if the write fails (was open/write/close).
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
364
365
def copy_nrpe_checks():
    """Install the charm's bundled check_* plugin scripts into the local
    Nagios plugin directory, creating that directory if necessary."""
    plugins_dir = '/usr/local/lib/nagios/plugins'
    # Plugins ship with the charm under hooks/charmhelpers/.../files.
    source_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
                              'charmhelpers', 'contrib', 'openstack',
                              'files')

    if not os.path.exists(plugins_dir):
        os.makedirs(plugins_dir)
    candidates = glob.glob(os.path.join(source_dir, "check_*"))
    for fname in filter(os.path.isfile, candidates):
        shutil.copy2(fname,
                     os.path.join(plugins_dir, os.path.basename(fname)))
382
383
def add_haproxy_checks(nrpe, unit_name):
    """
    Register the standard pair of HAProxy NRPE checks.

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    haproxy_checks = (
        ('haproxy_servers', 'Check HAProxy {%s}', 'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}',
         'check_haproxy_queue_depth.sh'),
    )
    for shortname, description, check_cmd in haproxy_checks:
        nrpe.add_check(shortname=shortname,
                       description=description % unit_name,
                       check_cmd=check_cmd)
0399
=== added directory 'charmhelpers.new/contrib/hahelpers'
=== added file 'charmhelpers.new/contrib/hahelpers/cluster.py'
--- charmhelpers.new/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/hahelpers/cluster.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,316 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17#
18# Copyright 2012 Canonical Ltd.
19#
20# Authors:
21# James Page <james.page@ubuntu.com>
22# Adam Gandelman <adamg@ubuntu.com>
23#
24
25"""
26Helpers for clustering and determining "cluster leadership" and other
27clustering-related helpers.
28"""
29
30import subprocess
31import os
32
33from socket import gethostname as get_unit_hostname
34
35import six
36
37from charmhelpers.core.hookenv import (
38 log,
39 relation_ids,
40 related_units as relation_list,
41 relation_get,
42 config as config_get,
43 INFO,
44 ERROR,
45 WARNING,
46 unit_get,
47 is_leader as juju_is_leader
48)
49from charmhelpers.core.decorators import (
50 retry_on_exception,
51)
52from charmhelpers.core.strutils import (
53 bool_from_string,
54)
55
56DC_RESOURCE_NAME = 'DC'
57
58
class HAIncompleteConfig(Exception):
    """Raised when charm config lacks settings required for HA setup."""


class CRMResourceNotFound(Exception):
    """Raised when a pacemaker resource cannot be located via 'crm'."""


class CRMDCNotFound(Exception):
    """Raised when the pacemaker Designated Controller is unavailable."""
69
70
def is_elected_leader(resource):
    """
    Returns True if the charm executing this is the elected cluster leader.

    It relies on two mechanisms to determine leadership:
    1. If juju is sufficiently new and leadership election is supported,
       the is_leader command will be used.
    2. If the charm is part of a corosync cluster, call corosync to
       determine leadership.
    3. If the charm is not part of a corosync cluster, the leader is
       determined as being "the alive unit with the lowest unit numer". In
       other words, the oldest surviving unit.
    """
    try:
        return juju_is_leader()
    except NotImplementedError:
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        # Corosync/pacemaker cluster: defer to the CRM leader.
        if is_crm_leader(resource):
            return True
        log('Deferring action to CRM leader.', level=INFO)
        return False

    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
101
102
def is_clustered():
    """Return True if any unit on an 'ha' relation reports 'clustered'."""
    for r_id in relation_ids('ha') or []:
        for unit in relation_list(r_id) or []:
            if relation_get('clustered', rid=r_id, unit=unit):
                return True
    return False
112
113
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller

    :raises CRMDCNotFound: if 'crm status' fails or no DC is elected.
    """
    try:
        status = subprocess.check_output(['crm', 'status'],
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    if not isinstance(status, six.text_type):
        status = six.text_type(status, "utf-8")

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    if current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')
    return False
137
138
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()

    try:
        output = subprocess.check_output(
            ['crm', 'resource', 'show', resource],
            stderr=subprocess.STDOUT)
        if not isinstance(output, six.text_type):
            output = six.text_type(output, "utf-8")
    except subprocess.CalledProcessError:
        output = None

    if output:
        if get_unit_hostname() in output:
            return True
        if "resource %s is NOT running" % (resource) in output:
            raise CRMResourceNotFound("CRM resource %s not found" % (resource))
    return False
166
167
def is_leader(resource):
    """Deprecated wrapper; delegates to is_crm_leader()."""
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)
172
173
def peer_units(peer_relation="cluster"):
    """Return a list of all units related via the given peer relation."""
    return [unit
            for r_id in relation_ids(peer_relation) or []
            for unit in relation_list(r_id) or []]
180
181
def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    return {unit: relation_get(addr_key, rid=r_id, unit=unit)
            for r_id in relation_ids(peer_relation)
            for unit in relation_list(r_id)}
189
190
def oldest_peer(peers):
    """Determines who the oldest peer is by comparing unit numbers.

    Returns True when no listed peer has a lower unit number than this
    unit (taken from $JUJU_UNIT_NAME).
    """
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_unit_no for peer in peers)
199
200
def eligible_leader(resource):
    """Deprecated wrapper; delegates to is_elected_leader()."""
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)
205
206
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if all(v not in (None, '') for v in rel_state):
                return True
    return False
230
231
def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    port_shift = 0
    # Short-circuit keeps peer_units()/is_clustered() unevaluated in
    # singlenode mode, matching the original if/elif behaviour.
    if singlenode_mode or peer_units() or is_clustered():
        port_shift += 1
    if https():
        port_shift += 1
    # Each intermediate layer (haproxy, apache TLS) shifts the listener
    # down by 10 ports.
    return public_port - (port_shift * 10)
251
252
def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    port_shift = 0
    # Short-circuit keeps peer_units()/is_clustered() unevaluated in
    # singlenode mode, matching the original if/elif behaviour.
    if singlenode_mode or peer_units() or is_clustered():
        port_shift += 1
    return public_port - (port_shift * 10)
270
271
def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)

    # Idiom fix: collect missing keys with a plain comprehension rather than
    # a list comprehension executed only for its append() side effects.
    missing = [s for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        # Include the offending keys so operators can see what is missing.
        raise HAIncompleteConfig('Missing settings: %s' % ', '.join(missing))
    return conf
296
297
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config tempating object to inspect for
                  a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
0317
=== added directory 'charmhelpers.new/contrib/network'
=== added file 'charmhelpers.new/contrib/network/ip.py'
--- charmhelpers.new/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/network/ip.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,458 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import re
19import subprocess
20import six
21import socket
22
23from functools import partial
24
25from charmhelpers.core.hookenv import unit_get
26from charmhelpers.fetch import apt_install, apt_update
27from charmhelpers.core.hookenv import (
28 log,
29 WARNING,
30)
31
32try:
33 import netifaces
34except ImportError:
35 apt_update(fatal=True)
36 apt_install('python-netifaces', fatal=True)
37 import netifaces
38
39try:
40 import netaddr
41except ImportError:
42 apt_update(fatal=True)
43 apt_install('python-netaddr', fatal=True)
44 import netaddr
45
46
def _validate_cidr(network):
    """Raise ValueError unless *network* parses as CIDR notation."""
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
53
54
def no_ip_found_error_out(network):
    """Raise a ValueError reporting that no IP was found in *network*."""
    raise ValueError("No IP address found in network(s): %s" % network)
58
59
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    """
    if network is None:
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            return None

    # Space-delimited list of CIDRs; each entry is validated before use.
    networks = network.split() or [network]
    for network in networks:
        _validate_cidr(network)
        network = netaddr.IPNetwork(network)
        for iface in netifaces.interfaces():
            addresses = netifaces.ifaddresses(iface)
            if network.version == 4 and netifaces.AF_INET in addresses:
                # Only the first IPv4 address on each interface is checked.
                addr = addresses[netifaces.AF_INET][0]['addr']
                netmask = addresses[netifaces.AF_INET][0]['netmask']
                cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
                if cidr in network:
                    return str(cidr.ip)

            if network.version == 6 and netifaces.AF_INET6 in addresses:
                for addr in addresses[netifaces.AF_INET6]:
                    # Skip link-local (fe80...) addresses.
                    if not addr['addr'].startswith('fe80'):
                        cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                            addr['netmask']))
                        if cidr in network:
                            return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None
106
107
def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        parsed = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False
    return parsed.version == 6
117
118
def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        cidr = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        ip = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    return ip in cidr
145
146
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            # Only the primary IPv4 address of each interface is considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Link-local (fe80...) addresses are never candidates.
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            # Derive the prefix length from the CIDR rather
                            # than returning the raw netifaces entry.
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None
186
187
# Convenience partials over _get_for_address: resolve the interface name,
# or the netmask, that a given local address is bound to.
get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')
192
193
def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    return "[%s]" % address if is_ipv6(address) else None
204
205
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any.

    :param iface: interface name; a '/dev/ethX' style path is accepted.
    :param inet_type: name of a netifaces address-family constant,
        e.g. 'AF_INET' or 'AF_INET6'.
    :param inc_aliases: also include alias interfaces ('eth0:1' etc.).
    :param fatal: raise instead of returning an empty list on failure.
    :param exc_list: addresses to exclude from the result.
    :returns: sorted list of address strings.
    """
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]

    if not exc_list:
        exc_list = []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            # Aliases like 'eth0:1' share the prefix before the colon.
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)

        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)

        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []

        else:
            ifaces = [iface]

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
255
256
257get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
258
259
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    # Strip a link-local zone suffix ("%ifname") before comparing.
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        for entries in netifaces.ifaddresses(iface).values():
            for entry in entries:
                candidate = entry['addr']
                raw = ll_key.match(candidate)
                if raw:
                    candidate = raw.group(1)

                if candidate == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
280
281
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    def iface_sniffer(*args, **kwargs):
        if kwargs.get('iface'):
            return f(*args, **kwargs)
        # No usable iface supplied: infer it from the unit's own address.
        kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
        return f(*args, **kwargs)

    return iface_sniffer
294
295
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.

    :param dynamic_only: only accept addresses 'ip addr' marks as
        'scope global dynamic'.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            # Link-local addresses embed the EUI-64 suffix and iface name.
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            # NOTE(review): if no link-local address matched
                            # above, eui_64_mac is never bound, so the
                            # endswith() below raises NameError when
                            # dynamic_only is True — confirm against upstream
                            # charm-helpers.
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
354
355
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    # Every bridge device exposes a <vnic_dir>/<name>/bridge directory.
    markers = glob.glob("%s/*/bridge" % vnic_dir)
    return [marker.replace(vnic_dir, '').split('/')[1] for marker in markers]
360
361
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    # Member nics appear as entries under <vnic_dir>/<bridge>/brif/.
    entries = glob.glob("%s/%s/brif/*" % (vnic_dir, bridge))
    return [entry.split('/')[-1] for entry in entries]
366
367
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
375
376
def is_ip(address):
    """
    Returns True if address is a valid IP address.

    NOTE(review): inet_aton only parses IPv4, so IPv6 literals return
    False here — confirm callers expect that.
    """
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
387
388
def ns_query(address):
    """Resolve *address* via DNS.

    Performs a PTR lookup for dns.name.Name instances and an A lookup for
    strings.

    :returns: str: the first answer, or None when the input type is
        unsupported or the name does not exist.
    """
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    try:
        answers = dns.resolver.query(address, rtype)
    except dns.resolver.NXDOMAIN:
        # Bug fix: a non-existent name used to propagate NXDOMAIN, which
        # stopped callers (e.g. get_host_ip) from reaching their
        # gethostbyname fallback; treat it as "no answer" instead.
        return None
    if answers:
        return str(answers[0])
    return None
407
408
def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.

    :param hostname: hostname or IPv4 address string.
    :param fallback: value returned when resolution fails entirely.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except socket.error:
            # Bug fix: this was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt; gethostbyname raises
            # socket.gaierror (a socket.error) on failure.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
426
427
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    :param address: IP address or hostname string.
    :param fqdn: if True return the full name (trailing dot stripped),
        otherwise only the first label.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install("python-dnspython")
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            try:
                result = socket.gethostbyaddr(address)[0]
            except socket.error:
                # Bug fix: this was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt; gethostbyaddr raises
                # socket.herror/gaierror (socket.error subclasses).
                return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
0459
=== added directory 'charmhelpers.new/contrib/openstack'
=== added directory 'charmhelpers.new/contrib/openstack/amulet'
=== added file 'charmhelpers.new/contrib/openstack/amulet/deployment.py'
--- charmhelpers.new/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/amulet/deployment.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,302 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import logging
18import re
19import sys
20import six
21from collections import OrderedDict
22from charmhelpers.contrib.amulet.deployment import (
23 AmuletDeployment
24)
25
26DEBUG = logging.DEBUG
27ERROR = logging.ERROR
28
29
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy on (e.g. 'trusty').
        :param openstack: value for the charms' openstack-origin option.
        :param source: value for the charms' source option.
        :param stable: True to test stable charm branches, False for next.
        :param log_level: level for the stdout deployment logger.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        # Bug fix: only attach the stream handler the first time this
        # named logger is requested. Previously every call appended
        # another handler to the shared logger, so each message was
        # emitted once per get_logger() call.
        if not logger.handlers:
            fmt = log.Formatter("%(asctime)s %(funcName)s "
                                "%(levelname)s: %(message)s")

            handler = log.StreamHandler(stream=sys.stdout)
            handler.setLevel(level)
            handler.setFormatter(fmt)

            logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresponding
        stable or next branches for the other_services.

        :param other_services: list of service dicts; entries without an
            explicit 'location' key get one filled in here.
        :returns: the same list with 'location' set on every entry.
        """

        self.log.info('OpenStackAmuletDeployment: determine branch locations')

        # Charms outside the lp:~openstack-charmers namespace
        base_charms = ['mysql', 'mongodb', 'nrpe']

        # Force these charms to current series even when using an older series.
        # ie. Use trusty/nrpe even when series is precise, as the P charm
        # does not possess the necessary external master config and hooks.
        force_series_current = ['nrpe']

        if self.series in ['precise', 'trusty']:
            base_series = self.series
        else:
            base_series = self.current_next

        for svc in other_services:
            if svc['name'] in force_series_current:
                base_series = self.current_next
            # If a location has been explicitly set, use it
            if svc.get('location'):
                continue
            if self.stable:
                temp = 'lp:charms/{}/{}'
                svc['location'] = temp.format(base_series,
                                              svc['name'])
            else:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}/{}'
                    svc['location'] = temp.format(base_series,
                                                  svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])

        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        self.log.info('OpenStackAmuletDeployment: adding services')

        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        # NOTE(review): this appends to the caller's other_services list
        # in place; callers reusing that list will see this_service in it.
        services = other_services
        services.append(this_service)

        # Charms which should use the source config option
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
                     'cinder-backup']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        self.log.info('OpenStackAmuletDeployment: configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _auto_wait_for_status(self, message=None, exclude_services=None,
                              include_only=None, timeout=1800):
        """Wait for all units to have a specific extended status, except
        for any defined as excluded. Unless specified via message, any
        status containing any case of 'ready' will be considered a match.

        Examples of message usage:

          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)

          Wait for all units to reach this status (exact match):
              message = re.compile('^Unit is ready and clustered$')

          Wait for all units to reach any one of these (exact match):
              message = re.compile('Unit is ready|OK|Ready')

          Wait for at least one unit to reach this status (exact match):
              message = {'ready'}

        See Amulet's sentry.wait_for_messages() for message usage detail.
        https://github.com/juju/amulet/blob/master/amulet/sentry.py

        :param message: Expected status match
        :param exclude_services: List of juju service names to ignore,
            not to be used in conjunction with include_only.
        :param include_only: List of juju service names to exclusively check,
            not to be used in conjunction with exclude_services.
        :param timeout: Maximum time in seconds to wait for status match
        :returns: None.  Raises if timeout is hit.
        """
        self.log.info('Waiting for extended status on units...')

        all_services = self.d.services.keys()

        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')

        if message:
            # Bug fix: re._pattern_type is a private alias that was
            # removed in Python 3.8; compare against the type of a
            # compiled pattern instead.
            if isinstance(message, type(re.compile(''))):
                match = message.pattern
            else:
                match = message

            self.log.debug('Custom extended status wait match: '
                           '{}'.format(match))
        else:
            self.log.debug('Default extended status wait match: contains '
                           'READY (case-insensitive)')
            message = re.compile('.*ready.*', re.IGNORECASE)

        if exclude_services:
            self.log.debug('Excluding services from extended status match: '
                           '{}'.format(exclude_services))
        else:
            exclude_services = []

        if include_only:
            services = include_only
        else:
            services = list(set(all_services) - set(exclude_services))

        self.log.debug('Waiting up to {}s for extended status on services: '
                       '{}'.format(timeout, services))
        service_messages = {service: message for service in services}
        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
        self.log.info('OK')

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
         self.wily_liberty, self.trusty_mitaka,
         self.xenial_mitaka) = range(14)

        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo,
            ('wily', None): self.wily_liberty,
            ('xenial', None): self.xenial_mitaka}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
            ('wily', 'liberty'),
            ('xenial', 'mitaka'),
        ])
        if self.openstack:
            # e.g. 'cloud:trusty-liberty/updates' -> 'liberty'
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools in a ceph + cinder + glance
        test scenario, based on OpenStack release and whether ceph radosgw
        is flagged as present or not."""

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            pools = [
                'rbd',
                'cinder',
                'glance'
            ]
        else:
            # Juno or earlier
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
0303
=== added file 'charmhelpers.new/contrib/openstack/amulet/utils.py'
--- charmhelpers.new/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/amulet/utils.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,985 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import amulet
18import json
19import logging
20import os
21import re
22import six
23import time
24import urllib
25
26import cinderclient.v1.client as cinder_client
27import glanceclient.v1.client as glance_client
28import heatclient.v1.client as heat_client
29import keystoneclient.v2_0 as keystone_client
30import novaclient.v1_1.client as nova_client
31import pika
32import swiftclient
33
34from charmhelpers.contrib.amulet.utils import (
35 AmuletUtils
36)
37
38DEBUG = logging.DEBUG
39ERROR = logging.ERROR
40
41
42class OpenStackAmuletUtils(AmuletUtils):
43 """OpenStack amulet utilities.
44
45 This class inherits from AmuletUtils and has additional support
46 that is specifically for use by OpenStack charm tests.
47 """
48
    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment.

        :param log_level: logging level handed straight to the AmuletUtils
            base class (defaults to ERROR to keep test output quiet).
        """
        super(OpenStackAmuletUtils, self).__init__(log_level)
52
53 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
54 public_port, expected):
55 """Validate endpoint data.
56
57 Validate actual endpoint data vs expected endpoint data. The ports
58 are used to find the matching endpoint.
59 """
60 self.log.debug('Validating endpoint data...')
61 self.log.debug('actual: {}'.format(repr(endpoints)))
62 found = False
63 for ep in endpoints:
64 self.log.debug('endpoint: {}'.format(repr(ep)))
65 if (admin_port in ep.adminurl and
66 internal_port in ep.internalurl and
67 public_port in ep.publicurl):
68 found = True
69 actual = {'id': ep.id,
70 'region': ep.region,
71 'adminurl': ep.adminurl,
72 'internalurl': ep.internalurl,
73 'publicurl': ep.publicurl,
74 'service_id': ep.service_id}
75 ret = self._validate_dict_data(expected, actual)
76 if ret:
77 return 'unexpected endpoint data - {}'.format(ret)
78
79 if not found:
80 return 'endpoint not found'
81
82 def validate_svc_catalog_endpoint_data(self, expected, actual):
83 """Validate service catalog endpoint data.
84
85 Validate a list of actual service catalog endpoints vs a list of
86 expected service catalog endpoints.
87 """
88 self.log.debug('Validating service catalog endpoint data...')
89 self.log.debug('actual: {}'.format(repr(actual)))
90 for k, v in six.iteritems(expected):
91 if k in actual:
92 ret = self._validate_dict_data(expected[k][0], actual[k][0])
93 if ret:
94 return self.endpoint_error(k, ret)
95 else:
96 return "endpoint {} does not exist".format(k)
97 return ret
98
99 def validate_tenant_data(self, expected, actual):
100 """Validate tenant data.
101
102 Validate a list of actual tenant data vs list of expected tenant
103 data.
104 """
105 self.log.debug('Validating tenant data...')
106 self.log.debug('actual: {}'.format(repr(actual)))
107 for e in expected:
108 found = False
109 for act in actual:
110 a = {'enabled': act.enabled, 'description': act.description,
111 'name': act.name, 'id': act.id}
112 if e['name'] == a['name']:
113 found = True
114 ret = self._validate_dict_data(e, a)
115 if ret:
116 return "unexpected tenant data - {}".format(ret)
117 if not found:
118 return "tenant {} does not exist".format(e['name'])
119 return ret
120
121 def validate_role_data(self, expected, actual):
122 """Validate role data.
123
124 Validate a list of actual role data vs a list of expected role
125 data.
126 """
127 self.log.debug('Validating role data...')
128 self.log.debug('actual: {}'.format(repr(actual)))
129 for e in expected:
130 found = False
131 for act in actual:
132 a = {'name': act.name, 'id': act.id}
133 if e['name'] == a['name']:
134 found = True
135 ret = self._validate_dict_data(e, a)
136 if ret:
137 return "unexpected role data - {}".format(ret)
138 if not found:
139 return "role {} does not exist".format(e['name'])
140 return ret
141
142 def validate_user_data(self, expected, actual):
143 """Validate user data.
144
145 Validate a list of actual user data vs a list of expected user
146 data.
147 """
148 self.log.debug('Validating user data...')
149 self.log.debug('actual: {}'.format(repr(actual)))
150 for e in expected:
151 found = False
152 for act in actual:
153 a = {'enabled': act.enabled, 'name': act.name,
154 'email': act.email, 'tenantId': act.tenantId,
155 'id': act.id}
156 if e['name'] == a['name']:
157 found = True
158 ret = self._validate_dict_data(e, a)
159 if ret:
160 return "unexpected user data - {}".format(ret)
161 if not found:
162 return "user {} does not exist".format(e['name'])
163 return ret
164
165 def validate_flavor_data(self, expected, actual):
166 """Validate flavor data.
167
168 Validate a list of actual flavors vs a list of expected flavors.
169 """
170 self.log.debug('Validating flavor data...')
171 self.log.debug('actual: {}'.format(repr(actual)))
172 act = [a.name for a in actual]
173 return self._validate_list_data(expected, act)
174
175 def tenant_exists(self, keystone, tenant):
176 """Return True if tenant exists."""
177 self.log.debug('Checking if tenant exists ({})...'.format(tenant))
178 return tenant in [t.name for t in keystone.tenants.list()]
179
180 def authenticate_cinder_admin(self, keystone_sentry, username,
181 password, tenant):
182 """Authenticates admin user with cinder."""
183 # NOTE(beisner): cinder python client doesn't accept tokens.
184 service_ip = \
185 keystone_sentry.relation('shared-db',
186 'mysql:shared-db')['private-address']
187 ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
188 return cinder_client.Client(username, password, tenant, ept)
189
190 def authenticate_keystone_admin(self, keystone_sentry, user, password,
191 tenant):
192 """Authenticates admin user with the keystone admin endpoint."""
193 self.log.debug('Authenticating keystone admin...')
194 unit = keystone_sentry
195 service_ip = unit.relation('shared-db',
196 'mysql:shared-db')['private-address']
197 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
198 return keystone_client.Client(username=user, password=password,
199 tenant_name=tenant, auth_url=ep)
200
201 def authenticate_keystone_user(self, keystone, user, password, tenant):
202 """Authenticates a regular user with the keystone public endpoint."""
203 self.log.debug('Authenticating keystone user ({})...'.format(user))
204 ep = keystone.service_catalog.url_for(service_type='identity',
205 endpoint_type='publicURL')
206 return keystone_client.Client(username=user, password=password,
207 tenant_name=tenant, auth_url=ep)
208
209 def authenticate_glance_admin(self, keystone):
210 """Authenticates admin user with glance."""
211 self.log.debug('Authenticating glance admin...')
212 ep = keystone.service_catalog.url_for(service_type='image',
213 endpoint_type='adminURL')
214 return glance_client.Client(ep, token=keystone.auth_token)
215
216 def authenticate_heat_admin(self, keystone):
217 """Authenticates the admin user with heat."""
218 self.log.debug('Authenticating heat admin...')
219 ep = keystone.service_catalog.url_for(service_type='orchestration',
220 endpoint_type='publicURL')
221 return heat_client.Client(endpoint=ep, token=keystone.auth_token)
222
223 def authenticate_nova_user(self, keystone, user, password, tenant):
224 """Authenticates a regular user with nova-api."""
225 self.log.debug('Authenticating nova user ({})...'.format(user))
226 ep = keystone.service_catalog.url_for(service_type='identity',
227 endpoint_type='publicURL')
228 return nova_client.Client(username=user, api_key=password,
229 project_id=tenant, auth_url=ep)
230
231 def authenticate_swift_user(self, keystone, user, password, tenant):
232 """Authenticates a regular user with swift api."""
233 self.log.debug('Authenticating swift user ({})...'.format(user))
234 ep = keystone.service_catalog.url_for(service_type='identity',
235 endpoint_type='publicURL')
236 return swiftclient.Connection(authurl=ep,
237 user=user,
238 key=password,
239 tenant_name=tenant,
240 auth_version='2.0')
241
242 def create_cirros_image(self, glance, image_name):
243 """Download the latest cirros image and upload it to glance,
244 validate and return a resource pointer.
245
246 :param glance: pointer to authenticated glance connection
247 :param image_name: display name for new image
248 :returns: glance image pointer
249 """
250 self.log.debug('Creating glance cirros image '
251 '({})...'.format(image_name))
252
253 # Download cirros image
254 http_proxy = os.getenv('AMULET_HTTP_PROXY')
255 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
256 if http_proxy:
257 proxies = {'http': http_proxy}
258 opener = urllib.FancyURLopener(proxies)
259 else:
260 opener = urllib.FancyURLopener()
261
262 f = opener.open('http://download.cirros-cloud.net/version/released')
263 version = f.read().strip()
264 cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
265 local_path = os.path.join('tests', cirros_img)
266
267 if not os.path.exists(local_path):
268 cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
269 version, cirros_img)
270 opener.retrieve(cirros_url, local_path)
271 f.close()
272
273 # Create glance image
274 with open(local_path) as f:
275 image = glance.images.create(name=image_name, is_public=True,
276 disk_format='qcow2',
277 container_format='bare', data=f)
278
279 # Wait for image to reach active status
280 img_id = image.id
281 ret = self.resource_reaches_status(glance.images, img_id,
282 expected_stat='active',
283 msg='Image status wait')
284 if not ret:
285 msg = 'Glance image failed to reach expected state.'
286 amulet.raise_status(amulet.FAIL, msg=msg)
287
288 # Re-validate new image
289 self.log.debug('Validating image attributes...')
290 val_img_name = glance.images.get(img_id).name
291 val_img_stat = glance.images.get(img_id).status
292 val_img_pub = glance.images.get(img_id).is_public
293 val_img_cfmt = glance.images.get(img_id).container_format
294 val_img_dfmt = glance.images.get(img_id).disk_format
295 msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
296 'container fmt:{} disk fmt:{}'.format(
297 val_img_name, val_img_pub, img_id,
298 val_img_stat, val_img_cfmt, val_img_dfmt))
299
300 if val_img_name == image_name and val_img_stat == 'active' \
301 and val_img_pub is True and val_img_cfmt == 'bare' \
302 and val_img_dfmt == 'qcow2':
303 self.log.debug(msg_attr)
304 else:
305 msg = ('Volume validation failed, {}'.format(msg_attr))
306 amulet.raise_status(amulet.FAIL, msg=msg)
307
308 return image
309
    def delete_image(self, glance, image):
        """Delete the specified image.

        Deprecated thin wrapper kept for backwards compatibility; new
        code should call delete_resource(glance.images, ...) directly.
        """

        # /!\ DEPRECATION WARNING
        self.log.warn('/!\\ DEPRECATION WARNING: use '
                      'delete_resource instead of delete_image.')
        self.log.debug('Deleting glance image ({})...'.format(image))
        return self.delete_resource(glance.images, image, msg='glance image')
318
319 def create_instance(self, nova, image_name, instance_name, flavor):
320 """Create the specified instance."""
321 self.log.debug('Creating instance '
322 '({}|{}|{})'.format(instance_name, image_name, flavor))
323 image = nova.images.find(name=image_name)
324 flavor = nova.flavors.find(name=flavor)
325 instance = nova.servers.create(name=instance_name, image=image,
326 flavor=flavor)
327
328 count = 1
329 status = instance.status
330 while status != 'ACTIVE' and count < 60:
331 time.sleep(3)
332 instance = nova.servers.get(instance.id)
333 status = instance.status
334 self.log.debug('instance status: {}'.format(status))
335 count += 1
336
337 if status != 'ACTIVE':
338 self.log.error('instance creation timed out')
339 return None
340
341 return instance
342
    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Deprecated thin wrapper kept for backwards compatibility; new
        code should call delete_resource(nova.servers, ...) directly.
        """

        # /!\ DEPRECATION WARNING
        self.log.warn('/!\\ DEPRECATION WARNING: use '
                      'delete_resource instead of delete_instance.')
        self.log.debug('Deleting instance ({})...'.format(instance))
        return self.delete_resource(nova.servers, instance,
                                    msg='nova instance')
352
353 def create_or_get_keypair(self, nova, keypair_name="testkey"):
354 """Create a new keypair, or return pointer if it already exists."""
355 try:
356 _keypair = nova.keypairs.get(keypair_name)
357 self.log.debug('Keypair ({}) already exists, '
358 'using it.'.format(keypair_name))
359 return _keypair
360 except:
361 self.log.debug('Keypair ({}) does not exist, '
362 'creating it.'.format(keypair_name))
363
364 _keypair = nova.keypairs.create(name=keypair_name)
365 return _keypair
366
    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                             img_id=None, src_vol_id=None, snap_id=None):
        """Create cinder volume, optionally from a glance image, OR
        optionally as a clone of an existing volume, OR optionally
        from a snapshot.  Wait for the new volume status to reach
        the expected status, validate and return a resource pointer.

        At most one of img_id / src_vol_id / snap_id may be supplied;
        any other combination aborts the test run via amulet.raise_status.

        :param vol_name: cinder volume display name
        :param vol_size: size in gigabytes
        :param img_id: optional glance image id
        :param src_vol_id: optional source volume id to clone
        :param snap_id: optional snapshot id to use
        :returns: cinder volume pointer
        """
        # Handle parameter input and avoid impossible combinations.
        # Each branch also determines the expected 'bootable' flag used
        # for validation below (cinder reports it as a string).
        if img_id and not src_vol_id and not snap_id:
            # Create volume from image
            self.log.debug('Creating cinder volume from glance image...')
            bootable = 'true'
        elif src_vol_id and not img_id and not snap_id:
            # Clone an existing volume: inherit its bootable flag
            self.log.debug('Cloning cinder volume...')
            bootable = cinder.volumes.get(src_vol_id).bootable
        elif snap_id and not src_vol_id and not img_id:
            # Create volume from snapshot: size and bootable flag come
            # from the snapshot's origin volume
            self.log.debug('Creating cinder volume from snapshot...')
            snap = cinder.volume_snapshots.find(id=snap_id)
            vol_size = snap.size
            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
            bootable = cinder.volumes.get(snap_vol_id).bootable
        elif not img_id and not src_vol_id and not snap_id:
            # Create volume
            self.log.debug('Creating cinder volume...')
            bootable = 'false'
        else:
            # Impossible combination of parameters
            msg = ('Invalid method use - name:{} size:{} img_id:{} '
                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
                                                     img_id, src_vol_id,
                                                     snap_id))
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Create new volume
        try:
            vol_new = cinder.volumes.create(display_name=vol_name,
                                            imageRef=img_id,
                                            size=vol_size,
                                            source_volid=src_vol_id,
                                            snapshot_id=snap_id)
            vol_id = vol_new.id
        except Exception as e:
            msg = 'Failed to create volume: {}'.format(e)
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Wait for volume to reach available status
        ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                           expected_stat="available",
                                           msg="Volume status wait")
        if not ret:
            msg = 'Cinder volume failed to reach expected state.'
            amulet.raise_status(amulet.FAIL, msg=msg)

        # Re-validate new volume
        self.log.debug('Validating volume attributes...')
        val_vol_name = cinder.volumes.get(vol_id).display_name
        val_vol_boot = cinder.volumes.get(vol_id).bootable
        val_vol_stat = cinder.volumes.get(vol_id).status
        val_vol_size = cinder.volumes.get(vol_id).size
        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                    '{} size:{}'.format(val_vol_name, vol_id,
                                        val_vol_stat, val_vol_boot,
                                        val_vol_size))

        if val_vol_boot == bootable and val_vol_stat == 'available' \
                and val_vol_name == vol_name and val_vol_size == vol_size:
            self.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            amulet.raise_status(amulet.FAIL, msg=msg)

        return vol_new
448
449 def delete_resource(self, resource, resource_id,
450 msg="resource", max_wait=120):
451 """Delete one openstack resource, such as one instance, keypair,
452 image, volume, stack, etc., and confirm deletion within max wait time.
453
454 :param resource: pointer to os resource type, ex:glance_client.images
455 :param resource_id: unique name or id for the openstack resource
456 :param msg: text to identify purpose in logging
457 :param max_wait: maximum wait time in seconds
458 :returns: True if successful, otherwise False
459 """
460 self.log.debug('Deleting OpenStack resource '
461 '{} ({})'.format(resource_id, msg))
462 num_before = len(list(resource.list()))
463 resource.delete(resource_id)
464
465 tries = 0
466 num_after = len(list(resource.list()))
467 while num_after != (num_before - 1) and tries < (max_wait / 4):
468 self.log.debug('{} delete check: '
469 '{} [{}:{}] {}'.format(msg, tries,
470 num_before,
471 num_after,
472 resource_id))
473 time.sleep(4)
474 num_after = len(list(resource.list()))
475 tries += 1
476
477 self.log.debug('{}: expected, actual count = {}, '
478 '{}'.format(msg, num_before - 1, num_after))
479
480 if num_after == (num_before - 1):
481 return True
482 else:
483 self.log.error('{} delete timed out'.format(msg))
484 return False
485
486 def resource_reaches_status(self, resource, resource_id,
487 expected_stat='available',
488 msg='resource', max_wait=120):
489 """Wait for an openstack resources status to reach an
490 expected status within a specified time. Useful to confirm that
491 nova instances, cinder vols, snapshots, glance images, heat stacks
492 and other resources eventually reach the expected status.
493
494 :param resource: pointer to os resource type, ex: heat_client.stacks
495 :param resource_id: unique id for the openstack resource
496 :param expected_stat: status to expect resource to reach
497 :param msg: text to identify purpose in logging
498 :param max_wait: maximum wait time in seconds
499 :returns: True if successful, False if status is not reached
500 """
501
502 tries = 0
503 resource_stat = resource.get(resource_id).status
504 while resource_stat != expected_stat and tries < (max_wait / 4):
505 self.log.debug('{} status check: '
506 '{} [{}:{}] {}'.format(msg, tries,
507 resource_stat,
508 expected_stat,
509 resource_id))
510 time.sleep(4)
511 resource_stat = resource.get(resource_id).status
512 tries += 1
513
514 self.log.debug('{}: expected, actual status = {}, '
515 '{}'.format(msg, resource_stat, expected_stat))
516
517 if resource_stat == expected_stat:
518 return True
519 else:
520 self.log.debug('{} never reached expected status: '
521 '{}'.format(resource_id, expected_stat))
522 return False
523
524 def get_ceph_osd_id_cmd(self, index):
525 """Produce a shell command that will return a ceph-osd id."""
526 return ("`initctl list | grep 'ceph-osd ' | "
527 "awk 'NR=={} {{ print $2 }}' | "
528 "grep -o '[0-9]*'`".format(index + 1))
529
530 def get_ceph_pools(self, sentry_unit):
531 """Return a dict of ceph pools from a single ceph unit, with
532 pool name as keys, pool id as vals."""
533 pools = {}
534 cmd = 'sudo ceph osd lspools'
535 output, code = sentry_unit.run(cmd)
536 if code != 0:
537 msg = ('{} `{}` returned {} '
538 '{}'.format(sentry_unit.info['unit_name'],
539 cmd, code, output))
540 amulet.raise_status(amulet.FAIL, msg=msg)
541
542 # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
543 for pool in str(output).split(','):
544 pool_id_name = pool.split(' ')
545 if len(pool_id_name) == 2:
546 pool_id = pool_id_name[0]
547 pool_name = pool_id_name[1]
548 pools[pool_name] = int(pool_id)
549
550 self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
551 pools))
552 return pools
553
554 def get_ceph_df(self, sentry_unit):
555 """Return dict of ceph df json output, including ceph pool state.
556
557 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
558 :returns: Dict of ceph df output
559 """
560 cmd = 'sudo ceph df --format=json'
561 output, code = sentry_unit.run(cmd)
562 if code != 0:
563 msg = ('{} `{}` returned {} '
564 '{}'.format(sentry_unit.info['unit_name'],
565 cmd, code, output))
566 amulet.raise_status(amulet.FAIL, msg=msg)
567 return json.loads(output)
568
569 def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
570 """Take a sample of attributes of a ceph pool, returning ceph
571 pool name, object count and disk space used for the specified
572 pool ID number.
573
574 :param sentry_unit: Pointer to amulet sentry instance (juju unit)
575 :param pool_id: Ceph pool ID
576 :returns: List of pool name, object count, kb disk space used
577 """
578 df = self.get_ceph_df(sentry_unit)
579 pool_name = df['pools'][pool_id]['name']
580 obj_count = df['pools'][pool_id]['stats']['objects']
581 kb_used = df['pools'][pool_id]['stats']['kb_used']
582 self.log.debug('Ceph {} pool (ID {}): {} objects, '
583 '{} kb used'.format(pool_name, pool_id,
584 obj_count, kb_used))
585 return pool_name, obj_count, kb_used
586
587 def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
588 """Validate ceph pool samples taken over time, such as pool
589 object counts or pool kb used, before adding, after adding, and
590 after deleting items which affect those pool attributes. The
591 2nd element is expected to be greater than the 1st; 3rd is expected
592 to be less than the 2nd.
593
594 :param samples: List containing 3 data samples
595 :param sample_type: String for logging and usage context
596 :returns: None if successful, Failure message otherwise
597 """
598 original, created, deleted = range(3)
599 if samples[created] <= samples[original] or \
600 samples[deleted] >= samples[created]:
601 return ('Ceph {} samples ({}) '
602 'unexpected.'.format(sample_type, samples))
603 else:
604 self.log.debug('Ceph {} samples (OK): '
605 '{}'.format(sample_type, samples))
606 return None
607
608 # rabbitmq/amqp specific helpers:
609
610 def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
611 """Wait for rmq units extended status to show cluster readiness,
612 after an optional initial sleep period. Initial sleep is likely
613 necessary to be effective following a config change, as status
614 message may not instantly update to non-ready."""
615
616 if init_sleep:
617 time.sleep(init_sleep)
618
619 message = re.compile('^Unit is ready and clustered$')
620 deployment._auto_wait_for_status(message=message,
621 timeout=timeout,
622 include_only=['rabbitmq-server'])
623
624 def add_rmq_test_user(self, sentry_units,
625 username="testuser1", password="changeme"):
626 """Add a test user via the first rmq juju unit, check connection as
627 the new user against all sentry units.
628
629 :param sentry_units: list of sentry unit pointers
630 :param username: amqp user name, default to testuser1
631 :param password: amqp user password
632 :returns: None if successful. Raise on error.
633 """
634 self.log.debug('Adding rmq user ({})...'.format(username))
635
636 # Check that user does not already exist
637 cmd_user_list = 'rabbitmqctl list_users'
638 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
639 if username in output:
640 self.log.warning('User ({}) already exists, returning '
641 'gracefully.'.format(username))
642 return
643
644 perms = '".*" ".*" ".*"'
645 cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
646 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
647
648 # Add user via first unit
649 for cmd in cmds:
650 output, _ = self.run_cmd_unit(sentry_units[0], cmd)
651
652 # Check connection against the other sentry_units
653 self.log.debug('Checking user connect against units...')
654 for sentry_unit in sentry_units:
655 connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
656 username=username,
657 password=password)
658 connection.close()
659
660 def delete_rmq_test_user(self, sentry_units, username="testuser1"):
661 """Delete a rabbitmq user via the first rmq juju unit.
662
663 :param sentry_units: list of sentry unit pointers
664 :param username: amqp user name, default to testuser1
665 :param password: amqp user password
666 :returns: None if successful or no such user.
667 """
668 self.log.debug('Deleting rmq user ({})...'.format(username))
669
670 # Check that the user exists
671 cmd_user_list = 'rabbitmqctl list_users'
672 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
673
674 if username not in output:
675 self.log.warning('User ({}) does not exist, returning '
676 'gracefully.'.format(username))
677 return
678
679 # Delete the user
680 cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
681 output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
682
683 def get_rmq_cluster_status(self, sentry_unit):
684 """Execute rabbitmq cluster status command on a unit and return
685 the full output.
686
687 :param unit: sentry unit
688 :returns: String containing console output of cluster status command
689 """
690 cmd = 'rabbitmqctl cluster_status'
691 output, _ = self.run_cmd_unit(sentry_unit, cmd)
692 self.log.debug('{} cluster_status:\n{}'.format(
693 sentry_unit.info['unit_name'], output))
694 return str(output)
695
696 def get_rmq_cluster_running_nodes(self, sentry_unit):
697 """Parse rabbitmqctl cluster_status output string, return list of
698 running rabbitmq cluster nodes.
699
700 :param unit: sentry unit
701 :returns: List containing node names of running nodes
702 """
703 # NOTE(beisner): rabbitmqctl cluster_status output is not
704 # json-parsable, do string chop foo, then json.loads that.
705 str_stat = self.get_rmq_cluster_status(sentry_unit)
706 if 'running_nodes' in str_stat:
707 pos_start = str_stat.find("{running_nodes,") + 15
708 pos_end = str_stat.find("]},", pos_start) + 1
709 str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
710 run_nodes = json.loads(str_run_nodes)
711 return run_nodes
712 else:
713 return []
714
715 def validate_rmq_cluster_running_nodes(self, sentry_units):
716 """Check that all rmq unit hostnames are represented in the
717 cluster_status output of all units.
718
719 :param host_names: dict of juju unit names to host names
720 :param units: list of sentry unit pointers (all rmq units)
721 :returns: None if successful, otherwise return error message
722 """
723 host_names = self.get_unit_hostnames(sentry_units)
724 errors = []
725
726 # Query every unit for cluster_status running nodes
727 for query_unit in sentry_units:
728 query_unit_name = query_unit.info['unit_name']
729 running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
730
731 # Confirm that every unit is represented in the queried unit's
732 # cluster_status running nodes output.
733 for validate_unit in sentry_units:
734 val_host_name = host_names[validate_unit.info['unit_name']]
735 val_node_name = 'rabbit@{}'.format(val_host_name)
736
737 if val_node_name not in running_nodes:
738 errors.append('Cluster member check failed on {}: {} not '
739 'in {}\n'.format(query_unit_name,
740 val_node_name,
741 running_nodes))
742 if errors:
743 return ''.join(errors)
744
745 def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
746 """Check a single juju rmq unit for ssl and port in the config file."""
747 host = sentry_unit.info['public-address']
748 unit_name = sentry_unit.info['unit_name']
749
750 conf_file = '/etc/rabbitmq/rabbitmq.config'
751 conf_contents = str(self.file_contents_safe(sentry_unit,
752 conf_file, max_wait=16))
753 # Checks
754 conf_ssl = 'ssl' in conf_contents
755 conf_port = str(port) in conf_contents
756
757 # Port explicitly checked in config
758 if port and conf_port and conf_ssl:
759 self.log.debug('SSL is enabled @{}:{} '
760 '({})'.format(host, port, unit_name))
761 return True
762 elif port and not conf_port and conf_ssl:
763 self.log.debug('SSL is enabled @{} but not on port {} '
764 '({})'.format(host, port, unit_name))
765 return False
766 # Port not checked (useful when checking that ssl is disabled)
767 elif not port and conf_ssl:
768 self.log.debug('SSL is enabled @{}:{} '
769 '({})'.format(host, port, unit_name))
770 return True
771 elif not conf_ssl:
772 self.log.debug('SSL not enabled @{}:{} '
773 '({})'.format(host, port, unit_name))
774 return False
775 else:
776 msg = ('Unknown condition when checking SSL status @{}:{} '
777 '({})'.format(host, port, unit_name))
778 amulet.raise_status(amulet.FAIL, msg)
779
780 def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
781 """Check that ssl is enabled on rmq juju sentry units.
782
783 :param sentry_units: list of all rmq sentry units
784 :param port: optional ssl port override to validate
785 :returns: None if successful, otherwise return error message
786 """
787 for sentry_unit in sentry_units:
788 if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
789 return ('Unexpected condition: ssl is disabled on unit '
790 '({})'.format(sentry_unit.info['unit_name']))
791 return None
792
793 def validate_rmq_ssl_disabled_units(self, sentry_units):
794 """Check that ssl is enabled on listed rmq juju sentry units.
795
796 :param sentry_units: list of all rmq sentry units
797 :returns: True if successful. Raise on error.
798 """
799 for sentry_unit in sentry_units:
800 if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
801 return ('Unexpected condition: ssl is enabled on unit '
802 '({})'.format(sentry_unit.info['unit_name']))
803 return None
804
805 def configure_rmq_ssl_on(self, sentry_units, deployment,
806 port=None, max_wait=60):
807 """Turn ssl charm config option on, with optional non-default
808 ssl port specification. Confirm that it is enabled on every
809 unit.
810
811 :param sentry_units: list of sentry units
812 :param deployment: amulet deployment object pointer
813 :param port: amqp port, use defaults if None
814 :param max_wait: maximum time to wait in seconds to confirm
815 :returns: None if successful. Raise on error.
816 """
817 self.log.debug('Setting ssl charm config option: on')
818
819 # Enable RMQ SSL
820 config = {'ssl': 'on'}
821 if port:
822 config['ssl_port'] = port
823
824 deployment.d.configure('rabbitmq-server', config)
825
826 # Wait for unit status
827 self.rmq_wait_for_cluster(deployment)
828
829 # Confirm
830 tries = 0
831 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
832 while ret and tries < (max_wait / 4):
833 time.sleep(4)
834 self.log.debug('Attempt {}: {}'.format(tries, ret))
835 ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
836 tries += 1
837
838 if ret:
839 amulet.raise_status(amulet.FAIL, ret)
840
841 def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
842 """Turn ssl charm config option off, confirm that it is disabled
843 on every unit.
844
845 :param sentry_units: list of sentry units
846 :param deployment: amulet deployment object pointer
847 :param max_wait: maximum time to wait in seconds to confirm
848 :returns: None if successful. Raise on error.
849 """
850 self.log.debug('Setting ssl charm config option: off')
851
852 # Disable RMQ SSL
853 config = {'ssl': 'off'}
854 deployment.d.configure('rabbitmq-server', config)
855
856 # Wait for unit status
857 self.rmq_wait_for_cluster(deployment)
858
859 # Confirm
860 tries = 0
861 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
862 while ret and tries < (max_wait / 4):
863 time.sleep(4)
864 self.log.debug('Attempt {}: {}'.format(tries, ret))
865 ret = self.validate_rmq_ssl_disabled_units(sentry_units)
866 tries += 1
867
868 if ret:
869 amulet.raise_status(amulet.FAIL, ret)
870
    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
                             port=None, fatal=True,
                             username="testuser1", password="changeme"):
        """Establish and return a pika amqp connection to the rabbitmq service
        running on a rmq juju unit.

        :param sentry_unit: sentry unit pointer
        :param ssl: boolean, default to False
        :param port: amqp port, use defaults if None
        :param fatal: boolean, default to True (raises on connect error)
        :param username: amqp user name, default to testuser1
        :param password: amqp user password
        :returns: pika amqp connection pointer or None if failed and non-fatal
        """
        host = sentry_unit.info['public-address']
        unit_name = sentry_unit.info['unit_name']

        # Default port logic if port is not specified:
        # 5671 is the conventional amqps port, 5672 plain amqp.
        if ssl and not port:
            port = 5671
        elif not ssl and not port:
            port = 5672

        self.log.debug('Connecting to amqp on {}:{} ({}) as '
                       '{}...'.format(host, port, unit_name, username))

        try:
            credentials = pika.PlainCredentials(username, password)
            parameters = pika.ConnectionParameters(host=host, port=port,
                                                   credentials=credentials,
                                                   ssl=ssl,
                                                   connection_attempts=3,
                                                   retry_delay=5,
                                                   socket_timeout=1)
            connection = pika.BlockingConnection(parameters)
            # Sanity check that we actually reached RabbitMQ, not some
            # other amqp speaker.
            assert connection.server_properties['product'] == 'RabbitMQ'
            self.log.debug('Connect OK')
            return connection
        except Exception as e:
            # Broad catch is deliberate: any connect/auth failure is
            # either fatal to the test run or logged for a retry loop.
            msg = ('amqp connection failed to {}:{} as '
                   '{} ({})'.format(host, port, username, str(e)))
            if fatal:
                amulet.raise_status(amulet.FAIL, msg)
            else:
                self.log.warn(msg)
                return None
917
918 def publish_amqp_message_by_unit(self, sentry_unit, message,
919 queue="test", ssl=False,
920 username="testuser1",
921 password="changeme",
922 port=None):
923 """Publish an amqp message to a rmq juju unit.
924
925 :param sentry_unit: sentry unit pointer
926 :param message: amqp message string
927 :param queue: message queue, default to test
928 :param username: amqp user name, default to testuser1
929 :param password: amqp user password
930 :param ssl: boolean, default to False
931 :param port: amqp port, use defaults if None
932 :returns: None. Raises exception if publish failed.
933 """
934 self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
935 message))
936 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
937 port=port,
938 username=username,
939 password=password)
940
941 # NOTE(beisner): extra debug here re: pika hang potential:
942 # https://github.com/pika/pika/issues/297
943 # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
944 self.log.debug('Defining channel...')
945 channel = connection.channel()
946 self.log.debug('Declaring queue...')
947 channel.queue_declare(queue=queue, auto_delete=False, durable=True)
948 self.log.debug('Publishing message...')
949 channel.basic_publish(exchange='', routing_key=queue, body=message)
950 self.log.debug('Closing channel...')
951 channel.close()
952 self.log.debug('Closing connection...')
953 connection.close()
954
955 def get_amqp_message_by_unit(self, sentry_unit, queue="test",
956 username="testuser1",
957 password="changeme",
958 ssl=False, port=None):
959 """Get an amqp message from a rmq juju unit.
960
961 :param sentry_unit: sentry unit pointer
962 :param queue: message queue, default to test
963 :param username: amqp user name, default to testuser1
964 :param password: amqp user password
965 :param ssl: boolean, default to False
966 :param port: amqp port, use defaults if None
967 :returns: amqp message body as string. Raise if get fails.
968 """
969 connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
970 port=port,
971 username=username,
972 password=password)
973 channel = connection.channel()
974 method_frame, _, body = channel.basic_get(queue)
975
976 if method_frame:
977 self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
978 body))
979 channel.basic_ack(method_frame.delivery_tag)
980 channel.close()
981 connection.close()
982 return body
983 else:
984 msg = 'No message retrieved.'
985 amulet.raise_status(amulet.FAIL, msg)
0986
=== added file 'charmhelpers.new/contrib/openstack/context.py'
--- charmhelpers.new/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/context.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,1477 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17import glob
18import json
19import os
20import re
21import time
22from base64 import b64decode
23from subprocess import check_call
24
25import six
26import yaml
27
28from charmhelpers.fetch import (
29 apt_install,
30 filter_installed_packages,
31)
32from charmhelpers.core.hookenv import (
33 config,
34 is_relation_made,
35 local_unit,
36 log,
37 relation_get,
38 relation_ids,
39 related_units,
40 relation_set,
41 unit_get,
42 unit_private_ip,
43 charm_name,
44 DEBUG,
45 INFO,
46 WARNING,
47 ERROR,
48)
49
50from charmhelpers.core.sysctl import create as sysctl_create
51from charmhelpers.core.strutils import bool_from_string
52
53from charmhelpers.core.host import (
54 get_bond_master,
55 is_phy_iface,
56 list_nics,
57 get_nic_hwaddr,
58 mkdir,
59 write_file,
60 pwgen,
61)
62from charmhelpers.contrib.hahelpers.cluster import (
63 determine_apache_port,
64 determine_api_port,
65 https,
66 is_clustered,
67)
68from charmhelpers.contrib.hahelpers.apache import (
69 get_cert,
70 get_ca_cert,
71 install_ca_cert,
72)
73from charmhelpers.contrib.openstack.neutron import (
74 neutron_plugin_attribute,
75 parse_data_port_mappings,
76)
77from charmhelpers.contrib.openstack.ip import (
78 resolve_address,
79 INTERNAL,
80)
81from charmhelpers.contrib.network.ip import (
82 get_address_in_network,
83 get_ipv4_addr,
84 get_ipv6_addr,
85 get_netmask_for_address,
86 format_ipv6_addr,
87 is_address_in_network,
88 is_bridge_member,
89)
90from charmhelpers.contrib.openstack.utils import get_host_ip
91from charmhelpers.core.unitdata import kv
92
93try:
94 import psutil
95except ImportError:
96 apt_install('python-psutil', fatal=True)
97 import psutil
98
99CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
100ADDRESS_TYPES = ['admin', 'internal', 'public']
101
102
class OSContextError(Exception):
    """Raised when required charm config for building a context is
    missing or malformed."""
    pass
105
106
def ensure_packages(packages):
    """Install but do not upgrade required plugin packages."""
    # Only act on packages that are not already installed.
    to_install = filter_installed_packages(packages)
    if to_install:
        apt_install(to_install, fatal=True)
113
def context_complete(ctxt):
    """Return True when every value in *ctxt* is populated.

    Logs the keys whose values are None or the empty string and returns
    False when any such key is found.
    """
    empty_keys = [k for k, v in ctxt.items() if v is None or v == '']
    if empty_keys:
        log('Missing required data: %s' % ' '.join(empty_keys), level=INFO)
        return False
    return True
125
126
def config_flags_parser(config_flags):
    """Parses config flags string into dict.

    This parsing method supports a few different formats for the config
    flag values to be parsed:

    1. A string in the simple format of key=value pairs, with the possibility
       of specifying multiple key value pairs within the same string. For
       example, a string in the format of 'key1=value1, key2=value2' will
       return a dict of:

           {'key1': 'value1',
            'key2': 'value2'}.

    2. A string in the above format, but supporting a comma-delimited list
       of values for the same key. For example, a string in the format of
       'key1=value1, key2=value3,value4,value5' will return a dict of:

           {'key1': 'value1',
            'key2': 'value2,value3,value4'}

    3. A string containing a colon character (:) prior to an equal
       character (=) will be treated as yaml and parsed as such. This can be
       used to specify more complex key value pairs. For example,
       a string in the format of 'key1: subkey1=value1, subkey2=value2' will
       return a dict of:

           {'key1': 'subkey1=value1, subkey2=value2'}

    The provided config_flags string may be a list of comma-separated values
    which themselves may be comma-separated list of values.
    """
    # If we find a colon before an equals sign then treat it as yaml.
    # Note: limit it to finding the colon first since this indicates assignment
    # for inline yaml.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0:
        if colon < equals or equals < 0:
            return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        log("config_flags is not in expected format (key=value)", level=ERROR)
        raise OSContextError

    # strip the following from each value.
    post_strippers = ' ,'
    # we strip any leading/trailing '=' or ' ' from the string then
    # split on '='.
    split = config_flags.strip(' =').split('=')
    limit = len(split)
    flags = {}
    for i in range(0, limit - 1):
        current = split[i]
        # BUGFIX: renamed from 'next' to avoid shadowing the builtin.
        next_val = split[i + 1]
        vindex = next_val.rfind(',')
        if (i == limit - 2) or (vindex < 0):
            # Last pair, or no embedded key follows: take it all.
            value = next_val
        else:
            # Everything after the last comma is the next pair's key.
            value = next_val[:vindex]

        if i == 0:
            key = current
        else:
            # if this not the first entry, expect an embedded key.
            index = current.rfind(',')
            if index < 0:
                log("Invalid config value(s) at index %s" % (i), level=ERROR)
                raise OSContextError
            key = current[index + 1:]

        # Add to collection.
        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
202
203
class OSContextGenerator(object):
    """Base class for all context generators."""
    # Relation interface names this context consumes.
    interfaces = []
    # Set True once any interface has at least one relation id.
    related = False
    # Set True once context_complete() finds no missing values.
    complete = False
    # Keys whose values were empty/None on the last context_complete() call.
    missing_data = []

    def __call__(self):
        raise NotImplementedError

    def context_complete(self, ctxt):
        """Check for missing data for the required context data.
        Set self.missing_data if it exists and return False.
        Set self.complete if no missing data and return True.
        """
        # Fresh start
        self.complete = False
        self.missing_data = []
        for k, v in six.iteritems(ctxt):
            if v is None or v == '':
                if k not in self.missing_data:
                    self.missing_data.append(k)

        if self.missing_data:
            self.complete = False
            log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
        else:
            self.complete = True
        return self.complete

    def get_related(self):
        """Check if any of the context interfaces have relation ids.
        Set self.related and return True if one of the interfaces
        has relation ids.
        """
        # Fresh start
        self.related = False
        try:
            for interface in self.interfaces:
                if relation_ids(interface):
                    self.related = True
            return self.related
        except AttributeError as e:
            # Lands here when self.interfaces is not iterable/defined.
            log("{} {}"
                "".format(self, e), 'INFO')
            return self.related
250
251
class SharedDBContext(OSContextGenerator):
    # Provides mysql access details from the 'shared-db' relation.
    interfaces = ['shared-db']

    def __init__(self,
                 database=None, user=None, relation_prefix=None, ssl_dir=None):
        """Allows inspecting relation for settings prefixed with
        relation_prefix. This is useful for parsing access for multiple
        databases returned via the shared-db interface (eg, nova_password,
        quantum_password)
        """
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user
        self.ssl_dir = ssl_dir
        self.rel_name = self.interfaces[0]

    def __call__(self):
        # Fall back to charm config when database/user were not supplied
        # at construction time.
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log("Could not generate shared_db context. Missing required charm "
                "config options. (database name and user)", level=ERROR)
            raise OSContextError

        ctxt = {}

        # NOTE(jamespage) if mysql charm provides a network upon which
        # access to the database should be made, reconfigure relation
        # with the service units local address and defer execution
        access_network = relation_get('access-network')
        if access_network is not None:
            if self.relation_prefix is not None:
                hostname_key = "{}_hostname".format(self.relation_prefix)
            else:
                hostname_key = "hostname"
            access_hostname = get_address_in_network(access_network,
                                                     unit_get('private-address'))
            set_hostname = relation_get(attribute=hostname_key,
                                        unit=local_unit())
            if set_hostname != access_hostname:
                relation_set(relation_settings={hostname_key: access_hostname})
                return None  # Defer any further hook execution for now....

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        # Return access details from the first related unit that provides
        # a complete context.
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                host = rdata.get('db_host')
                host = format_ipv6_addr(host) or host
                ctxt = {
                    'database_host': host,
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': rdata.get(password_setting),
                    'database_type': 'mysql'
                }
                if self.context_complete(ctxt):
                    db_ssl(rdata, ctxt, self.ssl_dir)
                    return ctxt
        return {}
316
317
class PostgresqlDBContext(OSContextGenerator):
    # Provides postgresql access details from the 'pgsql-db' relation.
    interfaces = ['pgsql-db']

    def __init__(self, database=None):
        # Optional database name override; falls back to charm config.
        self.database = database

    def __call__(self):
        self.database = self.database or config('database')
        if self.database is None:
            log('Could not generate postgresql_db context. Missing required '
                'charm config options. (database name)', level=ERROR)
            raise OSContextError

        ctxt = {}
        # Return access details from the first related unit that provides
        # a complete context.
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                rel_host = relation_get('host', rid=rid, unit=unit)
                rel_user = relation_get('user', rid=rid, unit=unit)
                rel_passwd = relation_get('password', rid=rid, unit=unit)
                ctxt = {'database_host': rel_host,
                        'database': self.database,
                        'database_user': rel_user,
                        'database_password': rel_passwd,
                        'database_type': 'postgresql'}
                if self.context_complete(ctxt):
                    return ctxt

        return {}
347
348
def db_ssl(rdata, ctxt, ssl_dir):
    """Augment a database context with ssl material from relation data.

    Writes the base64-encoded CA cert, client cert and client key found
    in *rdata* into *ssl_dir* and records their paths in *ctxt*.
    Returns *ctxt* (also mutated in place).
    """
    if 'ssl_ca' in rdata and ssl_dir:
        ca_path = os.path.join(ssl_dir, 'db-client.ca')
        with open(ca_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_ca']))

        ctxt['database_ssl_ca'] = ca_path
    elif 'ssl_ca' in rdata:
        # Relation offers ssl but the charm gave us nowhere to store it.
        log("Charm not setup for ssl support but ssl ca found", level=INFO)
        return ctxt

    if 'ssl_cert' in rdata:
        cert_path = os.path.join(
            ssl_dir, 'db-client.cert')
        if not os.path.exists(cert_path):
            # First sight of the cert: wait out potential clock skew on a
            # just-issued certificate before writing and using it.
            log("Waiting 1m for ssl client cert validity", level=INFO)
            time.sleep(60)

        with open(cert_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_cert']))

        ctxt['database_ssl_cert'] = cert_path
        key_path = os.path.join(ssl_dir, 'db-client.key')
        with open(key_path, 'w') as fh:
            # NOTE(review): assumes 'ssl_key' always accompanies
            # 'ssl_cert' in the relation data — a KeyError here means the
            # remote sent a cert without a key; confirm against provider.
            fh.write(b64decode(rdata['ssl_key']))

        ctxt['database_ssl_key'] = key_path

    return ctxt
378
379
class IdentityServiceContext(OSContextGenerator):
    """Provides keystone admin credentials and endpoint details from the
    identity-service relation."""

    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
        self.service = service
        self.service_user = service_user
        self.rel_name = rel_name
        self.interfaces = [self.rel_name]

    def __call__(self):
        log('Generating template context for ' + self.rel_name, level=DEBUG)
        ctxt = {}

        if self.service and self.service_user:
            # This is required for pki token signing if we don't want /tmp to
            # be used.
            cachedir = '/var/cache/%s' % (self.service)
            if not os.path.isdir(cachedir):
                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
                mkdir(path=cachedir, owner=self.service_user,
                      group=self.service_user, perms=0o700)

            ctxt['signing_dir'] = cachedir

        # Return details from the first related unit that provides a
        # complete context.
        for rid in relation_ids(self.rel_name):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                serv_host = rdata.get('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                svc_protocol = rdata.get('service_protocol') or 'http'
                auth_protocol = rdata.get('auth_protocol') or 'http'
                ctxt.update({'service_port': rdata.get('service_port'),
                             'service_host': serv_host,
                             'auth_host': auth_host,
                             'auth_port': rdata.get('auth_port'),
                             'admin_tenant_name': rdata.get('service_tenant'),
                             'admin_user': rdata.get('service_username'),
                             'admin_password': rdata.get('service_password'),
                             'service_protocol': svc_protocol,
                             'auth_protocol': auth_protocol})

                if self.context_complete(ctxt):
                    # NOTE(jamespage) this is required for >= icehouse
                    # so a missing value just indicates keystone needs
                    # upgrading
                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                    return ctxt

        return {}
431
432
class AMQPContext(OSContextGenerator):
    """Provides rabbitmq access details from the amqp relation."""

    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
        # ssl_dir: where to write the broker CA certificate when provided.
        self.ssl_dir = ssl_dir
        self.rel_name = rel_name
        self.relation_prefix = relation_prefix
        self.interfaces = [rel_name]

    def __call__(self):
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        # Config option names may carry a per-relation prefix
        # (eg. 'nova-rabbit-user').
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            log('Could not generate shared_db context. Missing required charm '
                'config options: %s.' % e, level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            self.related = True
            for unit in related_units(rid):
                # A clustered rabbitmq advertises its VIP; otherwise use
                # the unit's own address.
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if self.context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log("Charm not setup for ssl support but ssl ca "
                                "found", level=INFO)
                            break

                        # Persist the broker CA so clients can verify TLS.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only) and
                    len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        # self.complete was set by the last context_complete() call above.
        if not self.complete:
            return {}

        return ctxt
531
532
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        mon_hosts = []
        ctxt = {
            'use_syslog': str(config('use-syslog')).lower()
        }
        # Take auth/key from the first unit that provides them and build
        # the mon host list, preferring ceph-public-address over the
        # unit's private address.
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                if not ctxt.get('auth'):
                    ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
                if not ctxt.get('key'):
                    ctxt['key'] = relation_get('key', rid=rid, unit=unit)
                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address', rid=rid,
                                              unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))

        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not self.context_complete(ctxt):
            return {}

        # Only install ceph-common once the context is known usable.
        ensure_packages(['ceph-common'])
        return ctxt
570
571
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.

    Side effects: writes ENABLED=1 to /etc/default/haproxy once enough
    backends (or singlenode_mode) are present, and persists a generated
    stats password in the unit kv store.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        # singlenode_mode: render a usable haproxy config even with no
        # cluster peers.
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        # Default/fallback listen address for this unit.
        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        # Peers advertise a per-network address via the
                        # '<type>-address' relation setting.
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # will either be the only backend or the fallback if no acls
        # match in the frontend
        cluster_hosts[addr] = {}
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        # Optional user-tunable haproxy timeouts (only set when the
        # corresponding charm option is provided).
        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('haproxy-queue-timeout'):
            ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')

        if config('haproxy-connect-timeout'):
            ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'

        ctxt['stat_port'] = '8888'

        # Generate the haproxy stats password once and persist it in the
        # unit kv store so it stays stable across hook invocations.
        db = kv()
        ctxt['stat_password'] = db.get('stat-password')
        if not ctxt['stat_password']:
            ctxt['stat_password'] = db.set('stat-password',
                                           pwgen(32))
            db.flush()

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
675
676
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Obtains the glance API server from the image-service relation.
        Useful in nova and cinder (currently).
        """
        log('Generating template context for image-service.', level=DEBUG)
        relids = relation_ids('image-service')
        if not relids:
            return {}

        # Return as soon as any related unit publishes its API server.
        for relid in relids:
            for unit in related_units(relid):
                server = relation_get('glance-api-server',
                                      rid=relid, unit=unit)
                if server:
                    return {'glance_api_servers': server}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
699
700
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules required for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the SSL certificate and key for this service to disk.

        :param cn: optional canonical name used to namespace the cert/key
                   filenames; when None the plain 'cert'/'key' names are
                   used.
        """
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA certificate, if one has been provided."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # NOTE: strip the 'ssl_key_' prefix by slicing;
                        # str.lstrip() strips a *character set*, not a
                        # prefix, and would mangle any CN starting with
                        # 's', 'l', '_', 'k', 'e' or 'y' (e.g.
                        # 'ssl_key_keystone' -> 'tone').
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
           (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

            or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
840
841
class NeutronContext(OSContextGenerator):
    """Base context for neutron/quantum plugin configuration.

    Subclasses provide the plugin name and network manager; this class
    assembles the per-plugin template context and records the selected
    plugin in a flag file consumed by nova.
    """
    interfaces = []

    @property
    def plugin(self):
        # Subclasses return the plugin name (e.g. 'ovs', 'n1kv').
        return None

    @property
    def network_manager(self):
        # Subclasses return 'quantum' or 'neutron'.
        return None

    @property
    def packages(self):
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        return None

    def _ensure_packages(self):
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Record the active plugin so nova can pick it up."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # NOTE: open in text mode ('w'); self.plugin is a str and writing
        # it to a binary-mode ('wb') file raises TypeError on Python 3.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        """Context for the openvswitch plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        # NOTE: named plugin_config to avoid shadowing the imported
        # config() helper.
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'ovs',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': plugin_config}

        return ovs_ctxt

    def nuage_ctxt(self):
        """Context for the Nuage VSP plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        nuage_ctxt = {'core_plugin': driver,
                      'neutron_plugin': 'vsp',
                      'neutron_security_groups': self.neutron_security_groups,
                      'local_ip': unit_private_ip(),
                      'config': plugin_config}

        return nuage_ctxt

    def nvp_ctxt(self):
        """Context for the NVP/NSX plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        nvp_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'nvp',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': plugin_config}

        return nvp_ctxt

    def n1kv_ctxt(self):
        """Context for the Cisco N1KV plugin; pulls VSM credentials and
        optional user config flags from charm configuration.
        """
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_user_config_flags = config('n1kv-config-flags')
        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
        n1kv_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'n1kv',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': n1kv_config,
                     'vsm_ip': config('n1kv-vsm-ip'),
                     'vsm_username': config('n1kv-vsm-username'),
                     'vsm_password': config('n1kv-vsm-password'),
                     'restrict_policy_profiles': restrict_policy_profiles}

        if n1kv_user_config_flags:
            flags = config_flags_parser(n1kv_user_config_flags)
            n1kv_ctxt['user_config_flags'] = flags

        return n1kv_ctxt

    def calico_ctxt(self):
        """Context for the Calico plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        calico_ctxt = {'core_plugin': driver,
                       'neutron_plugin': 'Calico',
                       'neutron_security_groups': self.neutron_security_groups,
                       'local_ip': unit_private_ip(),
                       'config': plugin_config}

        return calico_ctxt

    def neutron_ctxt(self):
        """Common context: network manager and neutron API URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt

    def pg_ctxt(self):
        """Context for the PLUMgrid plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        plugin_config = neutron_plugin_attribute(self.plugin, 'config',
                                                 self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'plumgrid',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': plugin_config}
        return ovs_ctxt

    def midonet_ctxt(self):
        """Context for the MidoNet plugin."""
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
                                                  self.network_manager)
        mido_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'midonet',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': midonet_config}

        return mido_ctxt

    def __call__(self):
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        # Merge in the plugin-specific settings.
        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin in ['nvp', 'nsx']:
            ctxt.update(self.nvp_ctxt())
        elif self.plugin == 'n1kv':
            ctxt.update(self.n1kv_ctxt())
        elif self.plugin == 'Calico':
            ctxt.update(self.calico_ctxt())
        elif self.plugin == 'vsp':
            ctxt.update(self.nuage_ctxt())
        elif self.plugin == 'plumgrid':
            ctxt.update(self.pg_ctxt())
        elif self.plugin == 'midonet':
            ctxt.update(self.midonet_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            flags = config_flags_parser(alchemy_flags)
            ctxt['neutron_alchemy_flags'] = flags

        self._save_flag_file()
        return ctxt
1021
1022
class NeutronPortContext(OSContextGenerator):

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s)

        If hwaddress provided then returns resolved hwaddress otherwise NIC.
        """
        if not ports:
            return None

        # Build MAC -> NIC and MAC -> [addresses] maps for all physical
        # interfaces, substituting bond slaves with their bond master.
        hwaddr_to_nic = {}
        hwaddr_to_ip = {}
        for nic in list_nics():
            # Ignore virtual interfaces (bond masters will be identified
            # from their slaves)
            if not is_phy_iface(nic):
                continue

            master = get_bond_master(nic)
            if master:
                log("Replacing iface '%s' with bond master '%s'" % (nic,
                                                                    master),
                    level=DEBUG)
                nic = master

            hwaddr = get_nic_hwaddr(nic)
            hwaddr_to_nic[hwaddr] = nic
            hwaddr_to_ip[hwaddr] = (get_ipv4_addr(nic, fatal=False) +
                                    get_ipv6_addr(iface=nic, fatal=False))

        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        resolved = []
        for entry in ports:
            if not re.match(mac_regex, entry):
                # If the passed entry is not a MAC address, assume it's a
                # valid interface, and that the user put it there on
                # purpose (we can trust it to be the real external
                # network).
                resolved.append(entry)
                continue

            # Entry is a MAC: only use it if it maps to a known NIC that
            # does NOT have an IP address assigned yet and is not already
            # part of a bridge.
            if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
                if not is_bridge_member(hwaddr_to_nic[entry]):
                    resolved.append(hwaddr_to_nic[entry])

        # Ensure no duplicates
        return list(set(resolved))
1074
1075
class OSConfigFlagContext(OSContextGenerator):
    """Support for user-defined config flags.

    Users define a comma-separated list of key=value pairs in charm
    configuration; this context parses them and exposes the result under
    a configurable template flag so they can be injected at any point in
    any file.

    Using distinct template flag names allows several flag blocks
    (sections) within the same context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in template
                              file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        flags = config(self._charm_flag)
        if not flags:
            return {}
        return {self._template_flag: config_flags_parser(flags)}
1111
1112
class LibvirtConfigFlagsContext(OSContextGenerator):
    """Supports extending the libvirt section through user-defined flags."""

    def __call__(self):
        flags = config('libvirt-flags')
        if not flags:
            return {}
        # Parse the raw comma-separated key=value string into a dict.
        return {'libvirt_flags': config_flags_parser(flags)}
1125
1126
class SubordinateConfigContext(OSContextGenerator):

    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principle for multiple config
    files and multiple services. Ie, a subordinate that has interfaces
    to both glance and nova may export to following yaml blob as json::

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]


    It is then up to the principle charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as::

        ctxt = {
            ... other context ...
            'subordinate_configuration': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service     : Service name key to query in any subordinate
                             data found
        :param config_file : Service's config file to query sections
        :param interface   : Subordinate interface to inspect
        """
        self.config_file = config_file
        if isinstance(service, list):
            self.services = service
        else:
            self.services = [service]
        if isinstance(interface, list):
            self.interfaces = interface
        else:
            self.interfaces = [interface]

    def __call__(self):
        ctxt = {'sections': {}}
        rids = []
        for interface in self.interfaces:
            rids.extend(relation_ids(interface))
        for rid in rids:
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if sub_config and sub_config != '':
                    try:
                        # json.loads() raises ValueError on bad input
                        # (JSONDecodeError subclasses it) - catch only
                        # that rather than a bare except.
                        sub_config = json.loads(sub_config)
                    except ValueError:
                        log('Could not parse JSON from '
                            'subordinate_configuration setting from %s'
                            % rid, level=ERROR)
                        continue

                    for service in self.services:
                        if service not in sub_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s service'
                                % (rid, service), level=INFO)
                            continue

                        # NOTE: bind per-service data to a new name rather
                        # than rebinding sub_config, which previously broke
                        # lookups for every service after the first.
                        svc_config = sub_config[service]
                        if self.config_file not in svc_config:
                            log('Found subordinate_configuration on %s but it '
                                'contained nothing for %s'
                                % (rid, self.config_file), level=INFO)
                            continue

                        file_config = svc_config[self.config_file]
                        for k, v in six.iteritems(file_config):
                            if k == 'sections':
                                # Merge section entries, extending any
                                # section already collected from another
                                # unit/service.
                                for section, config_list in six.iteritems(v):
                                    log("adding section '%s'" % (section),
                                        level=DEBUG)
                                    if ctxt[k].get(section):
                                        ctxt[k][section].extend(config_list)
                                    else:
                                        ctxt[k][section] = config_list
                            else:
                                ctxt[k] = v
        log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
        return ctxt
1233
1234
class LogLevelContext(OSContextGenerator):

    def __call__(self):
        # Fall back to False when the charm options are unset (None).
        debug = config('debug')
        verbose = config('verbose')
        return {
            'debug': False if debug is None else debug,
            'verbose': False if verbose is None else verbose,
        }
1245
1246
class SyslogContext(OSContextGenerator):

    def __call__(self):
        # Expose the charm's use-syslog option directly to templates.
        return {'use_syslog': config('use-syslog')}
1252
1253
class BindHostContext(OSContextGenerator):

    def __call__(self):
        # Bind to the IPv6 wildcard when prefer-ipv6 is set, otherwise
        # the IPv4 wildcard.
        bind = '::' if config('prefer-ipv6') else '0.0.0.0'
        return {'bind_host': bind}
1261
1262
class WorkerConfigContext(OSContextGenerator):

    @property
    def num_cpus(self):
        # NOTE: cpu_count() replaced NUM_CPUS in newer psutil releases
        # (16.04 support); fall back for older versions.
        try:
            return psutil.cpu_count()
        except AttributeError:
            return psutil.NUM_CPUS

    def __call__(self):
        # An unset multiplier yields zero workers (service default).
        workers = self.num_cpus * (config('worker-multiplier') or 0)
        return {"workers": workers}
1277
1278
class ZeroMQContext(OSContextGenerator):
    interfaces = ['zeromq-configuration']

    def __call__(self):
        ctxt = {}
        if not is_relation_made('zeromq-configuration', 'host'):
            return ctxt

        # Last related unit seen wins for each setting.
        for rid in relation_ids('zeromq-configuration'):
            for unit in related_units(rid):
                for ctxt_key, rel_key in (('zmq_nonce', 'nonce'),
                                          ('zmq_host', 'host'),
                                          ('zmq_redis_address',
                                           'zmq_redis_address')):
                    ctxt[ctxt_key] = relation_get(rel_key, unit, rid)

        return ctxt
1293
1294
class NotificationDriverContext(OSContextGenerator):

    def __init__(self, zmq_relation='zeromq-configuration',
                 amqp_relation='amqp'):
        """
        :param zmq_relation: Name of Zeromq relation to check
        """
        self.zmq_relation = zmq_relation
        self.amqp_relation = amqp_relation

    def __call__(self):
        # Notifications are enabled as soon as an AMQP relation exists.
        enabled = is_relation_made(self.amqp_relation)
        return {'notifications': 'True' if enabled else 'False'}
1311
1312
class SysctlContext(OSContextGenerator):
    """Checks whether the 'sysctl' charm option is set and, if so, writes
    its contents out to a charm-specific sysctl.d file."""

    def __call__(self):
        sysctl_settings = config('sysctl')
        if sysctl_settings:
            sysctl_create(sysctl_settings,
                          '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
        return {'sysctl': sysctl_settings}
1322
1323
class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __call__(self):
        # Map of context key -> relation key plus the default used when
        # the relation (or the individual setting) is absent.
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                # 'l2-population' doubles as a sentinel for fully-formed
                # relation data; only then trust the remote settings.
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        """Resolve each known neutron setting from relation data, falling
        back to its declared default.

        Settings with boolean defaults have their relation value coerced
        via bool_from_string().
        """
        settings = {}
        for nkey, meta in self.neutron_defaults.items():
            defv = meta['default']
            rkey = meta['rel_key']
            if rkey in rdata:
                if isinstance(defv, bool):
                    settings[nkey] = bool_from_string(rdata[rkey])
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv
        return settings
1380
1381
class ExternalPortContext(NeutronPortContext):

    def __call__(self):
        ctxt = {}
        cfg_ports = config('ext-port')
        if not cfg_ports:
            return ctxt

        candidates = [port.strip() for port in cfg_ports.split()]
        resolved = self.resolve_ports(candidates)
        if resolved:
            # First resolved NIC is used as the external port.
            ctxt = {"ext_port": resolved[0]}
            napi_settings = NeutronAPIContext()()
            mtu = napi_settings.get('network_device_mtu')
            if mtu:
                ctxt['ext_port_mtu'] = mtu

        return ctxt
1398
1399
class DataPortContext(NeutronPortContext):

    def __call__(self):
        raw = config('data-port')
        if not raw:
            return None

        # Map of {port/mac: bridge}
        portmap = parse_data_port_mappings(raw)
        requested = portmap.keys()
        # Resolve provided ports or mac addresses and filter out those
        # already attached to a bridge.
        resolved = self.resolve_ports(requested)
        if not resolved:
            return None

        # FIXME: is this necessary?
        normalized = {}
        for port in resolved:
            if port in requested:
                normalized[port] = port
            else:
                # resolve_ports() translated a MAC into a NIC name; map
                # the NIC's MAC back to the NIC so portmap keys match.
                normalized[get_nic_hwaddr(port)] = port

        return {normalized[port]: bridge
                for port, bridge in six.iteritems(portmap)
                if port in normalized}
1421
1422
class PhyNICMTUContext(DataPortContext):

    def __call__(self):
        ctxt = {}
        mappings = super(PhyNICMTUContext, self).__call__()
        if not (mappings and mappings.keys()):
            return ctxt

        ports = sorted(mappings.keys())
        napi_settings = NeutronAPIContext()()
        mtu = napi_settings.get('network_device_mtu')

        # If any of the ports is a vlan device, its underlying device
        # must have the mtu applied first, so collect lower devices too.
        devices = set()
        for port in ports:
            for lower in glob.glob("/sys/class/net/%s/lower_*" % port):
                devices.add(os.path.basename(lower).split('_')[1])

        devices = list(devices)
        devices.extend(ports)
        if mtu:
            ctxt["devs"] = '\\n'.join(devices)
            ctxt['mtu'] = mtu

        return ctxt
1447
1448
class NetworkServiceContext(OSContextGenerator):

    def __init__(self, rel_name='quantum-network-service'):
        self.rel_name = rel_name
        self.interfaces = [rel_name]

    def __call__(self):
        # Return data from the first related unit providing a complete
        # context.
        for rid in relation_ids(self.rel_name):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                ctxt = {key: rdata.get(key)
                        for key in ('keystone_host', 'service_port',
                                    'auth_port', 'service_tenant',
                                    'service_username', 'service_password',
                                    'quantum_host', 'quantum_port',
                                    'quantum_url', 'region')}
                ctxt['service_protocol'] = (rdata.get('service_protocol') or
                                            'http')
                ctxt['auth_protocol'] = rdata.get('auth_protocol') or 'http'
                if self.context_complete(ctxt):
                    return ctxt
        return {}
01478
=== added directory 'charmhelpers.new/contrib/openstack/files'
=== added file 'charmhelpers.new/contrib/openstack/files/check_haproxy.sh'
--- charmhelpers.new/contrib/openstack/files/check_haproxy.sh 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/files/check_haproxy.sh 2016-01-30 12:38:43 +0000
@@ -0,0 +1,34 @@
#!/bin/bash
#--------------------------------------------
# This file is managed by Juju
#--------------------------------------------
#
# Copyright 2009,2012 Canonical Ltd.
# Author: Tom Haddon

CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
# Pull the "stats auth <user:pass>" credentials from the haproxy config.
# NOTE: NR==1 (comparison) rather than NR=1 (assignment, always true) so
# only the first match is used should grep return several lines.
AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR==1{print $4}')

typeset -i N_INSTANCES=0
# Probe every backend server listed in haproxy.cfg via the stats endpoint.
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
do
    N_INSTANCES=N_INSTANCES+1
    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
    if [ $? != 0 ]; then
        date >> $LOGFILE
        echo $output >> $LOGFILE
        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
        CRITICAL=1
        NOTACTIVE="${NOTACTIVE} $appserver"
    fi
done

if [ $CRITICAL = 1 ]; then
    echo "CRITICAL:${NOTACTIVE}"
    exit 2
fi

echo "OK: All haproxy instances ($N_INSTANCES) looking good"
exit 0
035
=== added file 'charmhelpers.new/contrib/openstack/ip.py'
--- charmhelpers.new/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/ip.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,151 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
17from charmhelpers.core.hookenv import (
18 config,
19 unit_get,
20 service_name,
21)
22from charmhelpers.contrib.network.ip import (
23 get_address_in_network,
24 is_address_in_network,
25 is_ipv6,
26 get_ipv6_addr,
27)
28from charmhelpers.contrib.hahelpers.cluster import is_clustered
29
# Endpoint type keys used throughout this module.
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

# Maps each endpoint type to:
#   config:   charm option naming the network/CIDR for that endpoint
#   fallback: unit address attribute used when no network is configured
#   override: charm option that force-overrides the resolved address
ADDRESS_MAP = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address',
        'override': 'os-public-hostname',
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address',
        'override': 'os-internal-hostname',
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address',
        'override': 'os-admin-hostname',
    }
}
51
52
def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :param returns: str base URL for services on the current service unit.
    """
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        # Literal IPv6 addresses must be bracketed in URLs.
        address = "[{}]".format(address)

    return '%s://%s' % (_get_scheme(configs), address)
69
70
71def _get_scheme(configs):
72 """Returns the scheme to use for the url (either http or https)
73 depending upon whether https is in the configs value.
74
75 :param configs: OSTemplateRenderer config templating object to inspect
76 for a complete https context.
77 :returns: either 'http' or 'https' depending on whether https is
78 configured within the configs context.
79 """
80 scheme = 'http'
81 if configs and 'https' in configs.complete_contexts():
82 scheme = 'https'
83 return scheme
84
85
def _get_address_override(endpoint_type=PUBLIC):
    """Returns any address overrides that the user has defined based on the
    endpoint type.

    Note: this function allows for the service name to be inserted into the
    address if the user specifies {service_name}.somehost.org.

    :param endpoint_type: the type of endpoint to retrieve the override
                          value for.
    :returns: any endpoint address or hostname that the user has overridden
              or None if an override is not present.
    """
    addr_override = config(ADDRESS_MAP[endpoint_type]['override'])
    if addr_override:
        # Allow '{service_name}' templating within the override value.
        return addr_override.format(service_name=service_name())
    return None
104
105
def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured
    net split if one is configured.

    :param endpoint_type: Network endpoint type
    """
    # An explicit user override always wins.
    override = _get_address_override(endpoint_type)
    if override:
        return override

    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    clustered = is_clustered()

    resolved_address = None
    if clustered:
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            resolved_address = vips[0]
        else:
            # Pick the vip that lives on the configured network, if any.
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(ADDRESS_MAP[endpoint_type]['fallback'])

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address
0152
=== added file 'charmhelpers.new/contrib/openstack/neutron.py'
--- charmhelpers.new/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
+++ charmhelpers.new/contrib/openstack/neutron.py 2016-01-30 12:38:43 +0000
@@ -0,0 +1,378 @@
1# Copyright 2014-2015 Canonical Limited.
2#
3# This file is part of charm-helpers.
4#
5# charm-helpers is free software: you can redistribute it and/or modify
6# it under the terms of the GNU Lesser General Public License version 3 as
7# published by the Free Software Foundation.
8#
9# charm-helpers is distributed in the hope that it will be useful,
10# but WITHOUT ANY WARRANTY; without even the implied warranty of
11# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12# GNU Lesser General Public License for more details.
13#
14# You should have received a copy of the GNU Lesser General Public License
15# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
16
 17# Various utilities for dealing with Neutron and the renaming from Quantum.
18
19import six
20from subprocess import check_output
21
22from charmhelpers.core.hookenv import (
23 config,
24 log,
25 ERROR,
26)
27
28from charmhelpers.contrib.openstack.utils import os_release
29
30
def headers_package():
    """Return the linux-headers package name for the running kernel.

    The matching headers are required to build a DKMS package against
    the currently booted kernel.
    """
    running_release = check_output(['uname', '-r']).decode('UTF-8').strip()
    return 'linux-headers-%s' % running_release
36
# Configuration directory used under the legacy Quantum naming of Neutron.
QUANTUM_CONF_DIR = '/etc/quantum'
38
39
def kernel_version():
    """Return the running kernel's (major, minor) version as an int tuple,
    e.g. (3, 13)."""
    release = check_output(['uname', '-r']).decode('UTF-8').strip()
    major, minor = release.split('.')[:2]
    return (int(major), int(minor))
45
46
def determine_dkms_package():
    """Return the packages needed to build the openvswitch DKMS module,
    or an empty list when the running kernel needs none.
    """
    # Kernels >= 3.13 have native GRE and VXLAN support, so no DKMS
    # datapath module (and hence no kernel headers) is required.
    if kernel_version() < (3, 13):
        return [headers_package(), 'openvswitch-datapath-dkms']
    return []
54
55
56# legacy
57
58
def quantum_plugins():
    """Return plugin metadata for legacy Quantum deployments.

    Maps a plugin short name ('ovs', 'nvp') to the configuration file,
    core driver class, template contexts, agent services and packages,
    and server packages/services that the plugin requires.
    """
    # Deferred import -- NOTE(review): presumably avoids a circular
    # dependency with charmhelpers.contrib.openstack.context; confirm.
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=QUANTUM_CONF_DIR)],
            'services': ['quantum-plugin-openvswitch-agent'],
            # NOTE: nested structure -- determine_dkms_package() itself
            # returns a list; presumably the consumer flattens this.
            'packages': [determine_dkms_package(),
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['quantum-server',
                                'quantum-plugin-openvswitch'],
            'server_services': ['quantum-server']
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=QUANTUM_CONF_DIR)],
            # No agent services/packages are managed for nvp.
            'services': [],
            'packages': [],
            'server_packages': ['quantum-server',
                                'quantum-plugin-nicira'],
            'server_services': ['quantum-server']
        }
    }
95
# Configuration directory used by Neutron (post-Quantum rename).
NEUTRON_CONF_DIR = '/etc/neutron'
97
98
99def neutron_plugins():
100 from charmhelpers.contrib.openstack import context
101 release = os_release('nova-common')
102 plugins = {
103 'ovs': {
104 'config': '/etc/neutron/plugins/openvswitch/'
105 'ovs_neutron_plugin.ini',
106 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
107 'OVSNeutronPluginV2',
108 'contexts': [
109 context.SharedDBContext(user=config('neutron-database-user'),
110 database=config('neutron-database'),
111 relation_prefix='neutron',
112 ssl_dir=NEUTRON_CONF_DIR)],
113 'services': ['neutron-plugin-openvswitch-agent'],
114 'packages': [determine_dkms_package(),
115 ['neutron-plugin-openvswitch-agent']],
116 'server_packages': ['neutron-server',
117 'neutron-plugin-openvswitch'],
118 'server_services': ['neutron-server']
119 },
120 'nvp': {
121 'config': '/etc/neutron/plugins/nicira/nvp.ini',
122 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
123 'NeutronPlugin.NvpPluginV2',
124 'contexts': [
125 context.SharedDBContext(user=config('neutron-database-user'),
126 database=config('neutron-database'),
127 relation_prefix='neutron',
128 ssl_dir=NEUTRON_CONF_DIR)],
129 'services': [],
130 'packages': [],
131 'server_packages': ['neutron-server',
132 'neutron-plugin-nicira'],
133 'server_services': ['neutron-server']
134 },
135 'nsx': {
136 'config': '/etc/neutron/plugins/vmware/nsx.ini',
137 'driver': 'vmware',
138 'contexts': [
139 context.SharedDBContext(user=config('neutron-database-user'),
140 database=config('neutron-database'),
141 relation_prefix='neutron',
142 ssl_dir=NEUTRON_CONF_DIR)],
143 'services': [],
144 'packages': [],
145 'server_packages': ['neutron-server',
146 'neutron-plugin-vmware'],
147 'server_services': ['neutron-server']
148 },
149 'n1kv': {
150 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
151 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
152 'contexts': [
153 context.SharedDBContext(user=config('neutron-database-user'),
154 database=config('neutron-database'),
155 relation_prefix='neutron',
156 ssl_dir=NEUTRON_CONF_DIR)],
157 'services': [],
158 'packages': [determine_dkms_package(),
159 ['neutron-plugin-cisco']],
160 'server_packages': ['neutron-server',
161 'neutron-plugin-cisco'],
162 'server_services': ['neutron-server']
163 },
164 'Calico': {
165 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
166 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
167 'contexts': [
168 context.SharedDBContext(user=config('neutron-database-user'),
169 database=config('neutron-database'),
170 relation_prefix='neutron',
171 ssl_dir=NEUTRON_CONF_DIR)],
172 'services': ['calico-felix',
173 'bird',
174 'neutron-dhcp-agent',
175 'nova-api-metadata',
176 'etcd'],
177 'packages': [determine_dkms_package(),
178 ['calico-compute',
179 'bird',
180 'neutron-dhcp-agent',
181 'nova-api-metadata',
182 'etcd']],
183 'server_packages': ['neutron-server', 'calico-control', 'etcd'],
184 'server_services': ['neutron-server', 'etcd']
185 },
186 'vsp': {
187 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
188 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
189 'contexts': [
190 context.SharedDBContext(user=config('neutron-database-user'),
191 database=config('neutron-database'),
192 relation_prefix='neutron',
193 ssl_dir=NEUTRON_CONF_DIR)],
194 'services': [],
195 'packages': [],
196 'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
197 'server_services': ['neutron-server']
198 },
199 'plumgrid': {
200 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
201 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
202 'contexts': [
203 context.SharedDBContext(user=config('database-user'),
204 database=config('database'),
205 ssl_dir=NEUTRON_CONF_DIR)],
206 'services': [],
207 'packages': ['plumgrid-lxc',
208 'iovisor-dkms'],
209 'server_packages': ['neutron-server',
210 'neutron-plugin-plumgrid'],
211 'server_services': ['neutron-server']
212 },
213 'midonet': {
214 'config': '/etc/neutron/plugins/midonet/midonet.ini',
215 'driver': 'midonet.neutron.plugin.MidonetPluginV2',
216 'contexts': [
217 context.SharedDBContext(user=config('neutron-database-user'),
218 database=config('neutron-database'),
219 relation_prefix='neutron',
220 ssl_dir=NEUTRON_CONF_DIR)],
221 'services': [],
222 'packages': [determine_dkms_package()],
223 'server_packages': ['neutron-server',
224 'python-neutron-plugin-midonet'],
225 'server_services': ['neutron-server']
226 }
227 }
228 if release >= 'icehouse':
229 # NOTE: patch in ml2 plugin for icehouse onwards
230 plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
231 plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
232 plugins['ovs']['server_packages'] = ['neutron-server',
233 'neutron-plugin-ml2']
234 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches