Merge lp:~james-page/charm-helpers/vpp-rebase into lp:~gnuoy/charm-helpers/cisco-vpp
- vpp-rebase
- Merge into cisco-vpp
Proposed by
James Page
Status: | Merged |
---|---|
Merged at revision: | 399 |
Proposed branch: | lp:~james-page/charm-helpers/vpp-rebase |
Merge into: | lp:~gnuoy/charm-helpers/cisco-vpp |
Diff against target: |
5730 lines (+3512/-430) 67 files modified
VERSION (+1/-1) charmhelpers/cli/__init__.py (+32/-5) charmhelpers/cli/commands.py (+5/-4) charmhelpers/cli/hookenv.py (+23/-0) charmhelpers/contrib/amulet/utils.py (+239/-9) charmhelpers/contrib/benchmark/__init__.py (+3/-1) charmhelpers/contrib/database/mysql.py (+3/-0) charmhelpers/contrib/network/ufw.py (+46/-3) charmhelpers/contrib/openstack/amulet/deployment.py (+38/-4) charmhelpers/contrib/openstack/amulet/utils.py (+361/-51) charmhelpers/contrib/openstack/context.py (+47/-34) charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6) charmhelpers/contrib/openstack/templating.py (+2/-2) charmhelpers/contrib/openstack/utils.py (+77/-23) charmhelpers/contrib/peerstorage/__init__.py (+5/-4) charmhelpers/contrib/python/packages.py (+2/-0) charmhelpers/contrib/storage/linux/ceph.py (+6/-6) charmhelpers/contrib/storage/linux/utils.py (+4/-3) charmhelpers/contrib/unison/__init__.py (+23/-8) charmhelpers/coordinator.py (+607/-0) charmhelpers/core/files.py (+45/-0) charmhelpers/core/hookenv.py (+192/-40) charmhelpers/core/host.py (+31/-5) charmhelpers/core/services/base.py (+12/-9) charmhelpers/core/services/helpers.py (+1/-2) charmhelpers/core/unitdata.py (+61/-17) charmhelpers/fetch/__init__.py (+31/-14) charmhelpers/fetch/archiveurl.py (+7/-1) charmhelpers/fetch/giturl.py (+1/-1) docs/_extensions/automembersummary.py (+86/-0) docs/api/charmhelpers.coordinator.rst (+10/-0) docs/api/charmhelpers.core.decorators.rst (+7/-0) docs/api/charmhelpers.core.fstab.rst (+7/-0) docs/api/charmhelpers.core.hookenv.rst (+12/-0) docs/api/charmhelpers.core.host.rst (+12/-0) docs/api/charmhelpers.core.rst (+11/-38) docs/api/charmhelpers.core.services.base.rst (+12/-0) docs/api/charmhelpers.core.services.helpers.rst (+12/-0) docs/api/charmhelpers.core.services.rst (+12/-0) docs/api/charmhelpers.core.strutils.rst (+7/-0) docs/api/charmhelpers.core.sysctl.rst (+7/-0) docs/api/charmhelpers.core.templating.rst (+7/-0) docs/api/charmhelpers.core.unitdata.rst (+7/-0) 
docs/api/charmhelpers.rst (+4/-2) docs/api/modules.rst (+0/-7) docs/conf.py (+4/-1) setup.py (+22/-1) test_requirements.txt (+3/-1) tests/cli/test_cmdline.py (+56/-9) tests/contrib/amulet/test_utils.py (+105/-0) tests/contrib/benchmark/test_benchmark.py (+17/-13) tests/contrib/hahelpers/test_apache_utils.py (+1/-1) tests/contrib/network/test_ufw.py (+72/-0) tests/contrib/openstack/test_openstack_utils.py (+34/-10) tests/contrib/openstack/test_os_contexts.py (+60/-1) tests/contrib/peerstorage/test_peerstorage.py (+7/-7) tests/contrib/python/test_debug.py (+1/-1) tests/contrib/storage/test_linux_ceph.py (+11/-11) tests/contrib/storage/test_linux_storage_utils.py (+11/-2) tests/contrib/unison/test_unison.py (+58/-1) tests/coordinator/test_coordinator.py (+535/-0) tests/core/test_files.py (+32/-0) tests/core/test_hookenv.py (+232/-31) tests/core/test_host.py (+33/-1) tests/core/test_services.py (+13/-7) tests/fetch/test_archiveurl.py (+21/-3) tests/fetch/test_fetch.py (+60/-29) |
To merge this branch: | bzr merge lp:~james-page/charm-helpers/vpp-rebase |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Liam Young | Approve | ||
Review via email: mp+267916@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'VERSION' | |||
2 | --- VERSION 2015-05-20 14:31:33 +0000 | |||
3 | +++ VERSION 2015-08-13 08:33:21 +0000 | |||
4 | @@ -1,1 +1,1 @@ | |||
6 | 1 | 0.3.2 | 1 | 0.5.0 |
7 | 2 | 2 | ||
8 | === modified file 'charmhelpers/cli/__init__.py' | |||
9 | --- charmhelpers/cli/__init__.py 2015-01-22 06:06:03 +0000 | |||
10 | +++ charmhelpers/cli/__init__.py 2015-08-13 08:33:21 +0000 | |||
11 | @@ -20,6 +20,8 @@ | |||
12 | 20 | 20 | ||
13 | 21 | from six.moves import zip | 21 | from six.moves import zip |
14 | 22 | 22 | ||
15 | 23 | from charmhelpers.core import unitdata | ||
16 | 24 | |||
17 | 23 | 25 | ||
18 | 24 | class OutputFormatter(object): | 26 | class OutputFormatter(object): |
19 | 25 | def __init__(self, outfile=sys.stdout): | 27 | def __init__(self, outfile=sys.stdout): |
20 | @@ -53,6 +55,8 @@ | |||
21 | 53 | 55 | ||
22 | 54 | def raw(self, output): | 56 | def raw(self, output): |
23 | 55 | """Output data as raw string (default)""" | 57 | """Output data as raw string (default)""" |
24 | 58 | if isinstance(output, (list, tuple)): | ||
25 | 59 | output = '\n'.join(map(str, output)) | ||
26 | 56 | self.outfile.write(str(output)) | 60 | self.outfile.write(str(output)) |
27 | 57 | 61 | ||
28 | 58 | def py(self, output): | 62 | def py(self, output): |
29 | @@ -91,6 +95,7 @@ | |||
30 | 91 | argument_parser = None | 95 | argument_parser = None |
31 | 92 | subparsers = None | 96 | subparsers = None |
32 | 93 | formatter = None | 97 | formatter = None |
33 | 98 | exit_code = 0 | ||
34 | 94 | 99 | ||
35 | 95 | def __init__(self): | 100 | def __init__(self): |
36 | 96 | if not self.argument_parser: | 101 | if not self.argument_parser: |
37 | @@ -115,6 +120,21 @@ | |||
38 | 115 | return decorated | 120 | return decorated |
39 | 116 | return wrapper | 121 | return wrapper |
40 | 117 | 122 | ||
41 | 123 | def test_command(self, decorated): | ||
42 | 124 | """ | ||
43 | 125 | Subcommand is a boolean test function, so bool return values should be | ||
44 | 126 | converted to a 0/1 exit code. | ||
45 | 127 | """ | ||
46 | 128 | decorated._cli_test_command = True | ||
47 | 129 | return decorated | ||
48 | 130 | |||
49 | 131 | def no_output(self, decorated): | ||
50 | 132 | """ | ||
51 | 133 | Subcommand is not expected to return a value, so don't print a spurious None. | ||
52 | 134 | """ | ||
53 | 135 | decorated._cli_no_output = True | ||
54 | 136 | return decorated | ||
55 | 137 | |||
56 | 118 | def subcommand_builder(self, command_name, description=None): | 138 | def subcommand_builder(self, command_name, description=None): |
57 | 119 | """ | 139 | """ |
58 | 120 | Decorate a function that builds a subcommand. Builders should accept a | 140 | Decorate a function that builds a subcommand. Builders should accept a |
59 | @@ -132,12 +152,19 @@ | |||
60 | 132 | arguments = self.argument_parser.parse_args() | 152 | arguments = self.argument_parser.parse_args() |
61 | 133 | argspec = inspect.getargspec(arguments.func) | 153 | argspec = inspect.getargspec(arguments.func) |
62 | 134 | vargs = [] | 154 | vargs = [] |
64 | 135 | kwargs = {} | 155 | for arg in argspec.args: |
65 | 156 | vargs.append(getattr(arguments, arg)) | ||
66 | 136 | if argspec.varargs: | 157 | if argspec.varargs: |
71 | 137 | vargs = getattr(arguments, argspec.varargs) | 158 | vargs.extend(getattr(arguments, argspec.varargs)) |
72 | 138 | for arg in argspec.args: | 159 | output = arguments.func(*vargs) |
73 | 139 | kwargs[arg] = getattr(arguments, arg) | 160 | if getattr(arguments.func, '_cli_test_command', False): |
74 | 140 | self.formatter.format_output(arguments.func(*vargs, **kwargs), arguments.format) | 161 | self.exit_code = 0 if output else 1 |
75 | 162 | output = '' | ||
76 | 163 | if getattr(arguments.func, '_cli_no_output', False): | ||
77 | 164 | output = '' | ||
78 | 165 | self.formatter.format_output(output, arguments.format) | ||
79 | 166 | if unitdata._KV: | ||
80 | 167 | unitdata._KV.flush() | ||
81 | 141 | 168 | ||
82 | 142 | 169 | ||
83 | 143 | cmdline = CommandLine() | 170 | cmdline = CommandLine() |
84 | 144 | 171 | ||
85 | === modified file 'charmhelpers/cli/commands.py' | |||
86 | --- charmhelpers/cli/commands.py 2015-05-13 20:44:19 +0000 | |||
87 | +++ charmhelpers/cli/commands.py 2015-08-13 08:33:21 +0000 | |||
88 | @@ -24,8 +24,9 @@ | |||
89 | 24 | from . import CommandLine # noqa | 24 | from . import CommandLine # noqa |
90 | 25 | 25 | ||
91 | 26 | """ | 26 | """ |
93 | 27 | Import the sub-modules to be included by chlp. | 27 | Import the sub-modules which have decorated subcommands to register with chlp. |
94 | 28 | """ | 28 | """ |
98 | 29 | import host # noqa | 29 | from . import host # noqa |
99 | 30 | import benchmark # noqa | 30 | from . import benchmark # noqa |
100 | 31 | import unitdata # noqa | 31 | from . import unitdata # noqa |
101 | 32 | from . import hookenv # noqa | ||
102 | 32 | 33 | ||
103 | === added file 'charmhelpers/cli/hookenv.py' | |||
104 | --- charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000 | |||
105 | +++ charmhelpers/cli/hookenv.py 2015-08-13 08:33:21 +0000 | |||
106 | @@ -0,0 +1,23 @@ | |||
107 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
108 | 2 | # | ||
109 | 3 | # This file is part of charm-helpers. | ||
110 | 4 | # | ||
111 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
112 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
113 | 7 | # published by the Free Software Foundation. | ||
114 | 8 | # | ||
115 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
116 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
117 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
118 | 12 | # GNU Lesser General Public License for more details. | ||
119 | 13 | # | ||
120 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
121 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
122 | 16 | |||
123 | 17 | from . import cmdline | ||
124 | 18 | from charmhelpers.core import hookenv | ||
125 | 19 | |||
126 | 20 | |||
127 | 21 | cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) | ||
128 | 22 | cmdline.subcommand('service-name')(hookenv.service_name) | ||
129 | 23 | cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) | ||
130 | 0 | 24 | ||
131 | === modified file 'charmhelpers/contrib/amulet/utils.py' | |||
132 | --- charmhelpers/contrib/amulet/utils.py 2015-04-21 15:40:51 +0000 | |||
133 | +++ charmhelpers/contrib/amulet/utils.py 2015-08-13 08:33:21 +0000 | |||
134 | @@ -14,14 +14,21 @@ | |||
135 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
136 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
137 | 16 | 16 | ||
138 | 17 | import ConfigParser | ||
139 | 18 | import io | 17 | import io |
140 | 19 | import logging | 18 | import logging |
141 | 19 | import os | ||
142 | 20 | import re | 20 | import re |
143 | 21 | import sys | 21 | import sys |
144 | 22 | import time | 22 | import time |
145 | 23 | 23 | ||
146 | 24 | import amulet | ||
147 | 25 | import distro_info | ||
148 | 24 | import six | 26 | import six |
149 | 27 | from six.moves import configparser | ||
150 | 28 | if six.PY3: | ||
151 | 29 | from urllib import parse as urlparse | ||
152 | 30 | else: | ||
153 | 31 | import urlparse | ||
154 | 25 | 32 | ||
155 | 26 | 33 | ||
156 | 27 | class AmuletUtils(object): | 34 | class AmuletUtils(object): |
157 | @@ -33,6 +40,7 @@ | |||
158 | 33 | 40 | ||
159 | 34 | def __init__(self, log_level=logging.ERROR): | 41 | def __init__(self, log_level=logging.ERROR): |
160 | 35 | self.log = self.get_logger(level=log_level) | 42 | self.log = self.get_logger(level=log_level) |
161 | 43 | self.ubuntu_releases = self.get_ubuntu_releases() | ||
162 | 36 | 44 | ||
163 | 37 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): | 45 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): |
164 | 38 | """Get a logger object that will log to stdout.""" | 46 | """Get a logger object that will log to stdout.""" |
165 | @@ -70,12 +78,44 @@ | |||
166 | 70 | else: | 78 | else: |
167 | 71 | return False | 79 | return False |
168 | 72 | 80 | ||
169 | 81 | def get_ubuntu_release_from_sentry(self, sentry_unit): | ||
170 | 82 | """Get Ubuntu release codename from sentry unit. | ||
171 | 83 | |||
172 | 84 | :param sentry_unit: amulet sentry/service unit pointer | ||
173 | 85 | :returns: list of strings - release codename, failure message | ||
174 | 86 | """ | ||
175 | 87 | msg = None | ||
176 | 88 | cmd = 'lsb_release -cs' | ||
177 | 89 | release, code = sentry_unit.run(cmd) | ||
178 | 90 | if code == 0: | ||
179 | 91 | self.log.debug('{} lsb_release: {}'.format( | ||
180 | 92 | sentry_unit.info['unit_name'], release)) | ||
181 | 93 | else: | ||
182 | 94 | msg = ('{} `{}` returned {} ' | ||
183 | 95 | '{}'.format(sentry_unit.info['unit_name'], | ||
184 | 96 | cmd, release, code)) | ||
185 | 97 | if release not in self.ubuntu_releases: | ||
186 | 98 | msg = ("Release ({}) not found in Ubuntu releases " | ||
187 | 99 | "({})".format(release, self.ubuntu_releases)) | ||
188 | 100 | return release, msg | ||
189 | 101 | |||
190 | 73 | def validate_services(self, commands): | 102 | def validate_services(self, commands): |
194 | 74 | """Validate services. | 103 | """Validate that lists of commands succeed on service units. Can be |
195 | 75 | 104 | used to verify system services are running on the corresponding | |
193 | 76 | Verify the specified services are running on the corresponding | ||
196 | 77 | service units. | 105 | service units. |
198 | 78 | """ | 106 | |
199 | 107 | :param commands: dict with sentry keys and arbitrary command list vals | ||
200 | 108 | :returns: None if successful, Failure string message otherwise | ||
201 | 109 | """ | ||
202 | 110 | self.log.debug('Checking status of system services...') | ||
203 | 111 | |||
204 | 112 | # /!\ DEPRECATION WARNING (beisner): | ||
205 | 113 | # New and existing tests should be rewritten to use | ||
206 | 114 | # validate_services_by_name() as it is aware of init systems. | ||
207 | 115 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | ||
208 | 116 | 'validate_services_by_name instead of validate_services ' | ||
209 | 117 | 'due to init system differences.') | ||
210 | 118 | |||
211 | 79 | for k, v in six.iteritems(commands): | 119 | for k, v in six.iteritems(commands): |
212 | 80 | for cmd in v: | 120 | for cmd in v: |
213 | 81 | output, code = k.run(cmd) | 121 | output, code = k.run(cmd) |
214 | @@ -86,6 +126,45 @@ | |||
215 | 86 | return "command `{}` returned {}".format(cmd, str(code)) | 126 | return "command `{}` returned {}".format(cmd, str(code)) |
216 | 87 | return None | 127 | return None |
217 | 88 | 128 | ||
218 | 129 | def validate_services_by_name(self, sentry_services): | ||
219 | 130 | """Validate system service status by service name, automatically | ||
220 | 131 | detecting init system based on Ubuntu release codename. | ||
221 | 132 | |||
222 | 133 | :param sentry_services: dict with sentry keys and svc list values | ||
223 | 134 | :returns: None if successful, Failure string message otherwise | ||
224 | 135 | """ | ||
225 | 136 | self.log.debug('Checking status of system services...') | ||
226 | 137 | |||
227 | 138 | # Point at which systemd became a thing | ||
228 | 139 | systemd_switch = self.ubuntu_releases.index('vivid') | ||
229 | 140 | |||
230 | 141 | for sentry_unit, services_list in six.iteritems(sentry_services): | ||
231 | 142 | # Get lsb_release codename from unit | ||
232 | 143 | release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) | ||
233 | 144 | if ret: | ||
234 | 145 | return ret | ||
235 | 146 | |||
236 | 147 | for service_name in services_list: | ||
237 | 148 | if (self.ubuntu_releases.index(release) >= systemd_switch or | ||
238 | 149 | service_name in ['rabbitmq-server', 'apache2']): | ||
239 | 150 | # init is systemd (or regular sysv) | ||
240 | 151 | cmd = 'sudo service {} status'.format(service_name) | ||
241 | 152 | output, code = sentry_unit.run(cmd) | ||
242 | 153 | service_running = code == 0 | ||
243 | 154 | elif self.ubuntu_releases.index(release) < systemd_switch: | ||
244 | 155 | # init is upstart | ||
245 | 156 | cmd = 'sudo status {}'.format(service_name) | ||
246 | 157 | output, code = sentry_unit.run(cmd) | ||
247 | 158 | service_running = code == 0 and "start/running" in output | ||
248 | 159 | |||
249 | 160 | self.log.debug('{} `{}` returned ' | ||
250 | 161 | '{}'.format(sentry_unit.info['unit_name'], | ||
251 | 162 | cmd, code)) | ||
252 | 163 | if not service_running: | ||
253 | 164 | return u"command `{}` returned {} {}".format( | ||
254 | 165 | cmd, output, str(code)) | ||
255 | 166 | return None | ||
256 | 167 | |||
257 | 89 | def _get_config(self, unit, filename): | 168 | def _get_config(self, unit, filename): |
258 | 90 | """Get a ConfigParser object for parsing a unit's config file.""" | 169 | """Get a ConfigParser object for parsing a unit's config file.""" |
259 | 91 | file_contents = unit.file_contents(filename) | 170 | file_contents = unit.file_contents(filename) |
260 | @@ -93,7 +172,7 @@ | |||
261 | 93 | # NOTE(beisner): by default, ConfigParser does not handle options | 172 | # NOTE(beisner): by default, ConfigParser does not handle options |
262 | 94 | # with no value, such as the flags used in the mysql my.cnf file. | 173 | # with no value, such as the flags used in the mysql my.cnf file. |
263 | 95 | # https://bugs.python.org/issue7005 | 174 | # https://bugs.python.org/issue7005 |
265 | 96 | config = ConfigParser.ConfigParser(allow_no_value=True) | 175 | config = configparser.ConfigParser(allow_no_value=True) |
266 | 97 | config.readfp(io.StringIO(file_contents)) | 176 | config.readfp(io.StringIO(file_contents)) |
267 | 98 | return config | 177 | return config |
268 | 99 | 178 | ||
269 | @@ -103,7 +182,15 @@ | |||
270 | 103 | 182 | ||
271 | 104 | Verify that the specified section of the config file contains | 183 | Verify that the specified section of the config file contains |
272 | 105 | the expected option key:value pairs. | 184 | the expected option key:value pairs. |
273 | 185 | |||
274 | 186 | Compare expected dictionary data vs actual dictionary data. | ||
275 | 187 | The values in the 'expected' dictionary can be strings, bools, ints, | ||
276 | 188 | longs, or can be a function that evaluates a variable and returns a | ||
277 | 189 | bool. | ||
278 | 106 | """ | 190 | """ |
279 | 191 | self.log.debug('Validating config file data ({} in {} on {})' | ||
280 | 192 | '...'.format(section, config_file, | ||
281 | 193 | sentry_unit.info['unit_name'])) | ||
282 | 107 | config = self._get_config(sentry_unit, config_file) | 194 | config = self._get_config(sentry_unit, config_file) |
283 | 108 | 195 | ||
284 | 109 | if section != 'DEFAULT' and not config.has_section(section): | 196 | if section != 'DEFAULT' and not config.has_section(section): |
285 | @@ -112,9 +199,20 @@ | |||
286 | 112 | for k in expected.keys(): | 199 | for k in expected.keys(): |
287 | 113 | if not config.has_option(section, k): | 200 | if not config.has_option(section, k): |
288 | 114 | return "section [{}] is missing option {}".format(section, k) | 201 | return "section [{}] is missing option {}".format(section, k) |
290 | 115 | if config.get(section, k) != expected[k]: | 202 | |
291 | 203 | actual = config.get(section, k) | ||
292 | 204 | v = expected[k] | ||
293 | 205 | if (isinstance(v, six.string_types) or | ||
294 | 206 | isinstance(v, bool) or | ||
295 | 207 | isinstance(v, six.integer_types)): | ||
296 | 208 | # handle explicit values | ||
297 | 209 | if actual != v: | ||
298 | 210 | return "section [{}] {}:{} != expected {}:{}".format( | ||
299 | 211 | section, k, actual, k, expected[k]) | ||
300 | 212 | # handle function pointers, such as not_null or valid_ip | ||
301 | 213 | elif not v(actual): | ||
302 | 116 | return "section [{}] {}:{} != expected {}:{}".format( | 214 | return "section [{}] {}:{} != expected {}:{}".format( |
304 | 117 | section, k, config.get(section, k), k, expected[k]) | 215 | section, k, actual, k, expected[k]) |
305 | 118 | return None | 216 | return None |
306 | 119 | 217 | ||
307 | 120 | def _validate_dict_data(self, expected, actual): | 218 | def _validate_dict_data(self, expected, actual): |
308 | @@ -122,7 +220,7 @@ | |||
309 | 122 | 220 | ||
310 | 123 | Compare expected dictionary data vs actual dictionary data. | 221 | Compare expected dictionary data vs actual dictionary data. |
311 | 124 | The values in the 'expected' dictionary can be strings, bools, ints, | 222 | The values in the 'expected' dictionary can be strings, bools, ints, |
313 | 125 | longs, or can be a function that evaluate a variable and returns a | 223 | longs, or can be a function that evaluates a variable and returns a |
314 | 126 | bool. | 224 | bool. |
315 | 127 | """ | 225 | """ |
316 | 128 | self.log.debug('actual: {}'.format(repr(actual))) | 226 | self.log.debug('actual: {}'.format(repr(actual))) |
317 | @@ -133,8 +231,10 @@ | |||
318 | 133 | if (isinstance(v, six.string_types) or | 231 | if (isinstance(v, six.string_types) or |
319 | 134 | isinstance(v, bool) or | 232 | isinstance(v, bool) or |
320 | 135 | isinstance(v, six.integer_types)): | 233 | isinstance(v, six.integer_types)): |
321 | 234 | # handle explicit values | ||
322 | 136 | if v != actual[k]: | 235 | if v != actual[k]: |
323 | 137 | return "{}:{}".format(k, actual[k]) | 236 | return "{}:{}".format(k, actual[k]) |
324 | 237 | # handle function pointers, such as not_null or valid_ip | ||
325 | 138 | elif not v(actual[k]): | 238 | elif not v(actual[k]): |
326 | 139 | return "{}:{}".format(k, actual[k]) | 239 | return "{}:{}".format(k, actual[k]) |
327 | 140 | else: | 240 | else: |
328 | @@ -321,3 +421,133 @@ | |||
329 | 321 | 421 | ||
330 | 322 | def endpoint_error(self, name, data): | 422 | def endpoint_error(self, name, data): |
331 | 323 | return 'unexpected endpoint data in {} - {}'.format(name, data) | 423 | return 'unexpected endpoint data in {} - {}'.format(name, data) |
332 | 424 | |||
333 | 425 | def get_ubuntu_releases(self): | ||
334 | 426 | """Return a list of all Ubuntu releases in order of release.""" | ||
335 | 427 | _d = distro_info.UbuntuDistroInfo() | ||
336 | 428 | _release_list = _d.all | ||
337 | 429 | self.log.debug('Ubuntu release list: {}'.format(_release_list)) | ||
338 | 430 | return _release_list | ||
339 | 431 | |||
340 | 432 | def file_to_url(self, file_rel_path): | ||
341 | 433 | """Convert a relative file path to a file URL.""" | ||
342 | 434 | _abs_path = os.path.abspath(file_rel_path) | ||
343 | 435 | return urlparse.urlparse(_abs_path, scheme='file').geturl() | ||
344 | 436 | |||
345 | 437 | def check_commands_on_units(self, commands, sentry_units): | ||
346 | 438 | """Check that all commands in a list exit zero on all | ||
347 | 439 | sentry units in a list. | ||
348 | 440 | |||
349 | 441 | :param commands: list of bash commands | ||
350 | 442 | :param sentry_units: list of sentry unit pointers | ||
351 | 443 | :returns: None if successful; Failure message otherwise | ||
352 | 444 | """ | ||
353 | 445 | self.log.debug('Checking exit codes for {} commands on {} ' | ||
354 | 446 | 'sentry units...'.format(len(commands), | ||
355 | 447 | len(sentry_units))) | ||
356 | 448 | for sentry_unit in sentry_units: | ||
357 | 449 | for cmd in commands: | ||
358 | 450 | output, code = sentry_unit.run(cmd) | ||
359 | 451 | if code == 0: | ||
360 | 452 | self.log.debug('{} `{}` returned {} ' | ||
361 | 453 | '(OK)'.format(sentry_unit.info['unit_name'], | ||
362 | 454 | cmd, code)) | ||
363 | 455 | else: | ||
364 | 456 | return ('{} `{}` returned {} ' | ||
365 | 457 | '{}'.format(sentry_unit.info['unit_name'], | ||
366 | 458 | cmd, code, output)) | ||
367 | 459 | return None | ||
368 | 460 | |||
369 | 461 | def get_process_id_list(self, sentry_unit, process_name): | ||
370 | 462 | """Get a list of process ID(s) from a single sentry juju unit | ||
371 | 463 | for a single process name. | ||
372 | 464 | |||
373 | 465 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
374 | 466 | :param process_name: Process name | ||
375 | 467 | :returns: List of process IDs | ||
376 | 468 | """ | ||
377 | 469 | cmd = 'pidof {}'.format(process_name) | ||
378 | 470 | output, code = sentry_unit.run(cmd) | ||
379 | 471 | if code != 0: | ||
380 | 472 | msg = ('{} `{}` returned {} ' | ||
381 | 473 | '{}'.format(sentry_unit.info['unit_name'], | ||
382 | 474 | cmd, code, output)) | ||
383 | 475 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
384 | 476 | return str(output).split() | ||
385 | 477 | |||
386 | 478 | def get_unit_process_ids(self, unit_processes): | ||
387 | 479 | """Construct a dict containing unit sentries, process names, and | ||
388 | 480 | process IDs.""" | ||
389 | 481 | pid_dict = {} | ||
390 | 482 | for sentry_unit, process_list in unit_processes.iteritems(): | ||
391 | 483 | pid_dict[sentry_unit] = {} | ||
392 | 484 | for process in process_list: | ||
393 | 485 | pids = self.get_process_id_list(sentry_unit, process) | ||
394 | 486 | pid_dict[sentry_unit].update({process: pids}) | ||
395 | 487 | return pid_dict | ||
396 | 488 | |||
397 | 489 | def validate_unit_process_ids(self, expected, actual): | ||
398 | 490 | """Validate process id quantities for services on units.""" | ||
399 | 491 | self.log.debug('Checking units for running processes...') | ||
400 | 492 | self.log.debug('Expected PIDs: {}'.format(expected)) | ||
401 | 493 | self.log.debug('Actual PIDs: {}'.format(actual)) | ||
402 | 494 | |||
403 | 495 | if len(actual) != len(expected): | ||
404 | 496 | return ('Unit count mismatch. expected, actual: {}, ' | ||
405 | 497 | '{} '.format(len(expected), len(actual))) | ||
406 | 498 | |||
407 | 499 | for (e_sentry, e_proc_names) in expected.iteritems(): | ||
408 | 500 | e_sentry_name = e_sentry.info['unit_name'] | ||
409 | 501 | if e_sentry in actual.keys(): | ||
410 | 502 | a_proc_names = actual[e_sentry] | ||
411 | 503 | else: | ||
412 | 504 | return ('Expected sentry ({}) not found in actual dict data.' | ||
413 | 505 | '{}'.format(e_sentry_name, e_sentry)) | ||
414 | 506 | |||
415 | 507 | if len(e_proc_names.keys()) != len(a_proc_names.keys()): | ||
416 | 508 | return ('Process name count mismatch. expected, actual: {}, ' | ||
417 | 509 | '{}'.format(len(expected), len(actual))) | ||
418 | 510 | |||
419 | 511 | for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ | ||
420 | 512 | zip(e_proc_names.items(), a_proc_names.items()): | ||
421 | 513 | if e_proc_name != a_proc_name: | ||
422 | 514 | return ('Process name mismatch. expected, actual: {}, ' | ||
423 | 515 | '{}'.format(e_proc_name, a_proc_name)) | ||
424 | 516 | |||
425 | 517 | a_pids_length = len(a_pids) | ||
426 | 518 | fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' | ||
427 | 519 | '{}, {} ({})'.format(e_sentry_name, e_proc_name, | ||
428 | 520 | e_pids_length, a_pids_length, | ||
429 | 521 | a_pids)) | ||
430 | 522 | |||
431 | 523 | # If expected is not bool, ensure PID quantities match | ||
432 | 524 | if not isinstance(e_pids_length, bool) and \ | ||
433 | 525 | a_pids_length != e_pids_length: | ||
434 | 526 | return fail_msg | ||
435 | 527 | # If expected is bool True, ensure 1 or more PIDs exist | ||
436 | 528 | elif isinstance(e_pids_length, bool) and \ | ||
437 | 529 | e_pids_length is True and a_pids_length < 1: | ||
438 | 530 | return fail_msg | ||
439 | 531 | # If expected is bool False, ensure 0 PIDs exist | ||
440 | 532 | elif isinstance(e_pids_length, bool) and \ | ||
441 | 533 | e_pids_length is False and a_pids_length != 0: | ||
442 | 534 | return fail_msg | ||
443 | 535 | else: | ||
444 | 536 | self.log.debug('PID check OK: {} {} {}: ' | ||
445 | 537 | '{}'.format(e_sentry_name, e_proc_name, | ||
446 | 538 | e_pids_length, a_pids)) | ||
447 | 539 | return None | ||
448 | 540 | |||
449 | 541 | def validate_list_of_identical_dicts(self, list_of_dicts): | ||
450 | 542 | """Check that all dicts within a list are identical.""" | ||
451 | 543 | hashes = [] | ||
452 | 544 | for _dict in list_of_dicts: | ||
453 | 545 | hashes.append(hash(frozenset(_dict.items()))) | ||
454 | 546 | |||
455 | 547 | self.log.debug('Hashes: {}'.format(hashes)) | ||
456 | 548 | if len(set(hashes)) == 1: | ||
457 | 549 | self.log.debug('Dicts within list are identical') | ||
458 | 550 | else: | ||
459 | 551 | return 'Dicts within list are not identical' | ||
460 | 552 | |||
461 | 553 | return None | ||
462 | 324 | 554 | ||
463 | === modified file 'charmhelpers/contrib/benchmark/__init__.py' | |||
464 | --- charmhelpers/contrib/benchmark/__init__.py 2015-04-24 16:18:42 +0000 | |||
465 | +++ charmhelpers/contrib/benchmark/__init__.py 2015-08-13 08:33:21 +0000 | |||
466 | @@ -63,6 +63,8 @@ | |||
467 | 63 | 63 | ||
468 | 64 | """ | 64 | """ |
469 | 65 | 65 | ||
470 | 66 | BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing | ||
471 | 67 | |||
472 | 66 | required_keys = [ | 68 | required_keys = [ |
473 | 67 | 'hostname', | 69 | 'hostname', |
474 | 68 | 'port', | 70 | 'port', |
475 | @@ -91,7 +93,7 @@ | |||
476 | 91 | break | 93 | break |
477 | 92 | 94 | ||
478 | 93 | if len(config): | 95 | if len(config): |
480 | 94 | with open('/etc/benchmark.conf', 'w') as f: | 96 | with open(self.BENCHMARK_CONF, 'w') as f: |
481 | 95 | for key, val in iter(config.items()): | 97 | for key, val in iter(config.items()): |
482 | 96 | f.write("%s=%s\n" % (key, val)) | 98 | f.write("%s=%s\n" % (key, val)) |
483 | 97 | 99 | ||
484 | 98 | 100 | ||
485 | === modified file 'charmhelpers/contrib/database/mysql.py' | |||
486 | --- charmhelpers/contrib/database/mysql.py 2015-06-03 20:31:29 +0000 | |||
487 | +++ charmhelpers/contrib/database/mysql.py 2015-08-13 08:33:21 +0000 | |||
488 | @@ -381,6 +381,9 @@ | |||
489 | 381 | if 'wait-timeout' in config: | 381 | if 'wait-timeout' in config: |
490 | 382 | mysql_config['wait_timeout'] = config['wait-timeout'] | 382 | mysql_config['wait_timeout'] = config['wait-timeout'] |
491 | 383 | 383 | ||
492 | 384 | if 'innodb-flush-log-at-trx-commit' in config: | ||
493 | 385 | mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit'] | ||
494 | 386 | |||
495 | 384 | # Set a sane default key_buffer size | 387 | # Set a sane default key_buffer size |
496 | 385 | mysql_config['key_buffer'] = self.human_to_bytes('32M') | 388 | mysql_config['key_buffer'] = self.human_to_bytes('32M') |
497 | 386 | total_memory = self.human_to_bytes(self.get_mem_total()) | 389 | total_memory = self.human_to_bytes(self.get_mem_total()) |
498 | 387 | 390 | ||
499 | === modified file 'charmhelpers/contrib/network/ufw.py' | |||
500 | --- charmhelpers/contrib/network/ufw.py 2015-02-12 20:08:28 +0000 | |||
501 | +++ charmhelpers/contrib/network/ufw.py 2015-08-13 08:33:21 +0000 | |||
502 | @@ -180,7 +180,43 @@ | |||
503 | 180 | return True | 180 | return True |
504 | 181 | 181 | ||
505 | 182 | 182 | ||
507 | 183 | def modify_access(src, dst='any', port=None, proto=None, action='allow'): | 183 | def default_policy(policy='deny', direction='incoming'): |
508 | 184 | """ | ||
509 | 185 | Changes the default policy for traffic `direction` | ||
510 | 186 | |||
511 | 187 | :param policy: allow, deny or reject | ||
512 | 188 | :param direction: traffic direction, possible values: incoming, outgoing, | ||
513 | 189 | routed | ||
514 | 190 | """ | ||
515 | 191 | if policy not in ['allow', 'deny', 'reject']: | ||
516 | 192 | raise UFWError(('Unknown policy %s, valid values: ' | ||
517 | 193 | 'allow, deny, reject') % policy) | ||
518 | 194 | |||
519 | 195 | if direction not in ['incoming', 'outgoing', 'routed']: | ||
520 | 196 | raise UFWError(('Unknown direction %s, valid values: ' | ||
521 | 197 | 'incoming, outgoing, routed') % direction) | ||
522 | 198 | |||
523 | 199 | output = subprocess.check_output(['ufw', 'default', policy, direction], | ||
524 | 200 | universal_newlines=True, | ||
525 | 201 | env={'LANG': 'en_US', | ||
526 | 202 | 'PATH': os.environ['PATH']}) | ||
527 | 203 | hookenv.log(output, level='DEBUG') | ||
528 | 204 | |||
529 | 205 | m = re.findall("^Default %s policy changed to '%s'\n" % (direction, | ||
530 | 206 | policy), | ||
531 | 207 | output, re.M) | ||
532 | 208 | if len(m) == 0: | ||
533 | 209 | hookenv.log("ufw couldn't change the default policy to %s for %s" | ||
534 | 210 | % (policy, direction), level='WARN') | ||
535 | 211 | return False | ||
536 | 212 | else: | ||
537 | 213 | hookenv.log("ufw default policy for %s changed to %s" | ||
538 | 214 | % (direction, policy), level='INFO') | ||
539 | 215 | return True | ||
540 | 216 | |||
541 | 217 | |||
542 | 218 | def modify_access(src, dst='any', port=None, proto=None, action='allow', | ||
543 | 219 | index=None): | ||
544 | 184 | """ | 220 | """ |
545 | 185 | Grant access to an address or subnet | 221 | Grant access to an address or subnet |
546 | 186 | 222 | ||
547 | @@ -192,6 +228,8 @@ | |||
548 | 192 | :param port: destination port | 228 | :param port: destination port |
549 | 193 | :param proto: protocol (tcp or udp) | 229 | :param proto: protocol (tcp or udp) |
550 | 194 | :param action: `allow` or `delete` | 230 | :param action: `allow` or `delete` |
551 | 231 | :param index: if different from None the rule is inserted at the given | ||
552 | 232 | `index`. | ||
553 | 195 | """ | 233 | """ |
554 | 196 | if not is_enabled(): | 234 | if not is_enabled(): |
555 | 197 | hookenv.log('ufw is disabled, skipping modify_access()', level='WARN') | 235 | hookenv.log('ufw is disabled, skipping modify_access()', level='WARN') |
556 | @@ -199,6 +237,8 @@ | |||
557 | 199 | 237 | ||
558 | 200 | if action == 'delete': | 238 | if action == 'delete': |
559 | 201 | cmd = ['ufw', 'delete', 'allow'] | 239 | cmd = ['ufw', 'delete', 'allow'] |
560 | 240 | elif index is not None: | ||
561 | 241 | cmd = ['ufw', 'insert', str(index), action] | ||
562 | 202 | else: | 242 | else: |
563 | 203 | cmd = ['ufw', action] | 243 | cmd = ['ufw', action] |
564 | 204 | 244 | ||
565 | @@ -227,7 +267,7 @@ | |||
566 | 227 | level='ERROR') | 267 | level='ERROR') |
567 | 228 | 268 | ||
568 | 229 | 269 | ||
570 | 230 | def grant_access(src, dst='any', port=None, proto=None): | 270 | def grant_access(src, dst='any', port=None, proto=None, index=None): |
571 | 231 | """ | 271 | """ |
572 | 232 | Grant access to an address or subnet | 272 | Grant access to an address or subnet |
573 | 233 | 273 | ||
574 | @@ -238,8 +278,11 @@ | |||
575 | 238 | field has to be set. | 278 | field has to be set. |
576 | 239 | :param port: destination port | 279 | :param port: destination port |
577 | 240 | :param proto: protocol (tcp or udp) | 280 | :param proto: protocol (tcp or udp) |
578 | 281 | :param index: if different from None the rule is inserted at the given | ||
579 | 282 | `index`. | ||
580 | 241 | """ | 283 | """ |
582 | 242 | return modify_access(src, dst=dst, port=port, proto=proto, action='allow') | 284 | return modify_access(src, dst=dst, port=port, proto=proto, action='allow', |
583 | 285 | index=index) | ||
584 | 243 | 286 | ||
585 | 244 | 287 | ||
586 | 245 | def revoke_access(src, dst='any', port=None, proto=None): | 288 | def revoke_access(src, dst='any', port=None, proto=None): |
587 | 246 | 289 | ||
588 | === modified file 'charmhelpers/contrib/openstack/amulet/deployment.py' | |||
589 | --- charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-12 07:50:34 +0000 | |||
590 | +++ charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-13 08:33:21 +0000 | |||
591 | @@ -44,7 +44,7 @@ | |||
592 | 44 | Determine if the local branch being tested is derived from its | 44 | Determine if the local branch being tested is derived from its |
593 | 45 | stable or next (dev) branch, and based on this, use the corresponding | 45 | stable or next (dev) branch, and based on this, use the corresponding |
594 | 46 | stable or next branches for the other_services.""" | 46 | stable or next branches for the other_services.""" |
596 | 47 | base_charms = ['mysql', 'mongodb'] | 47 | base_charms = ['mysql', 'mongodb', 'nrpe'] |
597 | 48 | 48 | ||
598 | 49 | if self.series in ['precise', 'trusty']: | 49 | if self.series in ['precise', 'trusty']: |
599 | 50 | base_series = self.series | 50 | base_series = self.series |
600 | @@ -83,9 +83,10 @@ | |||
601 | 83 | services.append(this_service) | 83 | services.append(this_service) |
602 | 84 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 84 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
603 | 85 | 'ceph-osd', 'ceph-radosgw'] | 85 | 'ceph-osd', 'ceph-radosgw'] |
607 | 86 | # Openstack subordinate charms do not expose an origin option as that | 86 | # Most OpenStack subordinate charms do not expose an origin option |
608 | 87 | # is controlled by the principal | 87 | # as that is controlled by the principal. |
609 | 88 | ignore = ['neutron-openvswitch', 'cisco-vpp', 'odl-controller'] | 88 | ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', |
610 | 89 | 'cisco-vpp', 'odl-controller'] | ||
611 | 89 | 90 | ||
612 | 90 | if self.openstack: | 91 | if self.openstack: |
613 | 91 | for svc in services: | 92 | for svc in services: |
614 | @@ -152,3 +153,36 @@ | |||
615 | 152 | return os_origin.split('%s-' % self.series)[1].split('/')[0] | 153 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
616 | 153 | else: | 154 | else: |
617 | 154 | return releases[self.series] | 155 | return releases[self.series] |
618 | 156 | |||
619 | 157 | def get_ceph_expected_pools(self, radosgw=False): | ||
620 | 158 | """Return a list of expected ceph pools in a ceph + cinder + glance | ||
621 | 159 | test scenario, based on OpenStack release and whether ceph radosgw | ||
622 | 160 | is flagged as present or not.""" | ||
623 | 161 | |||
624 | 162 | if self._get_openstack_release() >= self.trusty_kilo: | ||
625 | 163 | # Kilo or later | ||
626 | 164 | pools = [ | ||
627 | 165 | 'rbd', | ||
628 | 166 | 'cinder', | ||
629 | 167 | 'glance' | ||
630 | 168 | ] | ||
631 | 169 | else: | ||
632 | 170 | # Juno or earlier | ||
633 | 171 | pools = [ | ||
634 | 172 | 'data', | ||
635 | 173 | 'metadata', | ||
636 | 174 | 'rbd', | ||
637 | 175 | 'cinder', | ||
638 | 176 | 'glance' | ||
639 | 177 | ] | ||
640 | 178 | |||
641 | 179 | if radosgw: | ||
642 | 180 | pools.extend([ | ||
643 | 181 | '.rgw.root', | ||
644 | 182 | '.rgw.control', | ||
645 | 183 | '.rgw', | ||
646 | 184 | '.rgw.gc', | ||
647 | 185 | '.users.uid' | ||
648 | 186 | ]) | ||
649 | 187 | |||
650 | 188 | return pools | ||
651 | 155 | 189 | ||
652 | === modified file 'charmhelpers/contrib/openstack/amulet/utils.py' | |||
653 | --- charmhelpers/contrib/openstack/amulet/utils.py 2015-01-22 06:06:03 +0000 | |||
654 | +++ charmhelpers/contrib/openstack/amulet/utils.py 2015-08-13 08:33:21 +0000 | |||
655 | @@ -14,16 +14,20 @@ | |||
656 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
657 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
658 | 16 | 16 | ||
659 | 17 | import amulet | ||
660 | 18 | import json | ||
661 | 17 | import logging | 19 | import logging |
662 | 18 | import os | 20 | import os |
663 | 21 | import six | ||
664 | 19 | import time | 22 | import time |
665 | 20 | import urllib | 23 | import urllib |
666 | 21 | 24 | ||
667 | 25 | import cinderclient.v1.client as cinder_client | ||
668 | 22 | import glanceclient.v1.client as glance_client | 26 | import glanceclient.v1.client as glance_client |
669 | 27 | import heatclient.v1.client as heat_client | ||
670 | 23 | import keystoneclient.v2_0 as keystone_client | 28 | import keystoneclient.v2_0 as keystone_client |
671 | 24 | import novaclient.v1_1.client as nova_client | 29 | import novaclient.v1_1.client as nova_client |
674 | 25 | 30 | import swiftclient | |
673 | 26 | import six | ||
675 | 27 | 31 | ||
676 | 28 | from charmhelpers.contrib.amulet.utils import ( | 32 | from charmhelpers.contrib.amulet.utils import ( |
677 | 29 | AmuletUtils | 33 | AmuletUtils |
678 | @@ -37,7 +41,7 @@ | |||
679 | 37 | """OpenStack amulet utilities. | 41 | """OpenStack amulet utilities. |
680 | 38 | 42 | ||
681 | 39 | This class inherits from AmuletUtils and has additional support | 43 | This class inherits from AmuletUtils and has additional support |
683 | 40 | that is specifically for use by OpenStack charms. | 44 | that is specifically for use by OpenStack charm tests. |
684 | 41 | """ | 45 | """ |
685 | 42 | 46 | ||
686 | 43 | def __init__(self, log_level=ERROR): | 47 | def __init__(self, log_level=ERROR): |
687 | @@ -51,6 +55,8 @@ | |||
688 | 51 | Validate actual endpoint data vs expected endpoint data. The ports | 55 | Validate actual endpoint data vs expected endpoint data. The ports |
689 | 52 | are used to find the matching endpoint. | 56 | are used to find the matching endpoint. |
690 | 53 | """ | 57 | """ |
691 | 58 | self.log.debug('Validating endpoint data...') | ||
692 | 59 | self.log.debug('actual: {}'.format(repr(endpoints))) | ||
693 | 54 | found = False | 60 | found = False |
694 | 55 | for ep in endpoints: | 61 | for ep in endpoints: |
695 | 56 | self.log.debug('endpoint: {}'.format(repr(ep))) | 62 | self.log.debug('endpoint: {}'.format(repr(ep))) |
696 | @@ -77,6 +83,7 @@ | |||
697 | 77 | Validate a list of actual service catalog endpoints vs a list of | 83 | Validate a list of actual service catalog endpoints vs a list of |
698 | 78 | expected service catalog endpoints. | 84 | expected service catalog endpoints. |
699 | 79 | """ | 85 | """ |
700 | 86 | self.log.debug('Validating service catalog endpoint data...') | ||
701 | 80 | self.log.debug('actual: {}'.format(repr(actual))) | 87 | self.log.debug('actual: {}'.format(repr(actual))) |
702 | 81 | for k, v in six.iteritems(expected): | 88 | for k, v in six.iteritems(expected): |
703 | 82 | if k in actual: | 89 | if k in actual: |
704 | @@ -93,6 +100,7 @@ | |||
705 | 93 | Validate a list of actual tenant data vs list of expected tenant | 100 | Validate a list of actual tenant data vs list of expected tenant |
706 | 94 | data. | 101 | data. |
707 | 95 | """ | 102 | """ |
708 | 103 | self.log.debug('Validating tenant data...') | ||
709 | 96 | self.log.debug('actual: {}'.format(repr(actual))) | 104 | self.log.debug('actual: {}'.format(repr(actual))) |
710 | 97 | for e in expected: | 105 | for e in expected: |
711 | 98 | found = False | 106 | found = False |
712 | @@ -114,6 +122,7 @@ | |||
713 | 114 | Validate a list of actual role data vs a list of expected role | 122 | Validate a list of actual role data vs a list of expected role |
714 | 115 | data. | 123 | data. |
715 | 116 | """ | 124 | """ |
716 | 125 | self.log.debug('Validating role data...') | ||
717 | 117 | self.log.debug('actual: {}'.format(repr(actual))) | 126 | self.log.debug('actual: {}'.format(repr(actual))) |
718 | 118 | for e in expected: | 127 | for e in expected: |
719 | 119 | found = False | 128 | found = False |
720 | @@ -134,6 +143,7 @@ | |||
721 | 134 | Validate a list of actual user data vs a list of expected user | 143 | Validate a list of actual user data vs a list of expected user |
722 | 135 | data. | 144 | data. |
723 | 136 | """ | 145 | """ |
724 | 146 | self.log.debug('Validating user data...') | ||
725 | 137 | self.log.debug('actual: {}'.format(repr(actual))) | 147 | self.log.debug('actual: {}'.format(repr(actual))) |
726 | 138 | for e in expected: | 148 | for e in expected: |
727 | 139 | found = False | 149 | found = False |
728 | @@ -155,17 +165,30 @@ | |||
729 | 155 | 165 | ||
730 | 156 | Validate a list of actual flavors vs a list of expected flavors. | 166 | Validate a list of actual flavors vs a list of expected flavors. |
731 | 157 | """ | 167 | """ |
732 | 168 | self.log.debug('Validating flavor data...') | ||
733 | 158 | self.log.debug('actual: {}'.format(repr(actual))) | 169 | self.log.debug('actual: {}'.format(repr(actual))) |
734 | 159 | act = [a.name for a in actual] | 170 | act = [a.name for a in actual] |
735 | 160 | return self._validate_list_data(expected, act) | 171 | return self._validate_list_data(expected, act) |
736 | 161 | 172 | ||
737 | 162 | def tenant_exists(self, keystone, tenant): | 173 | def tenant_exists(self, keystone, tenant): |
738 | 163 | """Return True if tenant exists.""" | 174 | """Return True if tenant exists.""" |
739 | 175 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) | ||
740 | 164 | return tenant in [t.name for t in keystone.tenants.list()] | 176 | return tenant in [t.name for t in keystone.tenants.list()] |
741 | 165 | 177 | ||
742 | 178 | def authenticate_cinder_admin(self, keystone_sentry, username, | ||
743 | 179 | password, tenant): | ||
744 | 180 | """Authenticates admin user with cinder.""" | ||
745 | 181 | # NOTE(beisner): cinder python client doesn't accept tokens. | ||
746 | 182 | service_ip = \ | ||
747 | 183 | keystone_sentry.relation('shared-db', | ||
748 | 184 | 'mysql:shared-db')['private-address'] | ||
749 | 185 | ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) | ||
750 | 186 | return cinder_client.Client(username, password, tenant, ept) | ||
751 | 187 | |||
752 | 166 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 188 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
753 | 167 | tenant): | 189 | tenant): |
754 | 168 | """Authenticates admin user with the keystone admin endpoint.""" | 190 | """Authenticates admin user with the keystone admin endpoint.""" |
755 | 191 | self.log.debug('Authenticating keystone admin...') | ||
756 | 169 | unit = keystone_sentry | 192 | unit = keystone_sentry |
757 | 170 | service_ip = unit.relation('shared-db', | 193 | service_ip = unit.relation('shared-db', |
758 | 171 | 'mysql:shared-db')['private-address'] | 194 | 'mysql:shared-db')['private-address'] |
759 | @@ -175,6 +198,7 @@ | |||
760 | 175 | 198 | ||
761 | 176 | def authenticate_keystone_user(self, keystone, user, password, tenant): | 199 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
762 | 177 | """Authenticates a regular user with the keystone public endpoint.""" | 200 | """Authenticates a regular user with the keystone public endpoint.""" |
763 | 201 | self.log.debug('Authenticating keystone user ({})...'.format(user)) | ||
764 | 178 | ep = keystone.service_catalog.url_for(service_type='identity', | 202 | ep = keystone.service_catalog.url_for(service_type='identity', |
765 | 179 | endpoint_type='publicURL') | 203 | endpoint_type='publicURL') |
766 | 180 | return keystone_client.Client(username=user, password=password, | 204 | return keystone_client.Client(username=user, password=password, |
767 | @@ -182,19 +206,49 @@ | |||
768 | 182 | 206 | ||
769 | 183 | def authenticate_glance_admin(self, keystone): | 207 | def authenticate_glance_admin(self, keystone): |
770 | 184 | """Authenticates admin user with glance.""" | 208 | """Authenticates admin user with glance.""" |
771 | 209 | self.log.debug('Authenticating glance admin...') | ||
772 | 185 | ep = keystone.service_catalog.url_for(service_type='image', | 210 | ep = keystone.service_catalog.url_for(service_type='image', |
773 | 186 | endpoint_type='adminURL') | 211 | endpoint_type='adminURL') |
774 | 187 | return glance_client.Client(ep, token=keystone.auth_token) | 212 | return glance_client.Client(ep, token=keystone.auth_token) |
775 | 188 | 213 | ||
776 | 214 | def authenticate_heat_admin(self, keystone): | ||
777 | 215 | """Authenticates the admin user with heat.""" | ||
778 | 216 | self.log.debug('Authenticating heat admin...') | ||
779 | 217 | ep = keystone.service_catalog.url_for(service_type='orchestration', | ||
780 | 218 | endpoint_type='publicURL') | ||
781 | 219 | return heat_client.Client(endpoint=ep, token=keystone.auth_token) | ||
782 | 220 | |||
783 | 189 | def authenticate_nova_user(self, keystone, user, password, tenant): | 221 | def authenticate_nova_user(self, keystone, user, password, tenant): |
784 | 190 | """Authenticates a regular user with nova-api.""" | 222 | """Authenticates a regular user with nova-api.""" |
785 | 223 | self.log.debug('Authenticating nova user ({})...'.format(user)) | ||
786 | 191 | ep = keystone.service_catalog.url_for(service_type='identity', | 224 | ep = keystone.service_catalog.url_for(service_type='identity', |
787 | 192 | endpoint_type='publicURL') | 225 | endpoint_type='publicURL') |
788 | 193 | return nova_client.Client(username=user, api_key=password, | 226 | return nova_client.Client(username=user, api_key=password, |
789 | 194 | project_id=tenant, auth_url=ep) | 227 | project_id=tenant, auth_url=ep) |
790 | 195 | 228 | ||
791 | 229 | def authenticate_swift_user(self, keystone, user, password, tenant): | ||
792 | 230 | """Authenticates a regular user with swift api.""" | ||
793 | 231 | self.log.debug('Authenticating swift user ({})...'.format(user)) | ||
794 | 232 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
795 | 233 | endpoint_type='publicURL') | ||
796 | 234 | return swiftclient.Connection(authurl=ep, | ||
797 | 235 | user=user, | ||
798 | 236 | key=password, | ||
799 | 237 | tenant_name=tenant, | ||
800 | 238 | auth_version='2.0') | ||
801 | 239 | |||
802 | 196 | def create_cirros_image(self, glance, image_name): | 240 | def create_cirros_image(self, glance, image_name): |
804 | 197 | """Download the latest cirros image and upload it to glance.""" | 241 | """Download the latest cirros image and upload it to glance, |
805 | 242 | validate and return a resource pointer. | ||
806 | 243 | |||
807 | 244 | :param glance: pointer to authenticated glance connection | ||
808 | 245 | :param image_name: display name for new image | ||
809 | 246 | :returns: glance image pointer | ||
810 | 247 | """ | ||
811 | 248 | self.log.debug('Creating glance cirros image ' | ||
812 | 249 | '({})...'.format(image_name)) | ||
813 | 250 | |||
814 | 251 | # Download cirros image | ||
815 | 198 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | 252 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
816 | 199 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | 253 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
817 | 200 | if http_proxy: | 254 | if http_proxy: |
818 | @@ -203,57 +257,67 @@ | |||
819 | 203 | else: | 257 | else: |
820 | 204 | opener = urllib.FancyURLopener() | 258 | opener = urllib.FancyURLopener() |
821 | 205 | 259 | ||
823 | 206 | f = opener.open("http://download.cirros-cloud.net/version/released") | 260 | f = opener.open('http://download.cirros-cloud.net/version/released') |
824 | 207 | version = f.read().strip() | 261 | version = f.read().strip() |
826 | 208 | cirros_img = "cirros-{}-x86_64-disk.img".format(version) | 262 | cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
827 | 209 | local_path = os.path.join('tests', cirros_img) | 263 | local_path = os.path.join('tests', cirros_img) |
828 | 210 | 264 | ||
829 | 211 | if not os.path.exists(local_path): | 265 | if not os.path.exists(local_path): |
831 | 212 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | 266 | cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
832 | 213 | version, cirros_img) | 267 | version, cirros_img) |
833 | 214 | opener.retrieve(cirros_url, local_path) | 268 | opener.retrieve(cirros_url, local_path) |
834 | 215 | f.close() | 269 | f.close() |
835 | 216 | 270 | ||
836 | 271 | # Create glance image | ||
837 | 217 | with open(local_path) as f: | 272 | with open(local_path) as f: |
838 | 218 | image = glance.images.create(name=image_name, is_public=True, | 273 | image = glance.images.create(name=image_name, is_public=True, |
839 | 219 | disk_format='qcow2', | 274 | disk_format='qcow2', |
840 | 220 | container_format='bare', data=f) | 275 | container_format='bare', data=f) |
853 | 221 | count = 1 | 276 | |
854 | 222 | status = image.status | 277 | # Wait for image to reach active status |
855 | 223 | while status != 'active' and count < 10: | 278 | img_id = image.id |
856 | 224 | time.sleep(3) | 279 | ret = self.resource_reaches_status(glance.images, img_id, |
857 | 225 | image = glance.images.get(image.id) | 280 | expected_stat='active', |
858 | 226 | status = image.status | 281 | msg='Image status wait') |
859 | 227 | self.log.debug('image status: {}'.format(status)) | 282 | if not ret: |
860 | 228 | count += 1 | 283 | msg = 'Glance image failed to reach expected state.' |
861 | 229 | 284 | amulet.raise_status(amulet.FAIL, msg=msg) | |
862 | 230 | if status != 'active': | 285 | |
863 | 231 | self.log.error('image creation timed out') | 286 | # Re-validate new image |
864 | 232 | return None | 287 | self.log.debug('Validating image attributes...') |
865 | 288 | val_img_name = glance.images.get(img_id).name | ||
866 | 289 | val_img_stat = glance.images.get(img_id).status | ||
867 | 290 | val_img_pub = glance.images.get(img_id).is_public | ||
868 | 291 | val_img_cfmt = glance.images.get(img_id).container_format | ||
869 | 292 | val_img_dfmt = glance.images.get(img_id).disk_format | ||
870 | 293 | msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' | ||
871 | 294 | 'container fmt:{} disk fmt:{}'.format( | ||
872 | 295 | val_img_name, val_img_pub, img_id, | ||
873 | 296 | val_img_stat, val_img_cfmt, val_img_dfmt)) | ||
874 | 297 | |||
875 | 298 | if val_img_name == image_name and val_img_stat == 'active' \ | ||
876 | 299 | and val_img_pub is True and val_img_cfmt == 'bare' \ | ||
877 | 300 | and val_img_dfmt == 'qcow2': | ||
878 | 301 | self.log.debug(msg_attr) | ||
879 | 302 | else: | ||
880 | 303 | msg = ('Image validation failed, {}'.format(msg_attr)) |
881 | 304 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
882 | 233 | 305 | ||
883 | 234 | return image | 306 | return image |
884 | 235 | 307 | ||
885 | 236 | def delete_image(self, glance, image): | 308 | def delete_image(self, glance, image): |
886 | 237 | """Delete the specified image.""" | 309 | """Delete the specified image.""" |
903 | 238 | num_before = len(list(glance.images.list())) | 310 | |
904 | 239 | glance.images.delete(image) | 311 | # /!\ DEPRECATION WARNING |
905 | 240 | 312 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | |
906 | 241 | count = 1 | 313 | 'delete_resource instead of delete_image.') |
907 | 242 | num_after = len(list(glance.images.list())) | 314 | self.log.debug('Deleting glance image ({})...'.format(image)) |
908 | 243 | while num_after != (num_before - 1) and count < 10: | 315 | return self.delete_resource(glance.images, image, msg='glance image') |
893 | 244 | time.sleep(3) | ||
894 | 245 | num_after = len(list(glance.images.list())) | ||
895 | 246 | self.log.debug('number of images: {}'.format(num_after)) | ||
896 | 247 | count += 1 | ||
897 | 248 | |||
898 | 249 | if num_after != (num_before - 1): | ||
899 | 250 | self.log.error('image deletion timed out') | ||
900 | 251 | return False | ||
901 | 252 | |||
902 | 253 | return True | ||
909 | 254 | 316 | ||
910 | 255 | def create_instance(self, nova, image_name, instance_name, flavor): | 317 | def create_instance(self, nova, image_name, instance_name, flavor): |
911 | 256 | """Create the specified instance.""" | 318 | """Create the specified instance.""" |
912 | 319 | self.log.debug('Creating instance ' | ||
913 | 320 | '({}|{}|{})'.format(instance_name, image_name, flavor)) | ||
914 | 257 | image = nova.images.find(name=image_name) | 321 | image = nova.images.find(name=image_name) |
915 | 258 | flavor = nova.flavors.find(name=flavor) | 322 | flavor = nova.flavors.find(name=flavor) |
916 | 259 | instance = nova.servers.create(name=instance_name, image=image, | 323 | instance = nova.servers.create(name=instance_name, image=image, |
917 | @@ -276,19 +340,265 @@ | |||
918 | 276 | 340 | ||
919 | 277 | def delete_instance(self, nova, instance): | 341 | def delete_instance(self, nova, instance): |
920 | 278 | """Delete the specified instance.""" | 342 | """Delete the specified instance.""" |
937 | 279 | num_before = len(list(nova.servers.list())) | 343 | |
938 | 280 | nova.servers.delete(instance) | 344 | # /!\ DEPRECATION WARNING |
939 | 281 | 345 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | |
940 | 282 | count = 1 | 346 | 'delete_resource instead of delete_instance.') |
941 | 283 | num_after = len(list(nova.servers.list())) | 347 | self.log.debug('Deleting instance ({})...'.format(instance)) |
942 | 284 | while num_after != (num_before - 1) and count < 10: | 348 | return self.delete_resource(nova.servers, instance, |
943 | 285 | time.sleep(3) | 349 | msg='nova instance') |
944 | 286 | num_after = len(list(nova.servers.list())) | 350 | |
945 | 287 | self.log.debug('number of instances: {}'.format(num_after)) | 351 | def create_or_get_keypair(self, nova, keypair_name="testkey"): |
946 | 288 | count += 1 | 352 | """Create a new keypair, or return pointer if it already exists.""" |
947 | 289 | 353 | try: | |
948 | 290 | if num_after != (num_before - 1): | 354 | _keypair = nova.keypairs.get(keypair_name) |
949 | 291 | self.log.error('instance deletion timed out') | 355 | self.log.debug('Keypair ({}) already exists, ' |
950 | 292 | return False | 356 | 'using it.'.format(keypair_name)) |
951 | 293 | 357 | return _keypair | |
952 | 294 | return True | 358 | except: |
953 | 359 | self.log.debug('Keypair ({}) does not exist, ' | ||
954 | 360 | 'creating it.'.format(keypair_name)) | ||
955 | 361 | |||
956 | 362 | _keypair = nova.keypairs.create(name=keypair_name) | ||
957 | 363 | return _keypair | ||
958 | 364 | |||
959 | 365 | def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, | ||
960 | 366 | img_id=None, src_vol_id=None, snap_id=None): | ||
961 | 367 | """Create cinder volume, optionally from a glance image, OR | ||
962 | 368 | optionally as a clone of an existing volume, OR optionally | ||
963 | 369 | from a snapshot. Wait for the new volume status to reach | ||
964 | 370 | the expected status, validate and return a resource pointer. | ||
965 | 371 | |||
966 | 372 | :param vol_name: cinder volume display name | ||
967 | 373 | :param vol_size: size in gigabytes | ||
968 | 374 | :param img_id: optional glance image id | ||
969 | 375 | :param src_vol_id: optional source volume id to clone | ||
970 | 376 | :param snap_id: optional snapshot id to use | ||
971 | 377 | :returns: cinder volume pointer | ||
972 | 378 | """ | ||
973 | 379 | # Handle parameter input and avoid impossible combinations | ||
974 | 380 | if img_id and not src_vol_id and not snap_id: | ||
975 | 381 | # Create volume from image | ||
976 | 382 | self.log.debug('Creating cinder volume from glance image...') | ||
977 | 383 | bootable = 'true' | ||
978 | 384 | elif src_vol_id and not img_id and not snap_id: | ||
979 | 385 | # Clone an existing volume | ||
980 | 386 | self.log.debug('Cloning cinder volume...') | ||
981 | 387 | bootable = cinder.volumes.get(src_vol_id).bootable | ||
982 | 388 | elif snap_id and not src_vol_id and not img_id: | ||
983 | 389 | # Create volume from snapshot | ||
984 | 390 | self.log.debug('Creating cinder volume from snapshot...') | ||
985 | 391 | snap = cinder.volume_snapshots.find(id=snap_id) | ||
986 | 392 | vol_size = snap.size | ||
987 | 393 | snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id | ||
988 | 394 | bootable = cinder.volumes.get(snap_vol_id).bootable | ||
989 | 395 | elif not img_id and not src_vol_id and not snap_id: | ||
990 | 396 | # Create volume | ||
991 | 397 | self.log.debug('Creating cinder volume...') | ||
992 | 398 | bootable = 'false' | ||
993 | 399 | else: | ||
994 | 400 | # Impossible combination of parameters | ||
995 | 401 | msg = ('Invalid method use - name:{} size:{} img_id:{} ' | ||
996 | 402 | 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, | ||
997 | 403 | img_id, src_vol_id, | ||
998 | 404 | snap_id)) | ||
999 | 405 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1000 | 406 | |||
1001 | 407 | # Create new volume | ||
1002 | 408 | try: | ||
1003 | 409 | vol_new = cinder.volumes.create(display_name=vol_name, | ||
1004 | 410 | imageRef=img_id, | ||
1005 | 411 | size=vol_size, | ||
1006 | 412 | source_volid=src_vol_id, | ||
1007 | 413 | snapshot_id=snap_id) | ||
1008 | 414 | vol_id = vol_new.id | ||
1009 | 415 | except Exception as e: | ||
1010 | 416 | msg = 'Failed to create volume: {}'.format(e) | ||
1011 | 417 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1012 | 418 | |||
1013 | 419 | # Wait for volume to reach available status | ||
1014 | 420 | ret = self.resource_reaches_status(cinder.volumes, vol_id, | ||
1015 | 421 | expected_stat="available", | ||
1016 | 422 | msg="Volume status wait") | ||
1017 | 423 | if not ret: | ||
1018 | 424 | msg = 'Cinder volume failed to reach expected state.' | ||
1019 | 425 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1020 | 426 | |||
1021 | 427 | # Re-validate new volume | ||
1022 | 428 | self.log.debug('Validating volume attributes...') | ||
1023 | 429 | val_vol_name = cinder.volumes.get(vol_id).display_name | ||
1024 | 430 | val_vol_boot = cinder.volumes.get(vol_id).bootable | ||
1025 | 431 | val_vol_stat = cinder.volumes.get(vol_id).status | ||
1026 | 432 | val_vol_size = cinder.volumes.get(vol_id).size | ||
1027 | 433 | msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' | ||
1028 | 434 | '{} size:{}'.format(val_vol_name, vol_id, | ||
1029 | 435 | val_vol_stat, val_vol_boot, | ||
1030 | 436 | val_vol_size)) | ||
1031 | 437 | |||
1032 | 438 | if val_vol_boot == bootable and val_vol_stat == 'available' \ | ||
1033 | 439 | and val_vol_name == vol_name and val_vol_size == vol_size: | ||
1034 | 440 | self.log.debug(msg_attr) | ||
1035 | 441 | else: | ||
1036 | 442 | msg = ('Volume validation failed, {}'.format(msg_attr)) | ||
1037 | 443 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1038 | 444 | |||
1039 | 445 | return vol_new | ||
1040 | 446 | |||
1041 | 447 | def delete_resource(self, resource, resource_id, | ||
1042 | 448 | msg="resource", max_wait=120): | ||
1043 | 449 | """Delete one openstack resource, such as one instance, keypair, | ||
1044 | 450 | image, volume, stack, etc., and confirm deletion within max wait time. | ||
1045 | 451 | |||
1046 | 452 | :param resource: pointer to os resource type, ex:glance_client.images | ||
1047 | 453 | :param resource_id: unique name or id for the openstack resource | ||
1048 | 454 | :param msg: text to identify purpose in logging | ||
1049 | 455 | :param max_wait: maximum wait time in seconds | ||
1050 | 456 | :returns: True if successful, otherwise False | ||
1051 | 457 | """ | ||
1052 | 458 | self.log.debug('Deleting OpenStack resource ' | ||
1053 | 459 | '{} ({})'.format(resource_id, msg)) | ||
1054 | 460 | num_before = len(list(resource.list())) | ||
1055 | 461 | resource.delete(resource_id) | ||
1056 | 462 | |||
1057 | 463 | tries = 0 | ||
1058 | 464 | num_after = len(list(resource.list())) | ||
1059 | 465 | while num_after != (num_before - 1) and tries < (max_wait / 4): | ||
1060 | 466 | self.log.debug('{} delete check: ' | ||
1061 | 467 | '{} [{}:{}] {}'.format(msg, tries, | ||
1062 | 468 | num_before, | ||
1063 | 469 | num_after, | ||
1064 | 470 | resource_id)) | ||
1065 | 471 | time.sleep(4) | ||
1066 | 472 | num_after = len(list(resource.list())) | ||
1067 | 473 | tries += 1 | ||
1068 | 474 | |||
1069 | 475 | self.log.debug('{}: expected, actual count = {}, ' | ||
1070 | 476 | '{}'.format(msg, num_before - 1, num_after)) | ||
1071 | 477 | |||
1072 | 478 | if num_after == (num_before - 1): | ||
1073 | 479 | return True | ||
1074 | 480 | else: | ||
1075 | 481 | self.log.error('{} delete timed out'.format(msg)) | ||
1076 | 482 | return False | ||
1077 | 483 | |||
1078 | 484 | def resource_reaches_status(self, resource, resource_id, | ||
1079 | 485 | expected_stat='available', | ||
1080 | 486 | msg='resource', max_wait=120): | ||
1081 | 487 | """Wait for an openstack resource's status to reach an | 487 | """Wait for an openstack resource's status to reach an |
1082 | 488 | expected status within a specified time. Useful to confirm that | ||
1083 | 489 | nova instances, cinder vols, snapshots, glance images, heat stacks | ||
1084 | 490 | and other resources eventually reach the expected status. | ||
1085 | 491 | |||
1086 | 492 | :param resource: pointer to os resource type, ex: heat_client.stacks | ||
1087 | 493 | :param resource_id: unique id for the openstack resource | ||
1088 | 494 | :param expected_stat: status to expect resource to reach | ||
1089 | 495 | :param msg: text to identify purpose in logging | ||
1090 | 496 | :param max_wait: maximum wait time in seconds | ||
1091 | 497 | :returns: True if successful, False if status is not reached | ||
1092 | 498 | """ | ||
1093 | 499 | |||
1094 | 500 | tries = 0 | ||
1095 | 501 | resource_stat = resource.get(resource_id).status | ||
1096 | 502 | while resource_stat != expected_stat and tries < (max_wait / 4): | ||
1097 | 503 | self.log.debug('{} status check: ' | ||
1098 | 504 | '{} [{}:{}] {}'.format(msg, tries, | ||
1099 | 505 | resource_stat, | ||
1100 | 506 | expected_stat, | ||
1101 | 507 | resource_id)) | ||
1102 | 508 | time.sleep(4) | ||
1103 | 509 | resource_stat = resource.get(resource_id).status | ||
1104 | 510 | tries += 1 | ||
1105 | 511 | |||
1106 | 512 | self.log.debug('{}: expected, actual status = {}, ' | ||
1107 | 513 | '{}'.format(msg, resource_stat, expected_stat)) | ||
1108 | 514 | |||
1109 | 515 | if resource_stat == expected_stat: | ||
1110 | 516 | return True | ||
1111 | 517 | else: | ||
1112 | 518 | self.log.debug('{} never reached expected status: ' | ||
1113 | 519 | '{}'.format(resource_id, expected_stat)) | ||
1114 | 520 | return False | ||
1115 | 521 | |||
1116 | 522 | def get_ceph_osd_id_cmd(self, index): | ||
1117 | 523 | """Produce a shell command that will return a ceph-osd id.""" | ||
1118 | 524 | return ("`initctl list | grep 'ceph-osd ' | " | ||
1119 | 525 | "awk 'NR=={} {{ print $2 }}' | " | ||
1120 | 526 | "grep -o '[0-9]*'`".format(index + 1)) | ||
1121 | 527 | |||
1122 | 528 | def get_ceph_pools(self, sentry_unit): | ||
1123 | 529 | """Return a dict of ceph pools from a single ceph unit, with | ||
1124 | 530 | pool name as keys, pool id as vals.""" | ||
1125 | 531 | pools = {} | ||
1126 | 532 | cmd = 'sudo ceph osd lspools' | ||
1127 | 533 | output, code = sentry_unit.run(cmd) | ||
1128 | 534 | if code != 0: | ||
1129 | 535 | msg = ('{} `{}` returned {} ' | ||
1130 | 536 | '{}'.format(sentry_unit.info['unit_name'], | ||
1131 | 537 | cmd, code, output)) | ||
1132 | 538 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1133 | 539 | |||
1134 | 540 | # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, | ||
1135 | 541 | for pool in str(output).split(','): | ||
1136 | 542 | pool_id_name = pool.split(' ') | ||
1137 | 543 | if len(pool_id_name) == 2: | ||
1138 | 544 | pool_id = pool_id_name[0] | ||
1139 | 545 | pool_name = pool_id_name[1] | ||
1140 | 546 | pools[pool_name] = int(pool_id) | ||
1141 | 547 | |||
1142 | 548 | self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], | ||
1143 | 549 | pools)) | ||
1144 | 550 | return pools | ||
1145 | 551 | |||
1146 | 552 | def get_ceph_df(self, sentry_unit): | ||
1147 | 553 | """Return dict of ceph df json output, including ceph pool state. | ||
1148 | 554 | |||
1149 | 555 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
1150 | 556 | :returns: Dict of ceph df output | ||
1151 | 557 | """ | ||
1152 | 558 | cmd = 'sudo ceph df --format=json' | ||
1153 | 559 | output, code = sentry_unit.run(cmd) | ||
1154 | 560 | if code != 0: | ||
1155 | 561 | msg = ('{} `{}` returned {} ' | ||
1156 | 562 | '{}'.format(sentry_unit.info['unit_name'], | ||
1157 | 563 | cmd, code, output)) | ||
1158 | 564 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1159 | 565 | return json.loads(output) | ||
1160 | 566 | |||
1161 | 567 | def get_ceph_pool_sample(self, sentry_unit, pool_id=0): | ||
1162 | 568 | """Take a sample of attributes of a ceph pool, returning ceph | ||
1163 | 569 | pool name, object count and disk space used for the specified | ||
1164 | 570 | pool ID number. | ||
1165 | 571 | |||
1166 | 572 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
1167 | 573 | :param pool_id: Ceph pool ID | ||
1168 | 574 | :returns: List of pool name, object count, kb disk space used | ||
1169 | 575 | """ | ||
1170 | 576 | df = self.get_ceph_df(sentry_unit) | ||
1171 | 577 | pool_name = df['pools'][pool_id]['name'] | ||
1172 | 578 | obj_count = df['pools'][pool_id]['stats']['objects'] | ||
1173 | 579 | kb_used = df['pools'][pool_id]['stats']['kb_used'] | ||
1174 | 580 | self.log.debug('Ceph {} pool (ID {}): {} objects, ' | ||
1175 | 581 | '{} kb used'.format(pool_name, pool_id, | ||
1176 | 582 | obj_count, kb_used)) | ||
1177 | 583 | return pool_name, obj_count, kb_used | ||
1178 | 584 | |||
1179 | 585 | def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): | ||
1180 | 586 | """Validate ceph pool samples taken over time, such as pool | ||
1181 | 587 | object counts or pool kb used, before adding, after adding, and | ||
1182 | 588 | after deleting items which affect those pool attributes. The | ||
1183 | 589 | 2nd element is expected to be greater than the 1st; 3rd is expected | ||
1184 | 590 | to be less than the 2nd. | ||
1185 | 591 | |||
1186 | 592 | :param samples: List containing 3 data samples | ||
1187 | 593 | :param sample_type: String for logging and usage context | ||
1188 | 594 | :returns: None if successful, Failure message otherwise | ||
1189 | 595 | """ | ||
1190 | 596 | original, created, deleted = range(3) | ||
1191 | 597 | if samples[created] <= samples[original] or \ | ||
1192 | 598 | samples[deleted] >= samples[created]: | ||
1193 | 599 | return ('Ceph {} samples ({}) ' | ||
1194 | 600 | 'unexpected.'.format(sample_type, samples)) | ||
1195 | 601 | else: | ||
1196 | 602 | self.log.debug('Ceph {} samples (OK): ' | ||
1197 | 603 | '{}'.format(sample_type, samples)) | ||
1198 | 604 | return None | ||
1199 | 295 | 605 | ||
1200 | === modified file 'charmhelpers/contrib/openstack/context.py' | |||
1201 | --- charmhelpers/contrib/openstack/context.py 2015-04-16 19:19:18 +0000 | |||
1202 | +++ charmhelpers/contrib/openstack/context.py 2015-08-13 08:33:21 +0000 | |||
1203 | @@ -122,21 +122,24 @@ | |||
1204 | 122 | of specifying multiple key value pairs within the same string. For | 122 | of specifying multiple key value pairs within the same string. For |
1205 | 123 | example, a string in the format of 'key1=value1, key2=value2' will | 123 | example, a string in the format of 'key1=value1, key2=value2' will |
1206 | 124 | return a dict of: | 124 | return a dict of: |
1209 | 125 | {'key1': 'value1', | 125 | |
1210 | 126 | 'key2': 'value2'}. | 126 | {'key1': 'value1', |
1211 | 127 | 'key2': 'value2'}. | ||
1212 | 127 | 128 | ||
1213 | 128 | 2. A string in the above format, but supporting a comma-delimited list | 129 | 2. A string in the above format, but supporting a comma-delimited list |
1214 | 129 | of values for the same key. For example, a string in the format of | 130 | of values for the same key. For example, a string in the format of |
1215 | 130 | 'key1=value1, key2=value3,value4,value5' will return a dict of: | 131 | 'key1=value1, key2=value3,value4,value5' will return a dict of: |
1218 | 131 | {'key1', 'value1', | 132 | |
1219 | 132 | 'key2', 'value2,value3,value4'} | 133 | {'key1', 'value1', |
1220 | 134 | 'key2', 'value2,value3,value4'} | ||
1221 | 133 | 135 | ||
1222 | 134 | 3. A string containing a colon character (:) prior to an equal | 136 | 3. A string containing a colon character (:) prior to an equal |
1223 | 135 | character (=) will be treated as yaml and parsed as such. This can be | 137 | character (=) will be treated as yaml and parsed as such. This can be |
1224 | 136 | used to specify more complex key value pairs. For example, | 138 | used to specify more complex key value pairs. For example, |
1225 | 137 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will | 139 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will |
1226 | 138 | return a dict of: | 140 | return a dict of: |
1228 | 139 | {'key1', 'subkey1=value1, subkey2=value2'} | 141 | |
1229 | 142 | {'key1', 'subkey1=value1, subkey2=value2'} | ||
1230 | 140 | 143 | ||
1231 | 141 | The provided config_flags string may be a list of comma-separated values | 144 | The provided config_flags string may be a list of comma-separated values |
1232 | 142 | which themselves may be comma-separated list of values. | 145 | which themselves may be comma-separated list of values. |
1233 | @@ -240,7 +243,7 @@ | |||
1234 | 240 | if self.relation_prefix: | 243 | if self.relation_prefix: |
1235 | 241 | password_setting = self.relation_prefix + '_password' | 244 | password_setting = self.relation_prefix + '_password' |
1236 | 242 | 245 | ||
1238 | 243 | for rid in relation_ids('shared-db'): | 246 | for rid in relation_ids(self.interfaces[0]): |
1239 | 244 | for unit in related_units(rid): | 247 | for unit in related_units(rid): |
1240 | 245 | rdata = relation_get(rid=rid, unit=unit) | 248 | rdata = relation_get(rid=rid, unit=unit) |
1241 | 246 | host = rdata.get('db_host') | 249 | host = rdata.get('db_host') |
1242 | @@ -891,8 +894,6 @@ | |||
1243 | 891 | return ctxt | 894 | return ctxt |
1244 | 892 | 895 | ||
1245 | 893 | def __call__(self): | 896 | def __call__(self): |
1246 | 894 | self._ensure_packages() | ||
1247 | 895 | |||
1248 | 896 | if self.network_manager not in ['quantum', 'neutron']: | 897 | if self.network_manager not in ['quantum', 'neutron']: |
1249 | 897 | return {} | 898 | return {} |
1250 | 898 | 899 | ||
1251 | @@ -1050,13 +1051,22 @@ | |||
1252 | 1050 | :param config_file : Service's config file to query sections | 1051 | :param config_file : Service's config file to query sections |
1253 | 1051 | :param interface : Subordinate interface to inspect | 1052 | :param interface : Subordinate interface to inspect |
1254 | 1052 | """ | 1053 | """ |
1255 | 1053 | self.service = service | ||
1256 | 1054 | self.config_file = config_file | 1054 | self.config_file = config_file |
1258 | 1055 | self.interface = interface | 1055 | if isinstance(service, list): |
1259 | 1056 | self.services = service | ||
1260 | 1057 | else: | ||
1261 | 1058 | self.services = [service] | ||
1262 | 1059 | if isinstance(interface, list): | ||
1263 | 1060 | self.interfaces = interface | ||
1264 | 1061 | else: | ||
1265 | 1062 | self.interfaces = [interface] | ||
1266 | 1056 | 1063 | ||
1267 | 1057 | def __call__(self): | 1064 | def __call__(self): |
1268 | 1058 | ctxt = {'sections': {}} | 1065 | ctxt = {'sections': {}} |
1270 | 1059 | for rid in relation_ids(self.interface): | 1066 | rids = [] |
1271 | 1067 | for interface in self.interfaces: | ||
1272 | 1068 | rids.extend(relation_ids(interface)) | ||
1273 | 1069 | for rid in rids: | ||
1274 | 1060 | for unit in related_units(rid): | 1070 | for unit in related_units(rid): |
1275 | 1061 | sub_config = relation_get('subordinate_configuration', | 1071 | sub_config = relation_get('subordinate_configuration', |
1276 | 1062 | rid=rid, unit=unit) | 1072 | rid=rid, unit=unit) |
1277 | @@ -1068,29 +1078,32 @@ | |||
1278 | 1068 | 'setting from %s' % rid, level=ERROR) | 1078 | 'setting from %s' % rid, level=ERROR) |
1279 | 1069 | continue | 1079 | continue |
1280 | 1070 | 1080 | ||
1304 | 1071 | if self.service not in sub_config: | 1081 | for service in self.services: |
1305 | 1072 | log('Found subordinate_config on %s but it contained' | 1082 | if service not in sub_config: |
1306 | 1073 | 'nothing for %s service' % (rid, self.service), | 1083 | log('Found subordinate_config on %s but it contained' |
1307 | 1074 | level=INFO) | 1084 | 'nothing for %s service' % (rid, service), |
1308 | 1075 | continue | 1085 | level=INFO) |
1309 | 1076 | 1086 | continue | |
1310 | 1077 | sub_config = sub_config[self.service] | 1087 | |
1311 | 1078 | if self.config_file not in sub_config: | 1088 | sub_config = sub_config[service] |
1312 | 1079 | log('Found subordinate_config on %s but it contained' | 1089 | if self.config_file not in sub_config: |
1313 | 1080 | 'nothing for %s' % (rid, self.config_file), | 1090 | log('Found subordinate_config on %s but it contained' |
1314 | 1081 | level=INFO) | 1091 | 'nothing for %s' % (rid, self.config_file), |
1315 | 1082 | continue | 1092 | level=INFO) |
1316 | 1083 | 1093 | continue | |
1317 | 1084 | sub_config = sub_config[self.config_file] | 1094 | |
1318 | 1085 | for k, v in six.iteritems(sub_config): | 1095 | sub_config = sub_config[self.config_file] |
1319 | 1086 | if k == 'sections': | 1096 | for k, v in six.iteritems(sub_config): |
1320 | 1087 | for section, config_dict in six.iteritems(v): | 1097 | if k == 'sections': |
1321 | 1088 | log("adding section '%s'" % (section), | 1098 | for section, config_list in six.iteritems(v): |
1322 | 1089 | level=DEBUG) | 1099 | log("adding section '%s'" % (section), |
1323 | 1090 | ctxt[k][section] = config_dict | 1100 | level=DEBUG) |
1324 | 1091 | else: | 1101 | if ctxt[k].get(section): |
1325 | 1092 | ctxt[k] = v | 1102 | ctxt[k][section].extend(config_list) |
1326 | 1093 | 1103 | else: | |
1327 | 1104 | ctxt[k][section] = config_list | ||
1328 | 1105 | else: | ||
1329 | 1106 | ctxt[k] = v | ||
1330 | 1094 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) | 1107 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
1331 | 1095 | return ctxt | 1108 | return ctxt |
1332 | 1096 | 1109 | ||
1333 | 1097 | 1110 | ||
1334 | === modified file 'charmhelpers/contrib/openstack/templates/ceph.conf' | |||
1335 | --- charmhelpers/contrib/openstack/templates/ceph.conf 2014-03-26 10:26:36 +0000 | |||
1336 | +++ charmhelpers/contrib/openstack/templates/ceph.conf 2015-08-13 08:33:21 +0000 | |||
1337 | @@ -5,11 +5,11 @@ | |||
1338 | 5 | ############################################################################### | 5 | ############################################################################### |
1339 | 6 | [global] | 6 | [global] |
1340 | 7 | {% if auth -%} | 7 | {% if auth -%} |
1344 | 8 | auth_supported = {{ auth }} | 8 | auth_supported = {{ auth }} |
1345 | 9 | keyring = /etc/ceph/$cluster.$name.keyring | 9 | keyring = /etc/ceph/$cluster.$name.keyring |
1346 | 10 | mon host = {{ mon_hosts }} | 10 | mon host = {{ mon_hosts }} |
1347 | 11 | {% endif -%} | 11 | {% endif -%} |
1351 | 12 | log to syslog = {{ use_syslog }} | 12 | log to syslog = {{ use_syslog }} |
1352 | 13 | err to syslog = {{ use_syslog }} | 13 | err to syslog = {{ use_syslog }} |
1353 | 14 | clog to syslog = {{ use_syslog }} | 14 | clog to syslog = {{ use_syslog }} |
1354 | 15 | 15 | ||
1355 | 16 | 16 | ||
1356 | === modified file 'charmhelpers/contrib/openstack/templating.py' | |||
1357 | --- charmhelpers/contrib/openstack/templating.py 2015-06-11 09:00:37 +0000 | |||
1358 | +++ charmhelpers/contrib/openstack/templating.py 2015-08-13 08:33:21 +0000 | |||
1359 | @@ -29,8 +29,8 @@ | |||
1360 | 29 | try: | 29 | try: |
1361 | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions | 30 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
1362 | 31 | except ImportError: | 31 | except ImportError: |
1365 | 32 | # python-jinja2 may not be installed yet, or we're running unittests. | 32 | apt_install('python-jinja2', fatal=True) |
1366 | 33 | FileSystemLoader = ChoiceLoader = Environment = exceptions = None | 33 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
1367 | 34 | 34 | ||
1368 | 35 | 35 | ||
1369 | 36 | class OSConfigException(Exception): | 36 | class OSConfigException(Exception): |
1370 | 37 | 37 | ||
1371 | === modified file 'charmhelpers/contrib/openstack/utils.py' | |||
1372 | --- charmhelpers/contrib/openstack/utils.py 2015-06-17 12:22:29 +0000 | |||
1373 | +++ charmhelpers/contrib/openstack/utils.py 2015-08-13 08:33:21 +0000 | |||
1374 | @@ -25,6 +25,7 @@ | |||
1375 | 25 | import os | 25 | import os |
1376 | 26 | import sys | 26 | import sys |
1377 | 27 | import uuid | 27 | import uuid |
1378 | 28 | import re | ||
1379 | 28 | 29 | ||
1380 | 29 | import six | 30 | import six |
1381 | 30 | import yaml | 31 | import yaml |
1382 | @@ -71,7 +72,6 @@ | |||
1383 | 71 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' | 72 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' |
1384 | 72 | 'restricted main multiverse universe') | 73 | 'restricted main multiverse universe') |
1385 | 73 | 74 | ||
1386 | 74 | |||
1387 | 75 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ | 75 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
1388 | 76 | ('oneiric', 'diablo'), | 76 | ('oneiric', 'diablo'), |
1389 | 77 | ('precise', 'essex'), | 77 | ('precise', 'essex'), |
1390 | @@ -81,6 +81,7 @@ | |||
1391 | 81 | ('trusty', 'icehouse'), | 81 | ('trusty', 'icehouse'), |
1392 | 82 | ('utopic', 'juno'), | 82 | ('utopic', 'juno'), |
1393 | 83 | ('vivid', 'kilo'), | 83 | ('vivid', 'kilo'), |
1394 | 84 | ('wily', 'liberty'), | ||
1395 | 84 | ]) | 85 | ]) |
1396 | 85 | 86 | ||
1397 | 86 | 87 | ||
1398 | @@ -93,6 +94,7 @@ | |||
1399 | 93 | ('2014.1', 'icehouse'), | 94 | ('2014.1', 'icehouse'), |
1400 | 94 | ('2014.2', 'juno'), | 95 | ('2014.2', 'juno'), |
1401 | 95 | ('2015.1', 'kilo'), | 96 | ('2015.1', 'kilo'), |
1402 | 97 | ('2015.2', 'liberty'), | ||
1403 | 96 | ]) | 98 | ]) |
1404 | 97 | 99 | ||
1405 | 98 | # The ugly duckling | 100 | # The ugly duckling |
1406 | @@ -115,8 +117,37 @@ | |||
1407 | 115 | ('2.2.0', 'juno'), | 117 | ('2.2.0', 'juno'), |
1408 | 116 | ('2.2.1', 'kilo'), | 118 | ('2.2.1', 'kilo'), |
1409 | 117 | ('2.2.2', 'kilo'), | 119 | ('2.2.2', 'kilo'), |
1410 | 120 | ('2.3.0', 'liberty'), | ||
1411 | 118 | ]) | 121 | ]) |
1412 | 119 | 122 | ||
1413 | 123 | # >= Liberty version->codename mapping | ||
1414 | 124 | PACKAGE_CODENAMES = { | ||
1415 | 125 | 'nova-common': OrderedDict([ | ||
1416 | 126 | ('12.0.0', 'liberty'), | ||
1417 | 127 | ]), | ||
1418 | 128 | 'neutron-common': OrderedDict([ | ||
1419 | 129 | ('7.0.0', 'liberty'), | ||
1420 | 130 | ]), | ||
1421 | 131 | 'cinder-common': OrderedDict([ | ||
1422 | 132 | ('7.0.0', 'liberty'), | ||
1423 | 133 | ]), | ||
1424 | 134 | 'keystone': OrderedDict([ | ||
1425 | 135 | ('8.0.0', 'liberty'), | ||
1426 | 136 | ]), | ||
1427 | 137 | 'horizon-common': OrderedDict([ | ||
1428 | 138 | ('8.0.0', 'liberty'), | ||
1429 | 139 | ]), | ||
1430 | 140 | 'ceilometer-common': OrderedDict([ | ||
1431 | 141 | ('5.0.0', 'liberty'), | ||
1432 | 142 | ]), | ||
1433 | 143 | 'heat-common': OrderedDict([ | ||
1434 | 144 | ('5.0.0', 'liberty'), | ||
1435 | 145 | ]), | ||
1436 | 146 | 'glance-common': OrderedDict([ | ||
1437 | 147 | ('11.0.0', 'liberty'), | ||
1438 | 148 | ]), | ||
1439 | 149 | } | ||
1440 | 150 | |||
1441 | 120 | DEFAULT_LOOPBACK_SIZE = '5G' | 151 | DEFAULT_LOOPBACK_SIZE = '5G' |
1442 | 121 | 152 | ||
1443 | 122 | 153 | ||
1444 | @@ -200,20 +231,29 @@ | |||
1445 | 200 | error_out(e) | 231 | error_out(e) |
1446 | 201 | 232 | ||
1447 | 202 | vers = apt.upstream_version(pkg.current_ver.ver_str) | 233 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
1448 | 234 | match = re.match('^(\d)\.(\d)\.(\d)', vers) | ||
1449 | 235 | if match: | ||
1450 | 236 | vers = match.group(0) | ||
1451 | 203 | 237 | ||
1465 | 204 | try: | 238 | # >= Liberty independent project versions |
1466 | 205 | if 'swift' in pkg.name: | 239 | if (package in PACKAGE_CODENAMES and |
1467 | 206 | swift_vers = vers[:5] | 240 | vers in PACKAGE_CODENAMES[package]): |
1468 | 207 | if swift_vers not in SWIFT_CODENAMES: | 241 | return PACKAGE_CODENAMES[package][vers] |
1469 | 208 | # Deal with 1.10.0 upward | 242 | else: |
1470 | 209 | swift_vers = vers[:6] | 243 | # < Liberty co-ordinated project versions |
1471 | 210 | return SWIFT_CODENAMES[swift_vers] | 244 | try: |
1472 | 211 | else: | 245 | if 'swift' in pkg.name: |
1473 | 212 | vers = vers[:6] | 246 | swift_vers = vers[:5] |
1474 | 213 | return OPENSTACK_CODENAMES[vers] | 247 | if swift_vers not in SWIFT_CODENAMES: |
1475 | 214 | except KeyError: | 248 | # Deal with 1.10.0 upward |
1476 | 215 | e = 'Could not determine OpenStack codename for version %s' % vers | 249 | swift_vers = vers[:6] |
1477 | 216 | error_out(e) | 250 | return SWIFT_CODENAMES[swift_vers] |
1478 | 251 | else: | ||
1479 | 252 | vers = vers[:6] | ||
1480 | 253 | return OPENSTACK_CODENAMES[vers] | ||
1481 | 254 | except KeyError: | ||
1482 | 255 | e = 'Could not determine OpenStack codename for version %s' % vers | ||
1483 | 256 | error_out(e) | ||
1484 | 217 | 257 | ||
1485 | 218 | 258 | ||
1486 | 219 | def get_os_version_package(pkg, fatal=True): | 259 | def get_os_version_package(pkg, fatal=True): |
1487 | @@ -323,6 +363,9 @@ | |||
1488 | 323 | 'kilo': 'trusty-updates/kilo', | 363 | 'kilo': 'trusty-updates/kilo', |
1489 | 324 | 'kilo/updates': 'trusty-updates/kilo', | 364 | 'kilo/updates': 'trusty-updates/kilo', |
1490 | 325 | 'kilo/proposed': 'trusty-proposed/kilo', | 365 | 'kilo/proposed': 'trusty-proposed/kilo', |
1491 | 366 | 'liberty': 'trusty-updates/liberty', | ||
1492 | 367 | 'liberty/updates': 'trusty-updates/liberty', | ||
1493 | 368 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
1494 | 326 | } | 369 | } |
1495 | 327 | 370 | ||
1496 | 328 | try: | 371 | try: |
1497 | @@ -518,6 +561,7 @@ | |||
1498 | 518 | Clone/install all specified OpenStack repositories. | 561 | Clone/install all specified OpenStack repositories. |
1499 | 519 | 562 | ||
1500 | 520 | The expected format of projects_yaml is: | 563 | The expected format of projects_yaml is: |
1501 | 564 | |||
1502 | 521 | repositories: | 565 | repositories: |
1503 | 522 | - {name: keystone, | 566 | - {name: keystone, |
1504 | 523 | repository: 'git://git.openstack.org/openstack/keystone.git', | 567 | repository: 'git://git.openstack.org/openstack/keystone.git', |
1505 | @@ -525,11 +569,13 @@ | |||
1506 | 525 | - {name: requirements, | 569 | - {name: requirements, |
1507 | 526 | repository: 'git://git.openstack.org/openstack/requirements.git', | 570 | repository: 'git://git.openstack.org/openstack/requirements.git', |
1508 | 527 | branch: 'stable/icehouse'} | 571 | branch: 'stable/icehouse'} |
1509 | 572 | |||
1510 | 528 | directory: /mnt/openstack-git | 573 | directory: /mnt/openstack-git |
1511 | 529 | http_proxy: squid-proxy-url | 574 | http_proxy: squid-proxy-url |
1512 | 530 | https_proxy: squid-proxy-url | 575 | https_proxy: squid-proxy-url |
1513 | 531 | 576 | ||
1515 | 532 | The directory, http_proxy, and https_proxy keys are optional. | 577 | The directory, http_proxy, and https_proxy keys are optional. |
1516 | 578 | |||
1517 | 533 | """ | 579 | """ |
1518 | 534 | global requirements_dir | 580 | global requirements_dir |
1519 | 535 | parent_dir = '/mnt/openstack-git' | 581 | parent_dir = '/mnt/openstack-git' |
1520 | @@ -551,6 +597,12 @@ | |||
1521 | 551 | 597 | ||
1522 | 552 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) | 598 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
1523 | 553 | 599 | ||
1524 | 600 | # Upgrade setuptools and pip from default virtualenv versions. The default | ||
1525 | 601 | # versions in trusty break master OpenStack branch deployments. | ||
1526 | 602 | for p in ['pip', 'setuptools']: | ||
1527 | 603 | pip_install(p, upgrade=True, proxy=http_proxy, | ||
1528 | 604 | venv=os.path.join(parent_dir, 'venv')) | ||
1529 | 605 | |||
1530 | 554 | for p in projects['repositories']: | 606 | for p in projects['repositories']: |
1531 | 555 | repo = p['repository'] | 607 | repo = p['repository'] |
1532 | 556 | branch = p['branch'] | 608 | branch = p['branch'] |
1533 | @@ -612,24 +664,24 @@ | |||
1534 | 612 | else: | 664 | else: |
1535 | 613 | repo_dir = dest_dir | 665 | repo_dir = dest_dir |
1536 | 614 | 666 | ||
1537 | 667 | venv = os.path.join(parent_dir, 'venv') | ||
1538 | 668 | |||
1539 | 615 | if update_requirements: | 669 | if update_requirements: |
1540 | 616 | if not requirements_dir: | 670 | if not requirements_dir: |
1541 | 617 | error_out('requirements repo must be cloned before ' | 671 | error_out('requirements repo must be cloned before ' |
1542 | 618 | 'updating from global requirements.') | 672 | 'updating from global requirements.') |
1544 | 619 | _git_update_requirements(repo_dir, requirements_dir) | 673 | _git_update_requirements(venv, repo_dir, requirements_dir) |
1545 | 620 | 674 | ||
1546 | 621 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) | 675 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
1547 | 622 | if http_proxy: | 676 | if http_proxy: |
1550 | 623 | pip_install(repo_dir, proxy=http_proxy, | 677 | pip_install(repo_dir, proxy=http_proxy, venv=venv) |
1549 | 624 | venv=os.path.join(parent_dir, 'venv')) | ||
1551 | 625 | else: | 678 | else: |
1554 | 626 | pip_install(repo_dir, | 679 | pip_install(repo_dir, venv=venv) |
1553 | 627 | venv=os.path.join(parent_dir, 'venv')) | ||
1555 | 628 | 680 | ||
1556 | 629 | return repo_dir | 681 | return repo_dir |
1557 | 630 | 682 | ||
1558 | 631 | 683 | ||
1560 | 632 | def _git_update_requirements(package_dir, reqs_dir): | 684 | def _git_update_requirements(venv, package_dir, reqs_dir): |
1561 | 633 | """ | 685 | """ |
1562 | 634 | Update from global requirements. | 686 | Update from global requirements. |
1563 | 635 | 687 | ||
1564 | @@ -638,12 +690,14 @@ | |||
1565 | 638 | """ | 690 | """ |
1566 | 639 | orig_dir = os.getcwd() | 691 | orig_dir = os.getcwd() |
1567 | 640 | os.chdir(reqs_dir) | 692 | os.chdir(reqs_dir) |
1569 | 641 | cmd = ['python', 'update.py', package_dir] | 693 | python = os.path.join(venv, 'bin/python') |
1570 | 694 | cmd = [python, 'update.py', package_dir] | ||
1571 | 642 | try: | 695 | try: |
1572 | 643 | subprocess.check_call(cmd) | 696 | subprocess.check_call(cmd) |
1573 | 644 | except subprocess.CalledProcessError: | 697 | except subprocess.CalledProcessError: |
1574 | 645 | package = os.path.basename(package_dir) | 698 | package = os.path.basename(package_dir) |
1576 | 646 | error_out("Error updating {} from global-requirements.txt".format(package)) | 699 | error_out("Error updating {} from " |
1577 | 700 | "global-requirements.txt".format(package)) | ||
1578 | 647 | os.chdir(orig_dir) | 701 | os.chdir(orig_dir) |
1579 | 648 | 702 | ||
1580 | 649 | 703 | ||
1581 | 650 | 704 | ||
1582 | === modified file 'charmhelpers/contrib/peerstorage/__init__.py' | |||
1583 | --- charmhelpers/contrib/peerstorage/__init__.py 2015-06-03 13:09:25 +0000 | |||
1584 | +++ charmhelpers/contrib/peerstorage/__init__.py 2015-08-13 08:33:21 +0000 | |||
1585 | @@ -59,7 +59,7 @@ | |||
1586 | 59 | """ | 59 | """ |
1587 | 60 | 60 | ||
1588 | 61 | 61 | ||
1590 | 62 | def leader_get(attribute=None): | 62 | def leader_get(attribute=None, rid=None): |
1591 | 63 | """Wrapper to ensure that settings are migrated from the peer relation. | 63 | """Wrapper to ensure that settings are migrated from the peer relation. |
1592 | 64 | 64 | ||
1593 | 65 | This is to support upgrading an environment that does not support | 65 | This is to support upgrading an environment that does not support |
1594 | @@ -94,7 +94,8 @@ | |||
1595 | 94 | # If attribute not present in leader db, check if this unit has set | 94 | # If attribute not present in leader db, check if this unit has set |
1596 | 95 | # the attribute in the peer relation | 95 | # the attribute in the peer relation |
1597 | 96 | if not leader_settings: | 96 | if not leader_settings: |
1599 | 97 | peer_setting = relation_get(attribute=attribute, unit=local_unit()) | 97 | peer_setting = _relation_get(attribute=attribute, unit=local_unit(), |
1600 | 98 | rid=rid) | ||
1601 | 98 | if peer_setting: | 99 | if peer_setting: |
1602 | 99 | leader_set(settings={attribute: peer_setting}) | 100 | leader_set(settings={attribute: peer_setting}) |
1603 | 100 | leader_settings = peer_setting | 101 | leader_settings = peer_setting |
1604 | @@ -103,7 +104,7 @@ | |||
1605 | 103 | settings_migrated = True | 104 | settings_migrated = True |
1606 | 104 | migrated.add(attribute) | 105 | migrated.add(attribute) |
1607 | 105 | else: | 106 | else: |
1609 | 106 | r_settings = relation_get(unit=local_unit()) | 107 | r_settings = _relation_get(unit=local_unit(), rid=rid) |
1610 | 107 | if r_settings: | 108 | if r_settings: |
1611 | 108 | for key in set(r_settings.keys()).difference(migrated): | 109 | for key in set(r_settings.keys()).difference(migrated): |
1612 | 109 | # Leader setting wins | 110 | # Leader setting wins |
1613 | @@ -151,7 +152,7 @@ | |||
1614 | 151 | """ | 152 | """ |
1615 | 152 | try: | 153 | try: |
1616 | 153 | if rid in relation_ids('cluster'): | 154 | if rid in relation_ids('cluster'): |
1618 | 154 | return leader_get(attribute) | 155 | return leader_get(attribute, rid) |
1619 | 155 | else: | 156 | else: |
1620 | 156 | raise NotImplementedError | 157 | raise NotImplementedError |
1621 | 157 | except NotImplementedError: | 158 | except NotImplementedError: |
1622 | 158 | 159 | ||
1623 | === modified file 'charmhelpers/contrib/python/packages.py' | |||
1624 | --- charmhelpers/contrib/python/packages.py 2015-05-07 18:12:54 +0000 | |||
1625 | +++ charmhelpers/contrib/python/packages.py 2015-08-13 08:33:21 +0000 | |||
1626 | @@ -36,6 +36,8 @@ | |||
1627 | 36 | def parse_options(given, available): | 36 | def parse_options(given, available): |
1628 | 37 | """Given a set of options, check if available""" | 37 | """Given a set of options, check if available""" |
1629 | 38 | for key, value in sorted(given.items()): | 38 | for key, value in sorted(given.items()): |
1630 | 39 | if not value: | ||
1631 | 40 | continue | ||
1632 | 39 | if key in available: | 41 | if key in available: |
1633 | 40 | yield "--{0}={1}".format(key, value) | 42 | yield "--{0}={1}".format(key, value) |
1634 | 41 | 43 | ||
1635 | 42 | 44 | ||
1636 | === modified file 'charmhelpers/contrib/storage/linux/ceph.py' | |||
1637 | --- charmhelpers/contrib/storage/linux/ceph.py 2015-01-22 06:11:15 +0000 | |||
1638 | +++ charmhelpers/contrib/storage/linux/ceph.py 2015-08-13 08:33:21 +0000 | |||
1639 | @@ -60,12 +60,12 @@ | |||
1640 | 60 | KEYFILE = '/etc/ceph/ceph.client.{}.key' | 60 | KEYFILE = '/etc/ceph/ceph.client.{}.key' |
1641 | 61 | 61 | ||
1642 | 62 | CEPH_CONF = """[global] | 62 | CEPH_CONF = """[global] |
1649 | 63 | auth supported = {auth} | 63 | auth supported = {auth} |
1650 | 64 | keyring = {keyring} | 64 | keyring = {keyring} |
1651 | 65 | mon host = {mon_hosts} | 65 | mon host = {mon_hosts} |
1652 | 66 | log to syslog = {use_syslog} | 66 | log to syslog = {use_syslog} |
1653 | 67 | err to syslog = {use_syslog} | 67 | err to syslog = {use_syslog} |
1654 | 68 | clog to syslog = {use_syslog} | 68 | clog to syslog = {use_syslog} |
1655 | 69 | """ | 69 | """ |
1656 | 70 | 70 | ||
1657 | 71 | 71 | ||
1658 | 72 | 72 | ||
1659 | === modified file 'charmhelpers/contrib/storage/linux/utils.py' | |||
1660 | --- charmhelpers/contrib/storage/linux/utils.py 2015-01-22 06:06:03 +0000 | |||
1661 | +++ charmhelpers/contrib/storage/linux/utils.py 2015-08-13 08:33:21 +0000 | |||
1662 | @@ -43,9 +43,10 @@ | |||
1663 | 43 | 43 | ||
1664 | 44 | :param block_device: str: Full path of block device to clean. | 44 | :param block_device: str: Full path of block device to clean. |
1665 | 45 | ''' | 45 | ''' |
1666 | 46 | # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b | ||
1667 | 46 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up | 47 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up |
1670 | 47 | call(['sgdisk', '--zap-all', '--mbrtogpt', | 48 | call(['sgdisk', '--zap-all', '--', block_device]) |
1671 | 48 | '--clear', block_device]) | 49 | call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) |
1672 | 49 | dev_end = check_output(['blockdev', '--getsz', | 50 | dev_end = check_output(['blockdev', '--getsz', |
1673 | 50 | block_device]).decode('UTF-8') | 51 | block_device]).decode('UTF-8') |
1674 | 51 | gpt_end = int(dev_end.split()[0]) - 100 | 52 | gpt_end = int(dev_end.split()[0]) - 100 |
1675 | @@ -67,4 +68,4 @@ | |||
1676 | 67 | out = check_output(['mount']).decode('UTF-8') | 68 | out = check_output(['mount']).decode('UTF-8') |
1677 | 68 | if is_partition: | 69 | if is_partition: |
1678 | 69 | return bool(re.search(device + r"\b", out)) | 70 | return bool(re.search(device + r"\b", out)) |
1680 | 70 | return bool(re.search(device + r"[0-9]+\b", out)) | 71 | return bool(re.search(device + r"[0-9]*\b", out)) |
1681 | 71 | 72 | ||
1682 | === modified file 'charmhelpers/contrib/unison/__init__.py' | |||
1683 | --- charmhelpers/contrib/unison/__init__.py 2015-04-03 15:23:46 +0000 | |||
1684 | +++ charmhelpers/contrib/unison/__init__.py 2015-08-13 08:33:21 +0000 | |||
1685 | @@ -16,7 +16,7 @@ | |||
1686 | 16 | 16 | ||
1687 | 17 | # Easy file synchronization among peer units using ssh + unison. | 17 | # Easy file synchronization among peer units using ssh + unison. |
1688 | 18 | # | 18 | # |
1690 | 19 | # From *both* peer relation -joined and -changed, add a call to | 19 | # For the -joined, -changed, and -departed peer relations, add a call to |
1691 | 20 | # ssh_authorized_peers() describing the peer relation and the desired | 20 | # ssh_authorized_peers() describing the peer relation and the desired |
1692 | 21 | # user + group. After all peer relations have settled, all hosts should | 21 | # user + group. After all peer relations have settled, all hosts should |
1693 | 22 | # be able to connect to one another via key auth'd ssh as the specified user. | 22 | # be able to connect to one another via key auth'd ssh as the specified user. |
1694 | @@ -30,14 +30,21 @@ | |||
1695 | 30 | # ... | 30 | # ... |
1696 | 31 | # ssh_authorized_peers(peer_interface='cluster', | 31 | # ssh_authorized_peers(peer_interface='cluster', |
1697 | 32 | # user='juju_ssh', group='juju_ssh', | 32 | # user='juju_ssh', group='juju_ssh', |
1699 | 33 | # ensure_user=True) | 33 | # ensure_local_user=True) |
1700 | 34 | # ... | 34 | # ... |
1701 | 35 | # | 35 | # |
1702 | 36 | # cluster-relation-changed: | 36 | # cluster-relation-changed: |
1703 | 37 | # ... | 37 | # ... |
1704 | 38 | # ssh_authorized_peers(peer_interface='cluster', | 38 | # ssh_authorized_peers(peer_interface='cluster', |
1705 | 39 | # user='juju_ssh', group='juju_ssh', | 39 | # user='juju_ssh', group='juju_ssh', |
1707 | 40 | # ensure_user=True) | 40 | # ensure_local_user=True) |
1708 | 41 | # ... | ||
1709 | 42 | # | ||
1710 | 43 | # cluster-relation-departed: | ||
1711 | 44 | # ... | ||
1712 | 45 | # ssh_authorized_peers(peer_interface='cluster', | ||
1713 | 46 | # user='juju_ssh', group='juju_ssh', | ||
1714 | 47 | # ensure_local_user=True) | ||
1715 | 41 | # ... | 48 | # ... |
1716 | 42 | # | 49 | # |
1717 | 43 | # Hooks are now free to sync files as easily as: | 50 | # Hooks are now free to sync files as easily as: |
1718 | @@ -92,11 +99,18 @@ | |||
1719 | 92 | raise Exception | 99 | raise Exception |
1720 | 93 | 100 | ||
1721 | 94 | 101 | ||
1723 | 95 | def create_private_key(user, priv_key_path): | 102 | def create_private_key(user, priv_key_path, key_type='rsa'): |
1724 | 103 | types_bits = { | ||
1725 | 104 | 'rsa': '2048', | ||
1726 | 105 | 'ecdsa': '521', | ||
1727 | 106 | } | ||
1728 | 107 | if key_type not in types_bits: | ||
1729 | 108 | log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR) | ||
1730 | 109 | key_type = 'rsa' | ||
1731 | 96 | if not os.path.isfile(priv_key_path): | 110 | if not os.path.isfile(priv_key_path): |
1732 | 97 | log('Generating new SSH key for user %s.' % user) | 111 | log('Generating new SSH key for user %s.' % user) |
1735 | 98 | cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', | 112 | cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type, |
1736 | 99 | '-f', priv_key_path] | 113 | '-b', types_bits[key_type], '-f', priv_key_path] |
1737 | 100 | check_call(cmd) | 114 | check_call(cmd) |
1738 | 101 | else: | 115 | else: |
1739 | 102 | log('SSH key already exists at %s.' % priv_key_path) | 116 | log('SSH key already exists at %s.' % priv_key_path) |
1740 | @@ -152,7 +166,7 @@ | |||
1741 | 152 | known_hosts = os.path.join(ssh_dir, 'known_hosts') | 166 | known_hosts = os.path.join(ssh_dir, 'known_hosts') |
1742 | 153 | khosts = [] | 167 | khosts = [] |
1743 | 154 | for host in hosts: | 168 | for host in hosts: |
1745 | 155 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] | 169 | cmd = ['ssh-keyscan', host] |
1746 | 156 | remote_key = check_output(cmd, universal_newlines=True).strip() | 170 | remote_key = check_output(cmd, universal_newlines=True).strip() |
1747 | 157 | khosts.append(remote_key) | 171 | khosts.append(remote_key) |
1748 | 158 | log('Syncing known_hosts @ %s.' % known_hosts) | 172 | log('Syncing known_hosts @ %s.' % known_hosts) |
1749 | @@ -179,7 +193,8 @@ | |||
1750 | 179 | hook = hook_name() | 193 | hook = hook_name() |
1751 | 180 | if hook == '%s-relation-joined' % peer_interface: | 194 | if hook == '%s-relation-joined' % peer_interface: |
1752 | 181 | relation_set(ssh_pub_key=pub_key) | 195 | relation_set(ssh_pub_key=pub_key) |
1754 | 182 | elif hook == '%s-relation-changed' % peer_interface: | 196 | elif hook == '%s-relation-changed' % peer_interface or \ |
1755 | 197 | hook == '%s-relation-departed' % peer_interface: | ||
1756 | 183 | hosts = [] | 198 | hosts = [] |
1757 | 184 | keys = [] | 199 | keys = [] |
1758 | 185 | 200 | ||
1759 | 186 | 201 | ||
1760 | === added file 'charmhelpers/coordinator.py' | |||
1761 | --- charmhelpers/coordinator.py 1970-01-01 00:00:00 +0000 | |||
1762 | +++ charmhelpers/coordinator.py 2015-08-13 08:33:21 +0000 | |||
1763 | @@ -0,0 +1,607 @@ | |||
1764 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
1765 | 2 | # | ||
1766 | 3 | # This file is part of charm-helpers. | ||
1767 | 4 | # | ||
1768 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
1769 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
1770 | 7 | # published by the Free Software Foundation. | ||
1771 | 8 | # | ||
1772 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
1773 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1774 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1775 | 12 | # GNU Lesser General Public License for more details. | ||
1776 | 13 | # | ||
1777 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
1778 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
1779 | 16 | ''' | ||
1780 | 17 | The coordinator module allows you to use Juju's leadership feature to | ||
1781 | 18 | coordinate operations between units of a service. | ||
1782 | 19 | |||
1783 | 20 | Behavior is defined in subclasses of coordinator.BaseCoordinator. | ||
1784 | 21 | One implementation is provided (coordinator.Serial), which allows an | ||
1785 | 22 | operation to be run on a single unit at a time, on a first come, first | ||
1786 | 23 | served basis. You can trivially define more complex behavior by | ||
1787 | 24 | subclassing BaseCoordinator or Serial. | ||
1788 | 25 | |||
1789 | 26 | :author: Stuart Bishop <stuart.bishop@canonical.com> | ||
1790 | 27 | |||
1791 | 28 | |||
1792 | 29 | Services Framework Usage | ||
1793 | 30 | ======================== | ||
1794 | 31 | |||
1795 | 32 | Ensure a peer relation is defined in metadata.yaml. Instantiate a | ||
1796 | 33 | BaseCoordinator subclass before invoking ServiceManager.manage(). | ||
1797 | 34 | Ensure that ServiceManager.manage() is wired up to the leader-elected, | ||
1798 | 35 | leader-settings-changed, peer relation-changed and peer | ||
1799 | 36 | relation-departed hooks in addition to any other hooks you need, or your | ||
1800 | 37 | service will deadlock. | ||
1801 | 38 | |||
1802 | 39 | Ensure calls to acquire() are guarded, so that locks are only requested | ||
1803 | 40 | when they are really needed (and thus hooks only triggered when necessary). | ||
1804 | 41 | Failing to do this and calling acquire() unconditionally will put your unit | ||
1805 | 42 | into a hook loop. Calls to granted() do not need to be guarded. | ||
1806 | 43 | |||
1807 | 44 | For example:: | ||
1808 | 45 | |||
1809 | 46 | from charmhelpers.core import hookenv, services | ||
1810 | 47 | from charmhelpers import coordinator | ||
1811 | 48 | |||
1812 | 49 | def maybe_restart(servicename): | ||
1813 | 50 | serial = coordinator.Serial() | ||
1814 | 51 | if needs_restart(): | ||
1815 | 52 | serial.acquire('restart') | ||
1816 | 53 | if serial.granted('restart'): | ||
1817 | 54 | hookenv.service_restart(servicename) | ||
1818 | 55 | |||
1819 | 56 | services = [dict(service='servicename', | ||
1820 | 57 | data_ready=[maybe_restart])] | ||
1821 | 58 | |||
1822 | 59 | if __name__ == '__main__': | ||
1823 | 60 | _ = coordinator.Serial() # Must instantiate before manager.manage() | ||
1824 | 61 | manager = services.ServiceManager(services) | ||
1825 | 62 | manager.manage() | ||
1826 | 63 | |||
1827 | 64 | |||
1828 | 65 | You can implement a similar pattern using a decorator. If the lock has | ||
1829 | 66 | not been granted, an attempt to acquire() it will be made if the guard | ||
1830 | 67 | function returns True. If the lock has been granted, the decorated function | ||
1831 | 68 | is run as normal:: | ||
1832 | 69 | |||
1833 | 70 | from charmhelpers.core import hookenv, services | ||
1834 | 71 | from charmhelpers import coordinator | ||
1835 | 72 | |||
1836 | 73 | serial = coordinator.Serial() # Global, instantiated on module import. | ||
1837 | 74 | |||
1838 | 75 | def needs_restart(): | ||
1839 | 76 | [ ... Introspect state. Return True if restart is needed ... ] | ||
1840 | 77 | |||
1841 | 78 | @serial.require('restart', needs_restart) | ||
1842 | 79 | def maybe_restart(servicename): | ||
1843 | 80 | hookenv.service_restart(servicename) | ||
1844 | 81 | |||
1845 | 82 | services = [dict(service='servicename', | ||
1846 | 83 | data_ready=[maybe_restart])] | ||
1847 | 84 | |||
1848 | 85 | if __name__ == '__main__': | ||
1849 | 86 | manager = services.ServiceManager(services) | ||
1850 | 87 | manager.manage() | ||
1851 | 88 | |||
1852 | 89 | |||
1853 | 90 | Traditional Usage | ||
1854 | 91 | ================= | ||
1855 | 92 | |||
1856 | 93 | Ensure a peer relation is defined in metadata.yaml. | ||
1857 | 94 | |||
1858 | 95 | If you are using charmhelpers.core.hookenv.Hooks, ensure that a | ||
1859 | 96 | BaseCoordinator subclass is instantiated before calling Hooks.execute. | ||
1860 | 97 | |||
1861 | 98 | If you are not using charmhelpers.core.hookenv.Hooks, ensure | ||
1862 | 99 | that a BaseCoordinator subclass is instantiated and its handle() | ||
1863 | 100 | method called at the start of all your hooks. | ||
1864 | 101 | |||
1865 | 102 | For example:: | ||
1866 | 103 | |||
1867 | 104 | import sys | ||
1868 | 105 | from charmhelpers.core import hookenv | ||
1869 | 106 | from charmhelpers import coordinator | ||
1870 | 107 | |||
1871 | 108 | hooks = hookenv.Hooks() | ||
1872 | 109 | |||
1873 | 110 | def maybe_restart(): | ||
1874 | 111 | serial = coordinator.Serial() | ||
1875 | 112 | if serial.granted('restart'): | ||
1876 | 113 | hookenv.service_restart('myservice') | ||
1877 | 114 | |||
1878 | 115 | @hooks.hook | ||
1879 | 116 | def config_changed(): | ||
1880 | 117 | update_config() | ||
1881 | 118 | serial = coordinator.Serial() | ||
1882 | 119 | if needs_restart(): | ||
1883 | 120 | serial.acquire('restart'): | ||
1884 | 121 | maybe_restart() | ||
1885 | 122 | |||
1886 | 123 | # Cluster hooks must be wired up. | ||
1887 | 124 | @hooks.hook('cluster-relation-changed', 'cluster-relation-departed') | ||
1888 | 125 | def cluster_relation_changed(): | ||
1889 | 126 | maybe_restart() | ||
1890 | 127 | |||
1891 | 128 | # Leader hooks must be wired up. | ||
1892 | 129 | @hooks.hook('leader-elected', 'leader-settings-changed') | ||
1893 | 130 | def leader_settings_changed(): | ||
1894 | 131 | maybe_restart() | ||
1895 | 132 | |||
1896 | 133 | [ ... repeat for *all* other hooks you are using ... ] | ||
1897 | 134 | |||
1898 | 135 | if __name__ == '__main__': | ||
1899 | 136 | _ = coordinator.Serial() # Must instantiate before execute() | ||
1900 | 137 | hooks.execute(sys.argv) | ||
1901 | 138 | |||
1902 | 139 | |||
1903 | 140 | You can also use the require decorator. If the lock has not been granted, | ||
1904 | 141 | an attempt to acquire() it will be made if the guard function returns True. | ||
1905 | 142 | If the lock has been granted, the decorated function is run as normal:: | ||
1906 | 143 | |||
1907 | 144 | from charmhelpers.core import hookenv | ||
1908 | 145 | |||
1909 | 146 | hooks = hookenv.Hooks() | ||
1910 | 147 | serial = coordinator.Serial() # Must instantiate before execute() | ||
1911 | 148 | |||
1912 | 149 | @require('restart', needs_restart) | ||
1913 | 150 | def maybe_restart(): | ||
1914 | 151 | hookenv.service_restart('myservice') | ||
1915 | 152 | |||
1916 | 153 | @hooks.hook('install', 'config-changed', 'upgrade-charm', | ||
1917 | 154 | # Peer and leader hooks must be wired up. | ||
1918 | 155 | 'cluster-relation-changed', 'cluster-relation-departed', | ||
1919 | 156 | 'leader-elected', 'leader-settings-changed') | ||
1920 | 157 | def default_hook(): | ||
1921 | 158 | [...] | ||
1922 | 159 | maybe_restart() | ||
1923 | 160 | |||
1924 | 161 | if __name__ == '__main__': | ||
1925 | 162 | hooks.execute() | ||
1926 | 163 | |||
1927 | 164 | |||
1928 | 165 | Details | ||
1929 | 166 | ======= | ||
1930 | 167 | |||
1931 | 168 | A simple API is provided similar to traditional locking APIs. A lock | ||
1932 | 169 | may be requested using the acquire() method, and the granted() method | ||
1933 | 170 | may be used to check if a lock previously requested by acquire() has | ||
1934 | 171 | been granted. It doesn't matter how many times acquire() is called in a | ||
1935 | 172 | hook. | ||
1936 | 173 | |||
1937 | 174 | Locks are released at the end of the hook they are acquired in. This may | ||
1938 | 175 | be the current hook if the unit is leader and the lock is free. It is | ||
1939 | 176 | more likely a future hook (probably leader-settings-changed, possibly | ||
1940 | 177 | the peer relation-changed or departed hook, potentially any hook). | ||
1941 | 178 | |||
1942 | 179 | Whenever a charm needs to perform a coordinated action it will acquire() | ||
1943 | 180 | the lock and perform the action immediately if acquisition is | ||
1944 | 181 | successful. It will also need to perform the same action in every other | ||
1945 | 182 | hook if the lock has been granted. | ||
1946 | 183 | |||
1947 | 184 | |||
1948 | 185 | Grubby Details | ||
1949 | 186 | -------------- | ||
1950 | 187 | |||
1951 | 188 | Why do you need to be able to perform the same action in every hook? | ||
1952 | 189 | If the unit is the leader, then it may be able to grant its own lock | ||
1953 | 190 | and perform the action immediately in the source hook. If the unit is | ||
1954 | 191 | the leader and cannot immediately grant the lock, then its only | ||
1955 | 192 | guaranteed chance of acquiring the lock is in the peer relation-joined, | ||
1956 | 193 | relation-changed or peer relation-departed hooks when another unit has | ||
1957 | 194 | released it (the only channel to communicate to the leader is the peer | ||
1958 | 195 | relation). If the unit is not the leader, then it is unlikely the lock | ||
1959 | 196 | is granted in the source hook (a previous hook must have also made the | ||
1960 | 197 | request for this to happen). A non-leader is notified about the lock via | ||
1961 | 198 | leader settings. These changes may be visible in any hook, even before | ||
1962 | 199 | the leader-settings-changed hook has been invoked. Or the requesting | ||
1963 | 200 | unit may be promoted to leader after making a request, in which case the | ||
1964 | 201 | lock may be granted in leader-elected or in a future peer | ||
1965 | 202 | relation-changed or relation-departed hook. | ||
1966 | 203 | |||
1967 | 204 | This could be simpler if leader-settings-changed was invoked on the | ||
1968 | 205 | leader. We could then never grant locks except in | ||
1969 | 206 | leader-settings-changed hooks giving one place for the operation to be | ||
1970 | 207 | performed. Unfortunately this is not the case with Juju 1.23 leadership. | ||
1971 | 208 | |||
1972 | 209 | But of course, this doesn't really matter to most people as most people | ||
1973 | 210 | seem to prefer the Services Framework or similar reset-the-world | ||
1974 | 211 | approaches, rather than the twisty maze of attempting to deduce what | ||
1975 | 212 | should be done based on what hook happens to be running (which always | ||
1976 | 213 | seems to evolve into reset-the-world anyway when the charm grows beyond | ||
1977 | 214 | the trivial). | ||
1978 | 215 | |||
1979 | 216 | I chose not to implement a callback model, where a callback was passed | ||
1980 | 217 | to acquire to be executed when the lock is granted, because the callback | ||
1981 | 218 | may become invalid between making the request and the lock being granted | ||
1982 | 219 | due to an upgrade-charm being run in the interim. And it would create | ||
1983 | 220 | restrictions, such as no lambdas, callbacks defined at the top level of a | ||
1984 | 221 | module, etc. Still, we could implement it on top of what is here, eg. | ||
1985 | 222 | by adding a defer decorator that stores a pickle of itself to disk and | ||
1986 | 223 | have BaseCoordinator unpickle and execute them when the locks are granted. | ||
1987 | 224 | ''' | ||
1988 | 225 | from datetime import datetime | ||
1989 | 226 | from functools import wraps | ||
1990 | 227 | import json | ||
1991 | 228 | import os.path | ||
1992 | 229 | |||
1993 | 230 | from six import with_metaclass | ||
1994 | 231 | |||
1995 | 232 | from charmhelpers.core import hookenv | ||
1996 | 233 | |||
1997 | 234 | |||
1998 | 235 | # We make BaseCoordinator and subclasses singletons, so that if we | ||
1999 | 236 | # need to spill to local storage then only a single instance does so, | ||
2000 | 237 | # rather than having multiple instances stomp over each other. | ||
2001 | 238 | class Singleton(type): | ||
2002 | 239 | _instances = {} | ||
2003 | 240 | |||
2004 | 241 | def __call__(cls, *args, **kwargs): | ||
2005 | 242 | if cls not in cls._instances: | ||
2006 | 243 | cls._instances[cls] = super(Singleton, cls).__call__(*args, | ||
2007 | 244 | **kwargs) | ||
2008 | 245 | return cls._instances[cls] | ||
2009 | 246 | |||
2010 | 247 | |||
2011 | 248 | class BaseCoordinator(with_metaclass(Singleton, object)): | ||
2012 | 249 | relid = None # Peer relation-id, set by __init__ | ||
2013 | 250 | relname = None | ||
2014 | 251 | |||
2015 | 252 | grants = None # self.grants[unit][lock] == timestamp | ||
2016 | 253 | requests = None # self.requests[unit][lock] == timestamp | ||
2017 | 254 | |||
2018 | 255 | def __init__(self, relation_key='coordinator', peer_relation_name=None): | ||
2019 | 256 | '''Instantiate a Coordinator. | ||
2020 | 257 | |||
2021 | 258 | Data is stored on the peer relation and in leadership storage | ||
2022 | 259 | under the provided relation_key. | ||
2023 | 260 | |||
2024 | 261 | The peer relation is identified by peer_relation_name, and defaults | ||
2025 | 262 | to the first one found in metadata.yaml. | ||
2026 | 263 | ''' | ||
2027 | 264 | # Most initialization is deferred, since invoking hook tools from | ||
2028 | 265 | # the constructor makes testing hard. | ||
2029 | 266 | self.key = relation_key | ||
2030 | 267 | self.relname = peer_relation_name | ||
2031 | 268 | hookenv.atstart(self.initialize) | ||
2032 | 269 | |||
2033 | 270 | # Ensure that handle() is called, without placing that burden on | ||
2034 | 271 | # the charm author. They still need to do this manually if they | ||
2035 | 272 | # are not using a hook framework. | ||
2036 | 273 | hookenv.atstart(self.handle) | ||
2037 | 274 | |||
2038 | 275 | def initialize(self): | ||
2039 | 276 | if self.requests is not None: | ||
2040 | 277 | return # Already initialized. | ||
2041 | 278 | |||
2042 | 279 | assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+' | ||
2043 | 280 | |||
2044 | 281 | if self.relname is None: | ||
2045 | 282 | self.relname = _implicit_peer_relation_name() | ||
2046 | 283 | |||
2047 | 284 | relids = hookenv.relation_ids(self.relname) | ||
2048 | 285 | if relids: | ||
2049 | 286 | self.relid = sorted(relids)[0] | ||
2050 | 287 | |||
2051 | 288 | # Load our state, from leadership, the peer relationship, and maybe | ||
2052 | 289 | # local state as a fallback. Populates self.requests and self.grants. | ||
2053 | 290 | self._load_state() | ||
2054 | 291 | self._emit_state() | ||
2055 | 292 | |||
2056 | 293 | # Save our state if the hook completes successfully. | ||
2057 | 294 | hookenv.atexit(self._save_state) | ||
2058 | 295 | |||
2059 | 296 | # Schedule release of granted locks for the end of the hook. | ||
2060 | 297 | # This needs to be the last of our atexit callbacks to ensure | ||
2061 | 298 | # it will be run first when the hook is complete, because there | ||
2062 | 299 | # is no point mutating our state after it has been saved. | ||
2063 | 300 | hookenv.atexit(self._release_granted) | ||
2064 | 301 | |||
2065 | 302 | def acquire(self, lock): | ||
2066 | 303 | '''Acquire the named lock, non-blocking. | ||
2067 | 304 | |||
2068 | 305 | The lock may be granted immediately, or in a future hook. | ||
2069 | 306 | |||
2070 | 307 | Returns True if the lock has been granted. The lock will be | ||
2071 | 308 | automatically released at the end of the hook in which it is | ||
2072 | 309 | granted. | ||
2073 | 310 | |||
2074 | 311 | Do not mindlessly call this method, as it triggers a cascade of | ||
2075 | 312 | hooks. For example, if you call acquire() every time in your | ||
2076 | 313 | peer relation-changed hook you will end up with an infinite loop | ||
2077 | 314 | of hooks. It should almost always be guarded by some condition. | ||
2078 | 315 | ''' | ||
2079 | 316 | unit = hookenv.local_unit() | ||
2080 | 317 | ts = self.requests[unit].get(lock) | ||
2081 | 318 | if not ts: | ||
2082 | 319 | # If there is no outstanding request on the peer relation, | ||
2083 | 320 | # create one. | ||
2084 | 321 | self.requests.setdefault(lock, {}) | ||
2085 | 322 | self.requests[unit][lock] = _timestamp() | ||
2086 | 323 | self.msg('Requested {}'.format(lock)) | ||
2087 | 324 | |||
2088 | 325 | # If the leader has granted the lock, yay. | ||
2089 | 326 | if self.granted(lock): | ||
2090 | 327 | self.msg('Acquired {}'.format(lock)) | ||
2091 | 328 | return True | ||
2092 | 329 | |||
2093 | 330 | # If the unit making the request also happens to be the | ||
2094 | 331 | # leader, it must handle the request now. Even though the | ||
2095 | 332 | # request has been stored on the peer relation, the peer | ||
2096 | 333 | # relation-changed hook will not be triggered. | ||
2097 | 334 | if hookenv.is_leader(): | ||
2098 | 335 | return self.grant(lock, unit) | ||
2099 | 336 | |||
2100 | 337 | return False # Can't acquire lock, yet. Maybe next hook. | ||
2101 | 338 | |||
2102 | 339 | def granted(self, lock): | ||
2103 | 340 | '''Return True if a previously requested lock has been granted''' | ||
2104 | 341 | unit = hookenv.local_unit() | ||
2105 | 342 | ts = self.requests[unit].get(lock) | ||
2106 | 343 | if ts and self.grants.get(unit, {}).get(lock) == ts: | ||
2107 | 344 | return True | ||
2108 | 345 | return False | ||
2109 | 346 | |||
2110 | 347 | def requested(self, lock): | ||
2111 | 348 | '''Return True if we are in the queue for the lock''' | ||
2112 | 349 | return lock in self.requests[hookenv.local_unit()] | ||
2113 | 350 | |||
2114 | 351 | def request_timestamp(self, lock): | ||
2115 | 352 | '''Return the timestamp of our outstanding request for lock, or None. | ||
2116 | 353 | |||
2117 | 354 | Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute. | ||
2118 | 355 | ''' | ||
2119 | 356 | ts = self.requests[hookenv.local_unit()].get(lock, None) | ||
2120 | 357 | if ts is not None: | ||
2121 | 358 | return datetime.strptime(ts, _timestamp_format) | ||
2122 | 359 | |||
2123 | 360 | def handle(self): | ||
2124 | 361 | if not hookenv.is_leader(): | ||
2125 | 362 | return # Only the leader can grant requests. | ||
2126 | 363 | |||
2127 | 364 | self.msg('Leader handling coordinator requests') | ||
2128 | 365 | |||
2129 | 366 | # Clear our grants that have been released. | ||
2130 | 367 | for unit in self.grants.keys(): | ||
2131 | 368 | for lock, grant_ts in list(self.grants[unit].items()): | ||
2132 | 369 | req_ts = self.requests.get(unit, {}).get(lock) | ||
2133 | 370 | if req_ts != grant_ts: | ||
2134 | 371 | # The request timestamp does not match the granted | ||
2135 | 372 | # timestamp. Several hooks on 'unit' may have run | ||
2136 | 373 | # before the leader got a chance to make a decision, | ||
2137 | 374 | # and 'unit' may have released its lock and attempted | ||
2138 | 375 | # to reacquire it. This will change the timestamp, | ||
2139 | 376 | # and we correctly revoke the old grant putting it | ||
2140 | 377 | # to the end of the queue. | ||
2141 | 378 | ts = datetime.strptime(self.grants[unit][lock], | ||
2142 | 379 | _timestamp_format) | ||
2143 | 380 | del self.grants[unit][lock] | ||
2144 | 381 | self.released(unit, lock, ts) | ||
2145 | 382 | |||
2146 | 383 | # Grant locks | ||
2147 | 384 | for unit in self.requests.keys(): | ||
2148 | 385 | for lock in self.requests[unit]: | ||
2149 | 386 | self.grant(lock, unit) | ||
2150 | 387 | |||
2151 | 388 | def grant(self, lock, unit): | ||
2152 | 389 | '''Maybe grant the lock to a unit. | ||
2153 | 390 | |||
2154 | 391 | The decision to grant the lock or not is made for $lock | ||
2155 | 392 | by a corresponding method grant_$lock, which you may define | ||
2156 | 393 | in a subclass. If no such method is defined, the default_grant | ||
2157 | 394 | method is used. See Serial.default_grant() for details. | ||
2158 | 395 | ''' | ||
2159 | 396 | if not hookenv.is_leader(): | ||
2160 | 397 | return False # Not the leader, so we cannot grant. | ||
2161 | 398 | |||
2162 | 399 | # Set of units already granted the lock. | ||
2163 | 400 | granted = set() | ||
2164 | 401 | for u in self.grants: | ||
2165 | 402 | if lock in self.grants[u]: | ||
2166 | 403 | granted.add(u) | ||
2167 | 404 | if unit in granted: | ||
2168 | 405 | return True # Already granted. | ||
2169 | 406 | |||
2170 | 407 | # Ordered list of units waiting for the lock. | ||
2171 | 408 | reqs = set() | ||
2172 | 409 | for u in self.requests: | ||
2173 | 410 | if u in granted: | ||
2174 | 411 | continue # In the granted set. Not wanted in the req list. | ||
2175 | 412 | for l, ts in self.requests[u].items(): | ||
2176 | 413 | if l == lock: | ||
2177 | 414 | reqs.add((ts, u)) | ||
2178 | 415 | queue = [t[1] for t in sorted(reqs)] | ||
2179 | 416 | if unit not in queue: | ||
2180 | 417 | return False # Unit has not requested the lock. | ||
2181 | 418 | |||
2182 | 419 | # Locate custom logic, or fallback to the default. | ||
2183 | 420 | grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant) | ||
2184 | 421 | |||
2185 | 422 | if grant_func(lock, unit, granted, queue): | ||
2186 | 423 | # Grant the lock. | ||
2187 | 424 | self.msg('Leader grants {} to {}'.format(lock, unit)) | ||
2188 | 425 | self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock] | ||
2189 | 426 | return True | ||
2190 | 427 | |||
2191 | 428 | return False | ||
2192 | 429 | |||
2193 | 430 | def released(self, unit, lock, timestamp): | ||
2194 | 431 | '''Called on the leader when it has released a lock. | ||
2195 | 432 | |||
2196 | 433 | By default, does nothing but log messages. Override if you | ||
2197 | 434 | need to perform additional housekeeping when a lock is released, | ||
2198 | 435 | for example recording timestamps. | ||
2199 | 436 | ''' | ||
2200 | 437 | interval = _utcnow() - timestamp | ||
2201 | 438 | self.msg('Leader released {} from {}, held {}'.format(lock, unit, | ||
2202 | 439 | interval)) | ||
2203 | 440 | |||
2204 | 441 | def require(self, lock, guard_func, *guard_args, **guard_kw): | ||
2205 | 442 | """Decorate a function to be run only when a lock is acquired. | ||
2206 | 443 | |||
2207 | 444 | The lock is requested if the guard function returns True. | ||
2208 | 445 | |||
2209 | 446 | The decorated function is called if the lock has been granted. | ||
2210 | 447 | """ | ||
2211 | 448 | def decorator(f): | ||
2212 | 449 | @wraps(f) | ||
2213 | 450 | def wrapper(*args, **kw): | ||
2214 | 451 | if self.granted(lock): | ||
2215 | 452 | self.msg('Granted {}'.format(lock)) | ||
2216 | 453 | return f(*args, **kw) | ||
2217 | 454 | if guard_func(*guard_args, **guard_kw) and self.acquire(lock): | ||
2218 | 455 | return f(*args, **kw) | ||
2219 | 456 | return None | ||
2220 | 457 | return wrapper | ||
2221 | 458 | return decorator | ||
2222 | 459 | |||
2223 | 460 | def msg(self, msg): | ||
2224 | 461 | '''Emit a message. Override to customize log spam.''' | ||
2225 | 462 | hookenv.log('coordinator.{} {}'.format(self._name(), msg), | ||
2226 | 463 | level=hookenv.INFO) | ||
2227 | 464 | |||
2228 | 465 | def _name(self): | ||
2229 | 466 | return self.__class__.__name__ | ||
2230 | 467 | |||
2231 | 468 | def _load_state(self): | ||
2232 | 469 | self.msg('Loading state'.format(self._name())) | ||
2233 | 470 | |||
2234 | 471 | # All responses must be stored in the leadership settings. | ||
2235 | 472 | # The leader cannot use local state, as a different unit may | ||
2236 | 473 | # be leader next time. Which is fine, as the leadership | ||
2237 | 474 | # settings are always available. | ||
2238 | 475 | self.grants = json.loads(hookenv.leader_get(self.key) or '{}') | ||
2239 | 476 | |||
2240 | 477 | local_unit = hookenv.local_unit() | ||
2241 | 478 | |||
2242 | 479 | # All requests must be stored on the peer relation. This is | ||
2243 | 480 | # the only channel units have to communicate with the leader. | ||
2244 | 481 | # Even the leader needs to store its requests here, as a | ||
2245 | 482 | # different unit may be leader by the time the request can be | ||
2246 | 483 | # granted. | ||
2247 | 484 | if self.relid is None: | ||
2248 | 485 | # The peer relation is not available. Maybe we are early in | ||
2249 | 486 | # the unit's lifecycle. Maybe this unit is standalone. | ||
2250 | 487 | # Fallback to using local state. | ||
2251 | 488 | self.msg('No peer relation. Loading local state') | ||
2252 | 489 | self.requests = {local_unit: self._load_local_state()} | ||
2253 | 490 | else: | ||
2254 | 491 | self.requests = self._load_peer_state() | ||
2255 | 492 | if local_unit not in self.requests: | ||
2256 | 493 | # The peer relation has just been joined. Update any state | ||
2257 | 494 | # loaded from our peers with our local state. | ||
2258 | 495 | self.msg('New peer relation. Merging local state') | ||
2259 | 496 | self.requests[local_unit] = self._load_local_state() | ||
2260 | 497 | |||
2261 | 498 | def _emit_state(self): | ||
2262 | 499 | # Emit this unit's lock status. | ||
2263 | 500 | for lock in sorted(self.requests[hookenv.local_unit()].keys()): | ||
2264 | 501 | if self.granted(lock): | ||
2265 | 502 | self.msg('Granted {}'.format(lock)) | ||
2266 | 503 | else: | ||
2267 | 504 | self.msg('Waiting on {}'.format(lock)) | ||
2268 | 505 | |||
2269 | 506 | def _save_state(self): | ||
2270 | 507 | self.msg('Publishing state'.format(self._name())) | ||
2271 | 508 | if hookenv.is_leader(): | ||
2272 | 509 | # sort_keys to ensure stability. | ||
2273 | 510 | raw = json.dumps(self.grants, sort_keys=True) | ||
2274 | 511 | hookenv.leader_set({self.key: raw}) | ||
2275 | 512 | |||
2276 | 513 | local_unit = hookenv.local_unit() | ||
2277 | 514 | |||
2278 | 515 | if self.relid is None: | ||
2279 | 516 | # No peer relation yet. Fallback to local state. | ||
2280 | 517 | self.msg('No peer relation. Saving local state') | ||
2281 | 518 | self._save_local_state(self.requests[local_unit]) | ||
2282 | 519 | else: | ||
2283 | 520 | # sort_keys to ensure stability. | ||
2284 | 521 | raw = json.dumps(self.requests[local_unit], sort_keys=True) | ||
2285 | 522 | hookenv.relation_set(self.relid, relation_settings={self.key: raw}) | ||
2286 | 523 | |||
2287 | 524 | def _load_peer_state(self): | ||
2288 | 525 | requests = {} | ||
2289 | 526 | units = set(hookenv.related_units(self.relid)) | ||
2290 | 527 | units.add(hookenv.local_unit()) | ||
2291 | 528 | for unit in units: | ||
2292 | 529 | raw = hookenv.relation_get(self.key, unit, self.relid) | ||
2293 | 530 | if raw: | ||
2294 | 531 | requests[unit] = json.loads(raw) | ||
2295 | 532 | return requests | ||
2296 | 533 | |||
2297 | 534 | def _local_state_filename(self): | ||
2298 | 535 | # Include the class name. We allow multiple BaseCoordinator | ||
2299 | 536 | # subclasses to be instantiated, and they are singletons, so | ||
2300 | 537 | # this avoids conflicts (unless someone creates and uses two | ||
2301 | 538 | # BaseCoordinator subclasses with the same class name, so don't | ||
2302 | 539 | # do that). | ||
2303 | 540 | return '.charmhelpers.coordinator.{}'.format(self._name()) | ||
2304 | 541 | |||
2305 | 542 | def _load_local_state(self): | ||
2306 | 543 | fn = self._local_state_filename() | ||
2307 | 544 | if os.path.exists(fn): | ||
2308 | 545 | with open(fn, 'r') as f: | ||
2309 | 546 | return json.load(f) | ||
2310 | 547 | return {} | ||
2311 | 548 | |||
2312 | 549 | def _save_local_state(self, state): | ||
2313 | 550 | fn = self._local_state_filename() | ||
2314 | 551 | with open(fn, 'w') as f: | ||
2315 | 552 | json.dump(state, f) | ||
2316 | 553 | |||
2317 | 554 | def _release_granted(self): | ||
2318 | 555 | # At the end of every hook, release all locks granted to | ||
2319 | 556 | # this unit. If a hook neglects to make use of what it | ||
2320 | 557 | # requested, it will just have to make the request again. | ||
2321 | 558 | # Implicit release is the only way this will work, as | ||
2322 | 559 | # if the unit is standalone there may be no future triggers | ||
2323 | 560 | # called to do a manual release. | ||
2324 | 561 | unit = hookenv.local_unit() | ||
2325 | 562 | for lock in list(self.requests[unit].keys()): | ||
2326 | 563 | if self.granted(lock): | ||
2327 | 564 | self.msg('Released local {} lock'.format(lock)) | ||
2328 | 565 | del self.requests[unit][lock] | ||
2329 | 566 | |||
2330 | 567 | |||
2331 | 568 | class Serial(BaseCoordinator): | ||
2332 | 569 | def default_grant(self, lock, unit, granted, queue): | ||
2333 | 570 | '''Default logic to grant a lock to a unit. Unless overridden, | ||
2334 | 571 | only one unit may hold the lock and it will be granted to the | ||
2335 | 572 | earliest queued request. | ||
2336 | 573 | |||
2337 | 574 | To define custom logic for $lock, create a subclass and | ||
2338 | 575 | define a grant_$lock method. | ||
2339 | 576 | |||
2340 | 577 | `unit` is the unit name making the request. | ||
2341 | 578 | |||
2342 | 579 | `granted` is the set of units already granted the lock. It will | ||
2343 | 580 | never include `unit`. It may be empty. | ||
2344 | 581 | |||
2345 | 582 | `queue` is the list of units waiting for the lock, ordered by time | ||
2346 | 583 | of request. It will always include `unit`, but `unit` is not | ||
2347 | 584 | necessarily first. | ||
2348 | 585 | |||
2349 | 586 | Returns True if the lock should be granted to `unit`. | ||
2350 | 587 | ''' | ||
2351 | 588 | return unit == queue[0] and not granted | ||
2352 | 589 | |||
2353 | 590 | |||
2354 | 591 | def _implicit_peer_relation_name(): | ||
2355 | 592 | md = hookenv.metadata() | ||
2356 | 593 | assert 'peers' in md, 'No peer relations in metadata.yaml' | ||
2357 | 594 | return sorted(md['peers'].keys())[0] | ||
2358 | 595 | |||
2359 | 596 | |||
2360 | 597 | # A human readable, sortable UTC timestamp format. | ||
2361 | 598 | _timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ' | ||
2362 | 599 | |||
2363 | 600 | |||
2364 | 601 | def _utcnow(): # pragma: no cover | ||
2365 | 602 | # This wrapper exists as mocking datetime methods is problematic. | ||
2366 | 603 | return datetime.utcnow() | ||
2367 | 604 | |||
2368 | 605 | |||
2369 | 606 | def _timestamp(): | ||
2370 | 607 | return _utcnow().strftime(_timestamp_format) | ||
2371 | 0 | 608 | ||
2372 | === added file 'charmhelpers/core/files.py' | |||
2373 | --- charmhelpers/core/files.py 1970-01-01 00:00:00 +0000 | |||
2374 | +++ charmhelpers/core/files.py 2015-08-13 08:33:21 +0000 | |||
2375 | @@ -0,0 +1,45 @@ | |||
2376 | 1 | #!/usr/bin/env python | ||
2377 | 2 | # -*- coding: utf-8 -*- | ||
2378 | 3 | |||
2379 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
2380 | 5 | # | ||
2381 | 6 | # This file is part of charm-helpers. | ||
2382 | 7 | # | ||
2383 | 8 | # charm-helpers is free software: you can redistribute it and/or modify | ||
2384 | 9 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
2385 | 10 | # published by the Free Software Foundation. | ||
2386 | 11 | # | ||
2387 | 12 | # charm-helpers is distributed in the hope that it will be useful, | ||
2388 | 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
2389 | 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
2390 | 15 | # GNU Lesser General Public License for more details. | ||
2391 | 16 | # | ||
2392 | 17 | # You should have received a copy of the GNU Lesser General Public License | ||
2393 | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
2394 | 19 | |||
2395 | 20 | __author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>' | ||
2396 | 21 | |||
2397 | 22 | import os | ||
2398 | 23 | import subprocess | ||
2399 | 24 | |||
2400 | 25 | |||
2401 | 26 | def sed(filename, before, after, flags='g'): | ||
2402 | 27 | """ | ||
2403 | 28 | Search and replaces the given pattern on filename. | ||
2404 | 29 | |||
2405 | 30 | :param filename: relative or absolute file path. | ||
2406 | 31 | :param before: expression to be replaced (see 'man sed') | ||
2407 | 32 | :param after: expression to replace with (see 'man sed') | ||
2408 | 33 | :param flags: sed-compatible regex flags in example, to make | ||
2409 | 34 | the search and replace case insensitive, specify ``flags="i"``. | ||
2410 | 35 | The ``g`` flag is always specified regardless, so you do not | ||
2411 | 36 | need to remember to include it when overriding this parameter. | ||
2412 | 37 | :returns: If the sed command exit code was zero then return, | ||
2413 | 38 | otherwise raise CalledProcessError. | ||
2414 | 39 | """ | ||
2415 | 40 | expression = r's/{0}/{1}/{2}'.format(before, | ||
2416 | 41 | after, flags) | ||
2417 | 42 | |||
2418 | 43 | return subprocess.check_call(["sed", "-i", "-r", "-e", | ||
2419 | 44 | expression, | ||
2420 | 45 | os.path.expanduser(filename)]) | ||
2421 | 0 | 46 | ||
2422 | === modified file 'charmhelpers/core/hookenv.py' | |||
2423 | --- charmhelpers/core/hookenv.py 2015-06-02 13:46:29 +0000 | |||
2424 | +++ charmhelpers/core/hookenv.py 2015-08-13 08:33:21 +0000 | |||
2425 | @@ -21,7 +21,10 @@ | |||
2426 | 21 | # Charm Helpers Developers <juju@lists.ubuntu.com> | 21 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
2427 | 22 | 22 | ||
2428 | 23 | from __future__ import print_function | 23 | from __future__ import print_function |
2429 | 24 | import copy | ||
2430 | 25 | from distutils.version import LooseVersion | ||
2431 | 24 | from functools import wraps | 26 | from functools import wraps |
2432 | 27 | import glob | ||
2433 | 25 | import os | 28 | import os |
2434 | 26 | import json | 29 | import json |
2435 | 27 | import yaml | 30 | import yaml |
2436 | @@ -71,6 +74,7 @@ | |||
2437 | 71 | res = func(*args, **kwargs) | 74 | res = func(*args, **kwargs) |
2438 | 72 | cache[key] = res | 75 | cache[key] = res |
2439 | 73 | return res | 76 | return res |
2440 | 77 | wrapper._wrapped = func | ||
2441 | 74 | return wrapper | 78 | return wrapper |
2442 | 75 | 79 | ||
2443 | 76 | 80 | ||
2444 | @@ -170,9 +174,19 @@ | |||
2445 | 170 | return os.environ.get('JUJU_RELATION', None) | 174 | return os.environ.get('JUJU_RELATION', None) |
2446 | 171 | 175 | ||
2447 | 172 | 176 | ||
2451 | 173 | def relation_id(): | 177 | @cached |
2452 | 174 | """The relation ID for the current relation hook""" | 178 | def relation_id(relation_name=None, service_or_unit=None): |
2453 | 175 | return os.environ.get('JUJU_RELATION_ID', None) | 179 | """The relation ID for the current or a specified relation""" |
2454 | 180 | if not relation_name and not service_or_unit: | ||
2455 | 181 | return os.environ.get('JUJU_RELATION_ID', None) | ||
2456 | 182 | elif relation_name and service_or_unit: | ||
2457 | 183 | service_name = service_or_unit.split('/')[0] | ||
2458 | 184 | for relid in relation_ids(relation_name): | ||
2459 | 185 | remote_service = remote_service_name(relid) | ||
2460 | 186 | if remote_service == service_name: | ||
2461 | 187 | return relid | ||
2462 | 188 | else: | ||
2463 | 189 | raise ValueError('Must specify neither or both of relation_name and service_or_unit') | ||
2464 | 176 | 190 | ||
2465 | 177 | 191 | ||
2466 | 178 | def local_unit(): | 192 | def local_unit(): |
2467 | @@ -190,9 +204,20 @@ | |||
2468 | 190 | return local_unit().split('/')[0] | 204 | return local_unit().split('/')[0] |
2469 | 191 | 205 | ||
2470 | 192 | 206 | ||
2471 | 207 | @cached | ||
2472 | 208 | def remote_service_name(relid=None): | ||
2473 | 209 | """The remote service name for a given relation-id (or the current relation)""" | ||
2474 | 210 | if relid is None: | ||
2475 | 211 | unit = remote_unit() | ||
2476 | 212 | else: | ||
2477 | 213 | units = related_units(relid) | ||
2478 | 214 | unit = units[0] if units else None | ||
2479 | 215 | return unit.split('/')[0] if unit else None | ||
2480 | 216 | |||
2481 | 217 | |||
2482 | 193 | def hook_name(): | 218 | def hook_name(): |
2483 | 194 | """The name of the currently executing hook""" | 219 | """The name of the currently executing hook""" |
2485 | 195 | return os.path.basename(sys.argv[0]) | 220 | return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) |
2486 | 196 | 221 | ||
2487 | 197 | 222 | ||
2488 | 198 | class Config(dict): | 223 | class Config(dict): |
2489 | @@ -242,29 +267,7 @@ | |||
2490 | 242 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) | 267 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
2491 | 243 | if os.path.exists(self.path): | 268 | if os.path.exists(self.path): |
2492 | 244 | self.load_previous() | 269 | self.load_previous() |
2516 | 245 | 270 | atexit(self._implicit_save) | |
2494 | 246 | def __getitem__(self, key): | ||
2495 | 247 | """For regular dict lookups, check the current juju config first, | ||
2496 | 248 | then the previous (saved) copy. This ensures that user-saved values | ||
2497 | 249 | will be returned by a dict lookup. | ||
2498 | 250 | |||
2499 | 251 | """ | ||
2500 | 252 | try: | ||
2501 | 253 | return dict.__getitem__(self, key) | ||
2502 | 254 | except KeyError: | ||
2503 | 255 | return (self._prev_dict or {})[key] | ||
2504 | 256 | |||
2505 | 257 | def get(self, key, default=None): | ||
2506 | 258 | try: | ||
2507 | 259 | return self[key] | ||
2508 | 260 | except KeyError: | ||
2509 | 261 | return default | ||
2510 | 262 | |||
2511 | 263 | def keys(self): | ||
2512 | 264 | prev_keys = [] | ||
2513 | 265 | if self._prev_dict is not None: | ||
2514 | 266 | prev_keys = self._prev_dict.keys() | ||
2515 | 267 | return list(set(prev_keys + list(dict.keys(self)))) | ||
2517 | 268 | 271 | ||
2518 | 269 | def load_previous(self, path=None): | 272 | def load_previous(self, path=None): |
2519 | 270 | """Load previous copy of config from disk. | 273 | """Load previous copy of config from disk. |
2520 | @@ -283,6 +286,9 @@ | |||
2521 | 283 | self.path = path or self.path | 286 | self.path = path or self.path |
2522 | 284 | with open(self.path) as f: | 287 | with open(self.path) as f: |
2523 | 285 | self._prev_dict = json.load(f) | 288 | self._prev_dict = json.load(f) |
2524 | 289 | for k, v in copy.deepcopy(self._prev_dict).items(): | ||
2525 | 290 | if k not in self: | ||
2526 | 291 | self[k] = v | ||
2527 | 286 | 292 | ||
2528 | 287 | def changed(self, key): | 293 | def changed(self, key): |
2529 | 288 | """Return True if the current value for this key is different from | 294 | """Return True if the current value for this key is different from |
2530 | @@ -314,13 +320,13 @@ | |||
2531 | 314 | instance. | 320 | instance. |
2532 | 315 | 321 | ||
2533 | 316 | """ | 322 | """ |
2534 | 317 | if self._prev_dict: | ||
2535 | 318 | for k, v in six.iteritems(self._prev_dict): | ||
2536 | 319 | if k not in self: | ||
2537 | 320 | self[k] = v | ||
2538 | 321 | with open(self.path, 'w') as f: | 323 | with open(self.path, 'w') as f: |
2539 | 322 | json.dump(self, f) | 324 | json.dump(self, f) |
2540 | 323 | 325 | ||
2541 | 326 | def _implicit_save(self): | ||
2542 | 327 | if self.implicit_save: | ||
2543 | 328 | self.save() | ||
2544 | 329 | |||
2545 | 324 | 330 | ||
2546 | 325 | @cached | 331 | @cached |
2547 | 326 | def config(scope=None): | 332 | def config(scope=None): |
2548 | @@ -485,6 +491,63 @@ | |||
2549 | 485 | 491 | ||
2550 | 486 | 492 | ||
2551 | 487 | @cached | 493 | @cached |
2552 | 494 | def relation_to_interface(relation_name): | ||
2553 | 495 | """ | ||
2554 | 496 | Given the name of a relation, return the interface that relation uses. | ||
2555 | 497 | |||
2556 | 498 | :returns: The interface name, or ``None``. | ||
2557 | 499 | """ | ||
2558 | 500 | return relation_to_role_and_interface(relation_name)[1] | ||
2559 | 501 | |||
2560 | 502 | |||
2561 | 503 | @cached | ||
2562 | 504 | def relation_to_role_and_interface(relation_name): | ||
2563 | 505 | """ | ||
2564 | 506 | Given the name of a relation, return the role and the name of the interface | ||
2565 | 507 | that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). | ||
2566 | 508 | |||
2567 | 509 | :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. | ||
2568 | 510 | """ | ||
2569 | 511 | _metadata = metadata() | ||
2570 | 512 | for role in ('provides', 'requires', 'peer'): | ||
2571 | 513 | interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') | ||
2572 | 514 | if interface: | ||
2573 | 515 | return role, interface | ||
2574 | 516 | return None, None | ||
2575 | 517 | |||
2576 | 518 | |||
2577 | 519 | @cached | ||
2578 | 520 | def role_and_interface_to_relations(role, interface_name): | ||
2579 | 521 | """ | ||
2580 | 522 | Given a role and interface name, return a list of relation names for the | ||
2581 | 523 | current charm that use that interface under that role (where role is one | ||
2582 | 524 | of ``provides``, ``requires``, or ``peer``). | ||
2583 | 525 | |||
2584 | 526 | :returns: A list of relation names. | ||
2585 | 527 | """ | ||
2586 | 528 | _metadata = metadata() | ||
2587 | 529 | results = [] | ||
2588 | 530 | for relation_name, relation in _metadata.get(role, {}).items(): | ||
2589 | 531 | if relation['interface'] == interface_name: | ||
2590 | 532 | results.append(relation_name) | ||
2591 | 533 | return results | ||
2592 | 534 | |||
2593 | 535 | |||
2594 | 536 | @cached | ||
2595 | 537 | def interface_to_relations(interface_name): | ||
2596 | 538 | """ | ||
2597 | 539 | Given an interface, return a list of relation names for the current | ||
2598 | 540 | charm that use that interface. | ||
2599 | 541 | |||
2600 | 542 | :returns: A list of relation names. | ||
2601 | 543 | """ | ||
2602 | 544 | results = [] | ||
2603 | 545 | for role in ('provides', 'requires', 'peer'): | ||
2604 | 546 | results.extend(role_and_interface_to_relations(role, interface_name)) | ||
2605 | 547 | return results | ||
2606 | 548 | |||
2607 | 549 | |||
2608 | 550 | @cached | ||
2609 | 488 | def charm_name(): | 551 | def charm_name(): |
2610 | 489 | """Get the name of the current charm as is specified on metadata.yaml""" | 552 | """Get the name of the current charm as is specified on metadata.yaml""" |
2611 | 490 | return metadata().get('name') | 553 | return metadata().get('name') |
2612 | @@ -587,10 +650,14 @@ | |||
2613 | 587 | hooks.execute(sys.argv) | 650 | hooks.execute(sys.argv) |
2614 | 588 | """ | 651 | """ |
2615 | 589 | 652 | ||
2617 | 590 | def __init__(self, config_save=True): | 653 | def __init__(self, config_save=None): |
2618 | 591 | super(Hooks, self).__init__() | 654 | super(Hooks, self).__init__() |
2619 | 592 | self._hooks = {} | 655 | self._hooks = {} |
2621 | 593 | self._config_save = config_save | 656 | |
2622 | 657 | # For unknown reasons, we allow the Hooks constructor to override | ||
2623 | 658 | # config().implicit_save. | ||
2624 | 659 | if config_save is not None: | ||
2625 | 660 | config().implicit_save = config_save | ||
2626 | 594 | 661 | ||
2627 | 595 | def register(self, name, function): | 662 | def register(self, name, function): |
2628 | 596 | """Register a hook""" | 663 | """Register a hook""" |
2629 | @@ -598,13 +665,16 @@ | |||
2630 | 598 | 665 | ||
2631 | 599 | def execute(self, args): | 666 | def execute(self, args): |
2632 | 600 | """Execute a registered hook based on args[0]""" | 667 | """Execute a registered hook based on args[0]""" |
2633 | 668 | _run_atstart() | ||
2634 | 601 | hook_name = os.path.basename(args[0]) | 669 | hook_name = os.path.basename(args[0]) |
2635 | 602 | if hook_name in self._hooks: | 670 | if hook_name in self._hooks: |
2641 | 603 | self._hooks[hook_name]() | 671 | try: |
2642 | 604 | if self._config_save: | 672 | self._hooks[hook_name]() |
2643 | 605 | cfg = config() | 673 | except SystemExit as x: |
2644 | 606 | if cfg.implicit_save: | 674 | if x.code is None or x.code == 0: |
2645 | 607 | cfg.save() | 675 | _run_atexit() |
2646 | 676 | raise | ||
2647 | 677 | _run_atexit() | ||
2648 | 608 | else: | 678 | else: |
2649 | 609 | raise UnregisteredHookError(hook_name) | 679 | raise UnregisteredHookError(hook_name) |
2650 | 610 | 680 | ||
2651 | @@ -653,6 +723,21 @@ | |||
2652 | 653 | subprocess.check_call(['action-fail', message]) | 723 | subprocess.check_call(['action-fail', message]) |
2653 | 654 | 724 | ||
2654 | 655 | 725 | ||
2655 | 726 | def action_name(): | ||
2656 | 727 | """Get the name of the currently executing action.""" | ||
2657 | 728 | return os.environ.get('JUJU_ACTION_NAME') | ||
2658 | 729 | |||
2659 | 730 | |||
2660 | 731 | def action_uuid(): | ||
2661 | 732 | """Get the UUID of the currently executing action.""" | ||
2662 | 733 | return os.environ.get('JUJU_ACTION_UUID') | ||
2663 | 734 | |||
2664 | 735 | |||
2665 | 736 | def action_tag(): | ||
2666 | 737 | """Get the tag for the currently executing action.""" | ||
2667 | 738 | return os.environ.get('JUJU_ACTION_TAG') | ||
2668 | 739 | |||
2669 | 740 | |||
2670 | 656 | def status_set(workload_state, message): | 741 | def status_set(workload_state, message): |
2671 | 657 | """Set the workload state with a message | 742 | """Set the workload state with a message |
2672 | 658 | 743 | ||
2673 | @@ -732,13 +817,80 @@ | |||
2674 | 732 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | 817 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
2675 | 733 | def leader_set(settings=None, **kwargs): | 818 | def leader_set(settings=None, **kwargs): |
2676 | 734 | """Juju leader set value(s)""" | 819 | """Juju leader set value(s)""" |
2678 | 735 | log("Juju leader-set '%s'" % (settings), level=DEBUG) | 820 | # Don't log secrets. |
2679 | 821 | # log("Juju leader-set '%s'" % (settings), level=DEBUG) | ||
2680 | 736 | cmd = ['leader-set'] | 822 | cmd = ['leader-set'] |
2681 | 737 | settings = settings or {} | 823 | settings = settings or {} |
2682 | 738 | settings.update(kwargs) | 824 | settings.update(kwargs) |
2684 | 739 | for k, v in settings.iteritems(): | 825 | for k, v in settings.items(): |
2685 | 740 | if v is None: | 826 | if v is None: |
2686 | 741 | cmd.append('{}='.format(k)) | 827 | cmd.append('{}='.format(k)) |
2687 | 742 | else: | 828 | else: |
2688 | 743 | cmd.append('{}={}'.format(k, v)) | 829 | cmd.append('{}={}'.format(k, v)) |
2689 | 744 | subprocess.check_call(cmd) | 830 | subprocess.check_call(cmd) |
2690 | 831 | |||
2691 | 832 | |||
2692 | 833 | @cached | ||
2693 | 834 | def juju_version(): | ||
2694 | 835 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" | ||
2695 | 836 | # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 | ||
2696 | 837 | jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] | ||
2697 | 838 | return subprocess.check_output([jujud, 'version'], | ||
2698 | 839 | universal_newlines=True).strip() | ||
2699 | 840 | |||
2700 | 841 | |||
2701 | 842 | @cached | ||
2702 | 843 | def has_juju_version(minimum_version): | ||
2703 | 844 | """Return True if the Juju version is at least the provided version""" | ||
2704 | 845 | return LooseVersion(juju_version()) >= LooseVersion(minimum_version) | ||
2705 | 846 | |||
2706 | 847 | |||
2707 | 848 | _atexit = [] | ||
2708 | 849 | _atstart = [] | ||
2709 | 850 | |||
2710 | 851 | |||
2711 | 852 | def atstart(callback, *args, **kwargs): | ||
2712 | 853 | '''Schedule a callback to run before the main hook. | ||
2713 | 854 | |||
2714 | 855 | Callbacks are run in the order they were added. | ||
2715 | 856 | |||
2716 | 857 | This is useful for modules and classes to perform initialization | ||
2717 | 858 | and inject behavior. In particular: | ||
2718 | 859 | |||
2719 | 860 | - Run common code before all of your hooks, such as logging | ||
2720 | 861 | the hook name or interesting relation data. | ||
2721 | 862 | - Defer object or module initialization that requires a hook | ||
2722 | 863 | context until we know there actually is a hook context, | ||
2723 | 864 | making testing easier. | ||
2724 | 865 | - Rather than requiring charm authors to include boilerplate to | ||
2725 | 866 | invoke your helper's behavior, have it run automatically if | ||
2726 | 867 | your object is instantiated or module imported. | ||
2727 | 868 | |||
2728 | 869 | This is not at all useful after your hook framework as been launched. | ||
2729 | 870 | ''' | ||
2730 | 871 | global _atstart | ||
2731 | 872 | _atstart.append((callback, args, kwargs)) | ||
2732 | 873 | |||
2733 | 874 | |||
2734 | 875 | def atexit(callback, *args, **kwargs): | ||
2735 | 876 | '''Schedule a callback to run on successful hook completion. | ||
2736 | 877 | |||
2737 | 878 | Callbacks are run in the reverse order that they were added.''' | ||
2738 | 879 | _atexit.append((callback, args, kwargs)) | ||
2739 | 880 | |||
2740 | 881 | |||
2741 | 882 | def _run_atstart(): | ||
2742 | 883 | '''Hook frameworks must invoke this before running the main hook body.''' | ||
2743 | 884 | global _atstart | ||
2744 | 885 | for callback, args, kwargs in _atstart: | ||
2745 | 886 | callback(*args, **kwargs) | ||
2746 | 887 | del _atstart[:] | ||
2747 | 888 | |||
2748 | 889 | |||
2749 | 890 | def _run_atexit(): | ||
2750 | 891 | '''Hook frameworks must invoke this after the main hook body has | ||
2751 | 892 | successfully completed. Do not invoke it if the hook fails.''' | ||
2752 | 893 | global _atexit | ||
2753 | 894 | for callback, args, kwargs in reversed(_atexit): | ||
2754 | 895 | callback(*args, **kwargs) | ||
2755 | 896 | del _atexit[:] | ||
2756 | 745 | 897 | ||
2757 | === modified file 'charmhelpers/core/host.py' | |||
2758 | --- charmhelpers/core/host.py 2015-06-11 09:03:58 +0000 | |||
2759 | +++ charmhelpers/core/host.py 2015-08-13 08:33:21 +0000 | |||
2760 | @@ -63,6 +63,36 @@ | |||
2761 | 63 | return service_result | 63 | return service_result |
2762 | 64 | 64 | ||
2763 | 65 | 65 | ||
2764 | 66 | def service_pause(service_name, init_dir=None): | ||
2765 | 67 | """Pause a system service. | ||
2766 | 68 | |||
2767 | 69 | Stop it, and prevent it from starting again at boot.""" | ||
2768 | 70 | if init_dir is None: | ||
2769 | 71 | init_dir = "/etc/init" | ||
2770 | 72 | stopped = service_stop(service_name) | ||
2771 | 73 | # XXX: Support systemd too | ||
2772 | 74 | override_path = os.path.join( | ||
2773 | 75 | init_dir, '{}.override'.format(service_name)) | ||
2774 | 76 | with open(override_path, 'w') as fh: | ||
2775 | 77 | fh.write("manual\n") | ||
2776 | 78 | return stopped | ||
2777 | 79 | |||
2778 | 80 | |||
2779 | 81 | def service_resume(service_name, init_dir=None): | ||
2780 | 82 | """Resume a system service. | ||
2781 | 83 | |||
2782 | 84 | Reenable starting again at boot. Start the service""" | ||
2783 | 85 | # XXX: Support systemd too | ||
2784 | 86 | if init_dir is None: | ||
2785 | 87 | init_dir = "/etc/init" | ||
2786 | 88 | override_path = os.path.join( | ||
2787 | 89 | init_dir, '{}.override'.format(service_name)) | ||
2788 | 90 | if os.path.exists(override_path): | ||
2789 | 91 | os.unlink(override_path) | ||
2790 | 92 | started = service_start(service_name) | ||
2791 | 93 | return started | ||
2792 | 94 | |||
2793 | 95 | |||
2794 | 66 | def service(action, service_name): | 96 | def service(action, service_name): |
2795 | 67 | """Control a system service""" | 97 | """Control a system service""" |
2796 | 68 | cmd = ['service', service_name, action] | 98 | cmd = ['service', service_name, action] |
2797 | @@ -149,11 +179,7 @@ | |||
2798 | 149 | 179 | ||
2799 | 150 | def add_user_to_group(username, group): | 180 | def add_user_to_group(username, group): |
2800 | 151 | """Add a user to a group""" | 181 | """Add a user to a group""" |
2806 | 152 | cmd = [ | 182 | cmd = ['gpasswd', '-a', username, group] |
2802 | 153 | 'gpasswd', '-a', | ||
2803 | 154 | username, | ||
2804 | 155 | group | ||
2805 | 156 | ] | ||
2807 | 157 | log("Adding user {} to group {}".format(username, group)) | 183 | log("Adding user {} to group {}".format(username, group)) |
2808 | 158 | subprocess.check_call(cmd) | 184 | subprocess.check_call(cmd) |
2809 | 159 | 185 | ||
2810 | 160 | 186 | ||
2811 | === modified file 'charmhelpers/core/services/base.py' | |||
2812 | --- charmhelpers/core/services/base.py 2015-05-20 14:52:29 +0000 | |||
2813 | +++ charmhelpers/core/services/base.py 2015-08-13 08:33:21 +0000 | |||
2814 | @@ -128,15 +128,18 @@ | |||
2815 | 128 | """ | 128 | """ |
2816 | 129 | Handle the current hook by doing The Right Thing with the registered services. | 129 | Handle the current hook by doing The Right Thing with the registered services. |
2817 | 130 | """ | 130 | """ |
2827 | 131 | hook_name = hookenv.hook_name() | 131 | hookenv._run_atstart() |
2828 | 132 | if hook_name == 'stop': | 132 | try: |
2829 | 133 | self.stop_services() | 133 | hook_name = hookenv.hook_name() |
2830 | 134 | else: | 134 | if hook_name == 'stop': |
2831 | 135 | self.reconfigure_services() | 135 | self.stop_services() |
2832 | 136 | self.provide_data() | 136 | else: |
2833 | 137 | cfg = hookenv.config() | 137 | self.reconfigure_services() |
2834 | 138 | if cfg.implicit_save: | 138 | self.provide_data() |
2835 | 139 | cfg.save() | 139 | except SystemExit as x: |
2836 | 140 | if x.code is None or x.code == 0: | ||
2837 | 141 | hookenv._run_atexit() | ||
2838 | 142 | hookenv._run_atexit() | ||
2839 | 140 | 143 | ||
2840 | 141 | def provide_data(self): | 144 | def provide_data(self): |
2841 | 142 | """ | 145 | """ |
2842 | 143 | 146 | ||
2843 | === modified file 'charmhelpers/core/services/helpers.py' | |||
2844 | --- charmhelpers/core/services/helpers.py 2015-06-12 11:27:22 +0000 | |||
2845 | +++ charmhelpers/core/services/helpers.py 2015-08-13 08:33:21 +0000 | |||
2846 | @@ -240,8 +240,7 @@ | |||
2847 | 240 | action. | 240 | action. |
2848 | 241 | 241 | ||
2849 | 242 | :param str source: The template source file, relative to | 242 | :param str source: The template source file, relative to |
2852 | 243 | `$CHARM_DIR/templates` | 243 | `$CHARM_DIR/templates` |
2851 | 244 | |||
2853 | 245 | :param str target: The target to write the rendered template to | 244 | :param str target: The target to write the rendered template to |
2854 | 246 | :param str owner: The owner of the rendered file | 245 | :param str owner: The owner of the rendered file |
2855 | 247 | :param str group: The group of the rendered file | 246 | :param str group: The group of the rendered file |
2856 | 248 | 247 | ||
2857 | === modified file 'charmhelpers/core/unitdata.py' | |||
2858 | --- charmhelpers/core/unitdata.py 2015-03-18 15:51:22 +0000 | |||
2859 | +++ charmhelpers/core/unitdata.py 2015-08-13 08:33:21 +0000 | |||
2860 | @@ -152,6 +152,7 @@ | |||
2861 | 152 | import collections | 152 | import collections |
2862 | 153 | import contextlib | 153 | import contextlib |
2863 | 154 | import datetime | 154 | import datetime |
2864 | 155 | import itertools | ||
2865 | 155 | import json | 156 | import json |
2866 | 156 | import os | 157 | import os |
2867 | 157 | import pprint | 158 | import pprint |
2868 | @@ -164,8 +165,7 @@ | |||
2869 | 164 | class Storage(object): | 165 | class Storage(object): |
2870 | 165 | """Simple key value database for local unit state within charms. | 166 | """Simple key value database for local unit state within charms. |
2871 | 166 | 167 | ||
2874 | 167 | Modifications are automatically committed at hook exit. That's | 168 | Modifications are not persisted unless :meth:`flush` is called. |
2873 | 168 | currently regardless of exit code. | ||
2875 | 169 | 169 | ||
2876 | 170 | To support dicts, lists, integer, floats, and booleans values | 170 | To support dicts, lists, integer, floats, and booleans values |
2877 | 171 | are automatically json encoded/decoded. | 171 | are automatically json encoded/decoded. |
2878 | @@ -173,8 +173,11 @@ | |||
2879 | 173 | def __init__(self, path=None): | 173 | def __init__(self, path=None): |
2880 | 174 | self.db_path = path | 174 | self.db_path = path |
2881 | 175 | if path is None: | 175 | if path is None: |
2884 | 176 | self.db_path = os.path.join( | 176 | if 'UNIT_STATE_DB' in os.environ: |
2885 | 177 | os.environ.get('CHARM_DIR', ''), '.unit-state.db') | 177 | self.db_path = os.environ['UNIT_STATE_DB'] |
2886 | 178 | else: | ||
2887 | 179 | self.db_path = os.path.join( | ||
2888 | 180 | os.environ.get('CHARM_DIR', ''), '.unit-state.db') | ||
2889 | 178 | self.conn = sqlite3.connect('%s' % self.db_path) | 181 | self.conn = sqlite3.connect('%s' % self.db_path) |
2890 | 179 | self.cursor = self.conn.cursor() | 182 | self.cursor = self.conn.cursor() |
2891 | 180 | self.revision = None | 183 | self.revision = None |
2892 | @@ -189,15 +192,8 @@ | |||
2893 | 189 | self.conn.close() | 192 | self.conn.close() |
2894 | 190 | self._closed = True | 193 | self._closed = True |
2895 | 191 | 194 | ||
2896 | 192 | def _scoped_query(self, stmt, params=None): | ||
2897 | 193 | if params is None: | ||
2898 | 194 | params = [] | ||
2899 | 195 | return stmt, params | ||
2900 | 196 | |||
2901 | 197 | def get(self, key, default=None, record=False): | 195 | def get(self, key, default=None, record=False): |
2905 | 198 | self.cursor.execute( | 196 | self.cursor.execute('select data from kv where key=?', [key]) |
2903 | 199 | *self._scoped_query( | ||
2904 | 200 | 'select data from kv where key=?', [key])) | ||
2906 | 201 | result = self.cursor.fetchone() | 197 | result = self.cursor.fetchone() |
2907 | 202 | if not result: | 198 | if not result: |
2908 | 203 | return default | 199 | return default |
2909 | @@ -206,33 +202,81 @@ | |||
2910 | 206 | return json.loads(result[0]) | 202 | return json.loads(result[0]) |
2911 | 207 | 203 | ||
2912 | 208 | def getrange(self, key_prefix, strip=False): | 204 | def getrange(self, key_prefix, strip=False): |
2915 | 209 | stmt = "select key, data from kv where key like '%s%%'" % key_prefix | 205 | """ |
2916 | 210 | self.cursor.execute(*self._scoped_query(stmt)) | 206 | Get a range of keys starting with a common prefix as a mapping of |
2917 | 207 | keys to values. | ||
2918 | 208 | |||
2919 | 209 | :param str key_prefix: Common prefix among all keys | ||
2920 | 210 | :param bool strip: Optionally strip the common prefix from the key | ||
2921 | 211 | names in the returned dict | ||
2922 | 212 | :return dict: A (possibly empty) dict of key-value mappings | ||
2923 | 213 | """ | ||
2924 | 214 | self.cursor.execute("select key, data from kv where key like ?", | ||
2925 | 215 | ['%s%%' % key_prefix]) | ||
2926 | 211 | result = self.cursor.fetchall() | 216 | result = self.cursor.fetchall() |
2927 | 212 | 217 | ||
2928 | 213 | if not result: | 218 | if not result: |
2930 | 214 | return None | 219 | return {} |
2931 | 215 | if not strip: | 220 | if not strip: |
2932 | 216 | key_prefix = '' | 221 | key_prefix = '' |
2933 | 217 | return dict([ | 222 | return dict([ |
2934 | 218 | (k[len(key_prefix):], json.loads(v)) for k, v in result]) | 223 | (k[len(key_prefix):], json.loads(v)) for k, v in result]) |
2935 | 219 | 224 | ||
2936 | 220 | def update(self, mapping, prefix=""): | 225 | def update(self, mapping, prefix=""): |
2937 | 226 | """ | ||
2938 | 227 | Set the values of multiple keys at once. | ||
2939 | 228 | |||
2940 | 229 | :param dict mapping: Mapping of keys to values | ||
2941 | 230 | :param str prefix: Optional prefix to apply to all keys in `mapping` | ||
2942 | 231 | before setting | ||
2943 | 232 | """ | ||
2944 | 221 | for k, v in mapping.items(): | 233 | for k, v in mapping.items(): |
2945 | 222 | self.set("%s%s" % (prefix, k), v) | 234 | self.set("%s%s" % (prefix, k), v) |
2946 | 223 | 235 | ||
2947 | 224 | def unset(self, key): | 236 | def unset(self, key): |
2948 | 237 | """ | ||
2949 | 238 | Remove a key from the database entirely. | ||
2950 | 239 | """ | ||
2951 | 225 | self.cursor.execute('delete from kv where key=?', [key]) | 240 | self.cursor.execute('delete from kv where key=?', [key]) |
2952 | 226 | if self.revision and self.cursor.rowcount: | 241 | if self.revision and self.cursor.rowcount: |
2953 | 227 | self.cursor.execute( | 242 | self.cursor.execute( |
2954 | 228 | 'insert into kv_revisions values (?, ?, ?)', | 243 | 'insert into kv_revisions values (?, ?, ?)', |
2955 | 229 | [key, self.revision, json.dumps('DELETED')]) | 244 | [key, self.revision, json.dumps('DELETED')]) |
2956 | 230 | 245 | ||
2957 | 246 | def unsetrange(self, keys=None, prefix=""): | ||
2958 | 247 | """ | ||
2959 | 248 | Remove a range of keys starting with a common prefix, from the database | ||
2960 | 249 | entirely. | ||
2961 | 250 | |||
2962 | 251 | :param list keys: List of keys to remove. | ||
2963 | 252 | :param str prefix: Optional prefix to apply to all keys in ``keys`` | ||
2964 | 253 | before removing. | ||
2965 | 254 | """ | ||
2966 | 255 | if keys is not None: | ||
2967 | 256 | keys = ['%s%s' % (prefix, key) for key in keys] | ||
2968 | 257 | self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) | ||
2969 | 258 | if self.revision and self.cursor.rowcount: | ||
2970 | 259 | self.cursor.execute( | ||
2971 | 260 | 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), | ||
2972 | 261 | list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) | ||
2973 | 262 | else: | ||
2974 | 263 | self.cursor.execute('delete from kv where key like ?', | ||
2975 | 264 | ['%s%%' % prefix]) | ||
2976 | 265 | if self.revision and self.cursor.rowcount: | ||
2977 | 266 | self.cursor.execute( | ||
2978 | 267 | 'insert into kv_revisions values (?, ?, ?)', | ||
2979 | 268 | ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) | ||
2980 | 269 | |||
2981 | 231 | def set(self, key, value): | 270 | def set(self, key, value): |
2982 | 271 | """ | ||
2983 | 272 | Set a value in the database. | ||
2984 | 273 | |||
2985 | 274 | :param str key: Key to set the value for | ||
2986 | 275 | :param value: Any JSON-serializable value to be set | ||
2987 | 276 | """ | ||
2988 | 232 | serialized = json.dumps(value) | 277 | serialized = json.dumps(value) |
2989 | 233 | 278 | ||
2992 | 234 | self.cursor.execute( | 279 | self.cursor.execute('select data from kv where key=?', [key]) |
2991 | 235 | 'select data from kv where key=?', [key]) | ||
2993 | 236 | exists = self.cursor.fetchone() | 280 | exists = self.cursor.fetchone() |
2994 | 237 | 281 | ||
2995 | 238 | # Skip mutations to the same value | 282 | # Skip mutations to the same value |
2996 | 239 | 283 | ||
2997 | === modified file 'charmhelpers/fetch/__init__.py' | |||
2998 | --- charmhelpers/fetch/__init__.py 2015-04-29 12:52:18 +0000 | |||
2999 | +++ charmhelpers/fetch/__init__.py 2015-08-13 08:33:21 +0000 | |||
3000 | @@ -90,6 +90,14 @@ | |||
3001 | 90 | 'kilo/proposed': 'trusty-proposed/kilo', | 90 | 'kilo/proposed': 'trusty-proposed/kilo', |
3002 | 91 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', | 91 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', |
3003 | 92 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', | 92 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
3004 | 93 | # Liberty | ||
3005 | 94 | 'liberty': 'trusty-updates/liberty', | ||
3006 | 95 | 'trusty-liberty': 'trusty-updates/liberty', | ||
3007 | 96 | 'trusty-liberty/updates': 'trusty-updates/liberty', | ||
3008 | 97 | 'trusty-updates/liberty': 'trusty-updates/liberty', | ||
3009 | 98 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
3010 | 99 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', | ||
3011 | 100 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', | ||
3012 | 93 | } | 101 | } |
3013 | 94 | 102 | ||
3014 | 95 | # The order of this list is very important. Handlers should be listed in from | 103 | # The order of this list is very important. Handlers should be listed in from |
3015 | @@ -215,19 +223,27 @@ | |||
3016 | 215 | _run_apt_command(cmd, fatal) | 223 | _run_apt_command(cmd, fatal) |
3017 | 216 | 224 | ||
3018 | 217 | 225 | ||
3019 | 226 | def apt_mark(packages, mark, fatal=False): | ||
3020 | 227 | """Flag one or more packages using apt-mark""" | ||
3021 | 228 | cmd = ['apt-mark', mark] | ||
3022 | 229 | if isinstance(packages, six.string_types): | ||
3023 | 230 | cmd.append(packages) | ||
3024 | 231 | else: | ||
3025 | 232 | cmd.extend(packages) | ||
3026 | 233 | log("Holding {}".format(packages)) | ||
3027 | 234 | |||
3028 | 235 | if fatal: | ||
3029 | 236 | subprocess.check_call(cmd, universal_newlines=True) | ||
3030 | 237 | else: | ||
3031 | 238 | subprocess.call(cmd, universal_newlines=True) | ||
3032 | 239 | |||
3033 | 240 | |||
3034 | 218 | def apt_hold(packages, fatal=False): | 241 | def apt_hold(packages, fatal=False): |
3047 | 219 | """Hold one or more packages""" | 242 | return apt_mark(packages, 'hold', fatal=fatal) |
3048 | 220 | cmd = ['apt-mark', 'hold'] | 243 | |
3049 | 221 | if isinstance(packages, six.string_types): | 244 | |
3050 | 222 | cmd.append(packages) | 245 | def apt_unhold(packages, fatal=False): |
3051 | 223 | else: | 246 | return apt_mark(packages, 'unhold', fatal=fatal) |
3040 | 224 | cmd.extend(packages) | ||
3041 | 225 | log("Holding {}".format(packages)) | ||
3042 | 226 | |||
3043 | 227 | if fatal: | ||
3044 | 228 | subprocess.check_call(cmd) | ||
3045 | 229 | else: | ||
3046 | 230 | subprocess.call(cmd) | ||
3052 | 231 | 247 | ||
3053 | 232 | 248 | ||
3054 | 233 | def add_source(source, key=None): | 249 | def add_source(source, key=None): |
3055 | @@ -370,8 +386,9 @@ | |||
3056 | 370 | for handler in handlers: | 386 | for handler in handlers: |
3057 | 371 | try: | 387 | try: |
3058 | 372 | installed_to = handler.install(source, *args, **kwargs) | 388 | installed_to = handler.install(source, *args, **kwargs) |
3061 | 373 | except UnhandledSource: | 389 | except UnhandledSource as e: |
3062 | 374 | pass | 390 | log('Install source attempt unsuccessful: {}'.format(e), |
3063 | 391 | level='WARNING') | ||
3064 | 375 | if not installed_to: | 392 | if not installed_to: |
3065 | 376 | raise UnhandledSource("No handler found for source {}".format(source)) | 393 | raise UnhandledSource("No handler found for source {}".format(source)) |
3066 | 377 | return installed_to | 394 | return installed_to |
3067 | 378 | 395 | ||
3068 | === modified file 'charmhelpers/fetch/archiveurl.py' | |||
3069 | --- charmhelpers/fetch/archiveurl.py 2015-02-11 21:41:57 +0000 | |||
3070 | +++ charmhelpers/fetch/archiveurl.py 2015-08-13 08:33:21 +0000 | |||
3071 | @@ -77,6 +77,8 @@ | |||
3072 | 77 | def can_handle(self, source): | 77 | def can_handle(self, source): |
3073 | 78 | url_parts = self.parse_url(source) | 78 | url_parts = self.parse_url(source) |
3074 | 79 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): | 79 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
3075 | 80 | # XXX: Why is this returning a boolean and a string? It's | ||
3076 | 81 | # doomed to fail since "bool(can_handle('foo://'))" will be True. | ||
3077 | 80 | return "Wrong source type" | 82 | return "Wrong source type" |
3078 | 81 | if get_archive_handler(self.base_url(source)): | 83 | if get_archive_handler(self.base_url(source)): |
3079 | 82 | return True | 84 | return True |
3080 | @@ -155,7 +157,11 @@ | |||
3081 | 155 | else: | 157 | else: |
3082 | 156 | algorithms = hashlib.algorithms_available | 158 | algorithms = hashlib.algorithms_available |
3083 | 157 | if key in algorithms: | 159 | if key in algorithms: |
3085 | 158 | check_hash(dld_file, value, key) | 160 | if len(value) != 1: |
3086 | 161 | raise TypeError( | ||
3087 | 162 | "Expected 1 hash value, not %d" % len(value)) | ||
3088 | 163 | expected = value[0] | ||
3089 | 164 | check_hash(dld_file, expected, key) | ||
3090 | 159 | if checksum: | 165 | if checksum: |
3091 | 160 | check_hash(dld_file, checksum, hash_type) | 166 | check_hash(dld_file, checksum, hash_type) |
3092 | 161 | return extract(dld_file, dest) | 167 | return extract(dld_file, dest) |
3093 | 162 | 168 | ||
3094 | === modified file 'charmhelpers/fetch/giturl.py' | |||
3095 | --- charmhelpers/fetch/giturl.py 2015-05-27 12:55:44 +0000 | |||
3096 | +++ charmhelpers/fetch/giturl.py 2015-08-13 08:33:21 +0000 | |||
3097 | @@ -67,7 +67,7 @@ | |||
3098 | 67 | try: | 67 | try: |
3099 | 68 | self.clone(source, dest_dir, branch, depth) | 68 | self.clone(source, dest_dir, branch, depth) |
3100 | 69 | except GitCommandError as e: | 69 | except GitCommandError as e: |
3102 | 70 | raise UnhandledSource(e.message) | 70 | raise UnhandledSource(e) |
3103 | 71 | except OSError as e: | 71 | except OSError as e: |
3104 | 72 | raise UnhandledSource(e.strerror) | 72 | raise UnhandledSource(e.strerror) |
3105 | 73 | return dest_dir | 73 | return dest_dir |
3106 | 74 | 74 | ||
3107 | === added directory 'docs/_extensions' | |||
3108 | === added file 'docs/_extensions/automembersummary.py' | |||
3109 | --- docs/_extensions/automembersummary.py 1970-01-01 00:00:00 +0000 | |||
3110 | +++ docs/_extensions/automembersummary.py 2015-08-13 08:33:21 +0000 | |||
3111 | @@ -0,0 +1,86 @@ | |||
3112 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
3113 | 2 | # | ||
3114 | 3 | # This file is part of charm-helpers. | ||
3115 | 4 | # | ||
3116 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
3117 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
3118 | 7 | # published by the Free Software Foundation. | ||
3119 | 8 | # | ||
3120 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
3121 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
3122 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
3123 | 12 | # GNU Lesser General Public License for more details. | ||
3124 | 13 | # | ||
3125 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
3126 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
3127 | 16 | |||
3128 | 17 | |||
3129 | 18 | import inspect | ||
3130 | 19 | |||
3131 | 20 | from docutils.parsers.rst import directives | ||
3132 | 21 | from sphinx.ext.autosummary import Autosummary | ||
3133 | 22 | from sphinx.ext.autosummary import get_import_prefixes_from_env | ||
3134 | 23 | from sphinx.ext.autosummary import import_by_name | ||
3135 | 24 | |||
3136 | 25 | |||
3137 | 26 | class AutoMemberSummary(Autosummary): | ||
3138 | 27 | required_arguments = 0 | ||
3139 | 28 | optional_arguments = 0 | ||
3140 | 29 | final_argument_whitespace = False | ||
3141 | 30 | has_content = True | ||
3142 | 31 | option_spec = { | ||
3143 | 32 | 'toctree': directives.unchanged, | ||
3144 | 33 | 'nosignatures': directives.flag, | ||
3145 | 34 | 'template': directives.unchanged, | ||
3146 | 35 | } | ||
3147 | 36 | |||
3148 | 37 | def get_items(self, names): | ||
3149 | 38 | env = self.state.document.settings.env | ||
3150 | 39 | prefixes = get_import_prefixes_from_env(env) | ||
3151 | 40 | |||
3152 | 41 | items = [] | ||
3153 | 42 | prefix = '' | ||
3154 | 43 | shorten = '' | ||
3155 | 44 | |||
3156 | 45 | def _get_items(name): | ||
3157 | 46 | _items = super(AutoMemberSummary, self).get_items([shorten + name]) | ||
3158 | 47 | for dn, sig, summary, rn in _items: | ||
3159 | 48 | items.append(('%s%s' % (prefix, dn), sig, summary, rn)) | ||
3160 | 49 | |||
3161 | 50 | for name in names: | ||
3162 | 51 | if '~' in name: | ||
3163 | 52 | prefix, name = name.split('~') | ||
3164 | 53 | shorten = '~' | ||
3165 | 54 | else: | ||
3166 | 55 | prefix = '' | ||
3167 | 56 | shorten = '' | ||
3168 | 57 | |||
3169 | 58 | try: | ||
3170 | 59 | real_name, obj, parent, _ = import_by_name(name, prefixes=prefixes) | ||
3171 | 60 | except ImportError: | ||
3172 | 61 | self.warn('failed to import %s' % name) | ||
3173 | 62 | continue | ||
3174 | 63 | |||
3175 | 64 | if not inspect.ismodule(obj): | ||
3176 | 65 | _get_items(name) | ||
3177 | 66 | continue | ||
3178 | 67 | |||
3179 | 68 | for member in dir(obj): | ||
3180 | 69 | if member.startswith('_'): | ||
3181 | 70 | continue | ||
3182 | 71 | mobj = getattr(obj, member) | ||
3183 | 72 | if hasattr(mobj, '__module__'): | ||
3184 | 73 | if not mobj.__module__.startswith(real_name): | ||
3185 | 74 | continue # skip imported classes & functions | ||
3186 | 75 | elif hasattr(mobj, '__name__'): | ||
3187 | 76 | if not mobj.__name__.startswith(real_name): | ||
3188 | 77 | continue # skip imported modules | ||
3189 | 78 | else: | ||
3190 | 79 | continue # skip instances | ||
3191 | 80 | _get_items('%s.%s' % (name, member)) | ||
3192 | 81 | |||
3193 | 82 | return items | ||
3194 | 83 | |||
3195 | 84 | |||
3196 | 85 | def setup(app): | ||
3197 | 86 | app.add_directive('automembersummary', AutoMemberSummary) | ||
3198 | 0 | 87 | ||
3199 | === added file 'docs/api/charmhelpers.coordinator.rst' | |||
3200 | --- docs/api/charmhelpers.coordinator.rst 1970-01-01 00:00:00 +0000 | |||
3201 | +++ docs/api/charmhelpers.coordinator.rst 2015-08-13 08:33:21 +0000 | |||
3202 | @@ -0,0 +1,10 @@ | |||
3203 | 1 | charmhelpers.coordinator package | ||
3204 | 2 | ================================ | ||
3205 | 3 | |||
3206 | 4 | charmhelpers.coordinator module | ||
3207 | 5 | ------------------------------- | ||
3208 | 6 | |||
3209 | 7 | .. automodule:: charmhelpers.coordinator | ||
3210 | 8 | :members: | ||
3211 | 9 | :undoc-members: | ||
3212 | 10 | :show-inheritance: | ||
3213 | 0 | 11 | ||
3214 | === added file 'docs/api/charmhelpers.core.decorators.rst' | |||
3215 | --- docs/api/charmhelpers.core.decorators.rst 1970-01-01 00:00:00 +0000 | |||
3216 | +++ docs/api/charmhelpers.core.decorators.rst 2015-08-13 08:33:21 +0000 | |||
3217 | @@ -0,0 +1,7 @@ | |||
3218 | 1 | charmhelpers.core.decorators | ||
3219 | 2 | ============================ | ||
3220 | 3 | |||
3221 | 4 | .. automodule:: charmhelpers.core.decorators | ||
3222 | 5 | :members: | ||
3223 | 6 | :undoc-members: | ||
3224 | 7 | :show-inheritance: | ||
3225 | 0 | 8 | ||
3226 | === added file 'docs/api/charmhelpers.core.fstab.rst' | |||
3227 | --- docs/api/charmhelpers.core.fstab.rst 1970-01-01 00:00:00 +0000 | |||
3228 | +++ docs/api/charmhelpers.core.fstab.rst 2015-08-13 08:33:21 +0000 | |||
3229 | @@ -0,0 +1,7 @@ | |||
3230 | 1 | charmhelpers.core.fstab | ||
3231 | 2 | ======================= | ||
3232 | 3 | |||
3233 | 4 | .. automodule:: charmhelpers.core.fstab | ||
3234 | 5 | :members: | ||
3235 | 6 | :undoc-members: | ||
3236 | 7 | :show-inheritance: | ||
3237 | 0 | 8 | ||
3238 | === added file 'docs/api/charmhelpers.core.hookenv.rst' | |||
3239 | --- docs/api/charmhelpers.core.hookenv.rst 1970-01-01 00:00:00 +0000 | |||
3240 | +++ docs/api/charmhelpers.core.hookenv.rst 2015-08-13 08:33:21 +0000 | |||
3241 | @@ -0,0 +1,12 @@ | |||
3242 | 1 | charmhelpers.core.hookenv | ||
3243 | 2 | ========================= | ||
3244 | 3 | |||
3245 | 4 | .. automembersummary:: | ||
3246 | 5 | :nosignatures: | ||
3247 | 6 | |||
3248 | 7 | ~charmhelpers.core.hookenv | ||
3249 | 8 | |||
3250 | 9 | .. automodule:: charmhelpers.core.hookenv | ||
3251 | 10 | :members: | ||
3252 | 11 | :undoc-members: | ||
3253 | 12 | :show-inheritance: | ||
3254 | 0 | 13 | ||
3255 | === added file 'docs/api/charmhelpers.core.host.rst' | |||
3256 | --- docs/api/charmhelpers.core.host.rst 1970-01-01 00:00:00 +0000 | |||
3257 | +++ docs/api/charmhelpers.core.host.rst 2015-08-13 08:33:21 +0000 | |||
3258 | @@ -0,0 +1,12 @@ | |||
3259 | 1 | charmhelpers.core.host | ||
3260 | 2 | ====================== | ||
3261 | 3 | |||
3262 | 4 | .. automembersummary:: | ||
3263 | 5 | :nosignatures: | ||
3264 | 6 | |||
3265 | 7 | ~charmhelpers.core.host | ||
3266 | 8 | |||
3267 | 9 | .. automodule:: charmhelpers.core.host | ||
3268 | 10 | :members: | ||
3269 | 11 | :undoc-members: | ||
3270 | 12 | :show-inheritance: | ||
3271 | 0 | 13 | ||
3272 | === modified file 'docs/api/charmhelpers.core.rst' | |||
3273 | --- docs/api/charmhelpers.core.rst 2014-08-05 21:28:01 +0000 | |||
3274 | +++ docs/api/charmhelpers.core.rst 2015-08-13 08:33:21 +0000 | |||
3275 | @@ -1,44 +1,17 @@ | |||
3276 | 1 | charmhelpers.core package | 1 | charmhelpers.core package |
3277 | 2 | ========================= | 2 | ========================= |
3278 | 3 | 3 | ||
3317 | 4 | charmhelpers.core.fstab module | 4 | .. toctree:: |
3318 | 5 | ------------------------------ | 5 | |
3319 | 6 | 6 | charmhelpers.core.decorators | |
3320 | 7 | .. automodule:: charmhelpers.core.fstab | 7 | charmhelpers.core.fstab |
3321 | 8 | :members: | 8 | charmhelpers.core.hookenv |
3322 | 9 | :undoc-members: | 9 | charmhelpers.core.host |
3323 | 10 | :show-inheritance: | 10 | charmhelpers.core.strutils |
3324 | 11 | 11 | charmhelpers.core.sysctl | |
3325 | 12 | charmhelpers.core.hookenv module | 12 | charmhelpers.core.templating |
3326 | 13 | -------------------------------- | 13 | charmhelpers.core.unitdata |
3327 | 14 | 14 | charmhelpers.core.services | |
3290 | 15 | .. automodule:: charmhelpers.core.hookenv | ||
3291 | 16 | :members: | ||
3292 | 17 | :undoc-members: | ||
3293 | 18 | :show-inheritance: | ||
3294 | 19 | |||
3295 | 20 | charmhelpers.core.host module | ||
3296 | 21 | ----------------------------- | ||
3297 | 22 | |||
3298 | 23 | .. automodule:: charmhelpers.core.host | ||
3299 | 24 | :members: | ||
3300 | 25 | :undoc-members: | ||
3301 | 26 | :show-inheritance: | ||
3302 | 27 | |||
3303 | 28 | charmhelpers.core.services package | ||
3304 | 29 | ---------------------------------- | ||
3305 | 30 | |||
3306 | 31 | .. automodule:: charmhelpers.core.services.base | ||
3307 | 32 | :members: | ||
3308 | 33 | :undoc-members: | ||
3309 | 34 | :show-inheritance: | ||
3310 | 35 | :special-members: __init__ | ||
3311 | 36 | |||
3312 | 37 | .. automodule:: charmhelpers.core.services.helpers | ||
3313 | 38 | :members: | ||
3314 | 39 | :undoc-members: | ||
3315 | 40 | :show-inheritance: | ||
3316 | 41 | |||
3328 | 42 | 15 | ||
3329 | 43 | .. automodule:: charmhelpers.core | 16 | .. automodule:: charmhelpers.core |
3330 | 44 | :members: | 17 | :members: |
3331 | 45 | 18 | ||
3332 | === added file 'docs/api/charmhelpers.core.services.base.rst' | |||
3333 | --- docs/api/charmhelpers.core.services.base.rst 1970-01-01 00:00:00 +0000 | |||
3334 | +++ docs/api/charmhelpers.core.services.base.rst 2015-08-13 08:33:21 +0000 | |||
3335 | @@ -0,0 +1,12 @@ | |||
3336 | 1 | charmhelpers.core.services.base | ||
3337 | 2 | =============================== | ||
3338 | 3 | |||
3339 | 4 | .. automembersummary:: | ||
3340 | 5 | :nosignatures: | ||
3341 | 6 | |||
3342 | 7 | ~charmhelpers.core.services.base | ||
3343 | 8 | |||
3344 | 9 | .. automodule:: charmhelpers.core.services.base | ||
3345 | 10 | :members: | ||
3346 | 11 | :undoc-members: | ||
3347 | 12 | :show-inheritance: | ||
3348 | 0 | 13 | ||
3349 | === added file 'docs/api/charmhelpers.core.services.helpers.rst' | |||
3350 | --- docs/api/charmhelpers.core.services.helpers.rst 1970-01-01 00:00:00 +0000 | |||
3351 | +++ docs/api/charmhelpers.core.services.helpers.rst 2015-08-13 08:33:21 +0000 | |||
3352 | @@ -0,0 +1,12 @@ | |||
3353 | 1 | charmhelpers.core.services.helpers | ||
3354 | 2 | ================================== | ||
3355 | 3 | |||
3356 | 4 | .. automembersummary:: | ||
3357 | 5 | :nosignatures: | ||
3358 | 6 | |||
3359 | 7 | ~charmhelpers.core.services.helpers | ||
3360 | 8 | |||
3361 | 9 | .. automodule:: charmhelpers.core.services.helpers | ||
3362 | 10 | :members: | ||
3363 | 11 | :undoc-members: | ||
3364 | 12 | :show-inheritance: | ||
3365 | 0 | 13 | ||
3366 | === added file 'docs/api/charmhelpers.core.services.rst' | |||
3367 | --- docs/api/charmhelpers.core.services.rst 1970-01-01 00:00:00 +0000 | |||
3368 | +++ docs/api/charmhelpers.core.services.rst 2015-08-13 08:33:21 +0000 | |||
3369 | @@ -0,0 +1,12 @@ | |||
3370 | 1 | charmhelpers.core.services | ||
3371 | 2 | ========================== | ||
3372 | 3 | |||
3373 | 4 | .. toctree:: | ||
3374 | 5 | |||
3375 | 6 | charmhelpers.core.services.base | ||
3376 | 7 | charmhelpers.core.services.helpers | ||
3377 | 8 | |||
3378 | 9 | .. automodule:: charmhelpers.core.services | ||
3379 | 10 | :members: | ||
3380 | 11 | :undoc-members: | ||
3381 | 12 | :show-inheritance: | ||
3382 | 0 | 13 | ||
3383 | === added file 'docs/api/charmhelpers.core.strutils.rst' | |||
3384 | --- docs/api/charmhelpers.core.strutils.rst 1970-01-01 00:00:00 +0000 | |||
3385 | +++ docs/api/charmhelpers.core.strutils.rst 2015-08-13 08:33:21 +0000 | |||
3386 | @@ -0,0 +1,7 @@ | |||
3387 | 1 | charmhelpers.core.strutils | ||
3388 | 2 | ============================ | ||
3389 | 3 | |||
3390 | 4 | .. automodule:: charmhelpers.core.strutils | ||
3391 | 5 | :members: | ||
3392 | 6 | :undoc-members: | ||
3393 | 7 | :show-inheritance: | ||
3394 | 0 | 8 | ||
3395 | === added file 'docs/api/charmhelpers.core.sysctl.rst' | |||
3396 | --- docs/api/charmhelpers.core.sysctl.rst 1970-01-01 00:00:00 +0000 | |||
3397 | +++ docs/api/charmhelpers.core.sysctl.rst 2015-08-13 08:33:21 +0000 | |||
3398 | @@ -0,0 +1,7 @@ | |||
3399 | 1 | charmhelpers.core.sysctl | ||
3400 | 2 | ============================ | ||
3401 | 3 | |||
3402 | 4 | .. automodule:: charmhelpers.core.sysctl | ||
3403 | 5 | :members: | ||
3404 | 6 | :undoc-members: | ||
3405 | 7 | :show-inheritance: | ||
3406 | 0 | 8 | ||
3407 | === added file 'docs/api/charmhelpers.core.templating.rst' | |||
3408 | --- docs/api/charmhelpers.core.templating.rst 1970-01-01 00:00:00 +0000 | |||
3409 | +++ docs/api/charmhelpers.core.templating.rst 2015-08-13 08:33:21 +0000 | |||
3410 | @@ -0,0 +1,7 @@ | |||
3411 | 1 | charmhelpers.core.templating | ||
3412 | 2 | ============================ | ||
3413 | 3 | |||
3414 | 4 | .. automodule:: charmhelpers.core.templating | ||
3415 | 5 | :members: | ||
3416 | 6 | :undoc-members: | ||
3417 | 7 | :show-inheritance: | ||
3418 | 0 | 8 | ||
3419 | === added file 'docs/api/charmhelpers.core.unitdata.rst' | |||
3420 | --- docs/api/charmhelpers.core.unitdata.rst 1970-01-01 00:00:00 +0000 | |||
3421 | +++ docs/api/charmhelpers.core.unitdata.rst 2015-08-13 08:33:21 +0000 | |||
3422 | @@ -0,0 +1,7 @@ | |||
3423 | 1 | charmhelpers.core.unitdata | ||
3424 | 2 | ========================== | ||
3425 | 3 | |||
3426 | 4 | .. automodule:: charmhelpers.core.unitdata | ||
3427 | 5 | :members: | ||
3428 | 6 | :undoc-members: | ||
3429 | 7 | :show-inheritance: | ||
3430 | 0 | 8 | ||
3431 | === modified file 'docs/api/charmhelpers.rst' | |||
3432 | --- docs/api/charmhelpers.rst 2014-06-09 17:10:38 +0000 | |||
3433 | +++ docs/api/charmhelpers.rst 2015-08-13 08:33:21 +0000 | |||
3434 | @@ -2,12 +2,14 @@ | |||
3435 | 2 | ================= | 2 | ================= |
3436 | 3 | 3 | ||
3437 | 4 | .. toctree:: | 4 | .. toctree:: |
3439 | 5 | :maxdepth: 2 | 5 | :maxdepth: 3 |
3440 | 6 | 6 | ||
3441 | 7 | charmhelpers.core | ||
3442 | 7 | charmhelpers.contrib | 8 | charmhelpers.contrib |
3443 | 8 | charmhelpers.core | ||
3444 | 9 | charmhelpers.fetch | 9 | charmhelpers.fetch |
3445 | 10 | charmhelpers.payload | 10 | charmhelpers.payload |
3446 | 11 | charmhelpers.cli | ||
3447 | 12 | charmhelpers.coordinator | ||
3448 | 11 | 13 | ||
3449 | 12 | .. automodule:: charmhelpers | 14 | .. automodule:: charmhelpers |
3450 | 13 | :members: | 15 | :members: |
3451 | 14 | 16 | ||
3452 | === removed file 'docs/api/modules.rst' | |||
3453 | --- docs/api/modules.rst 2014-06-09 14:56:35 +0000 | |||
3454 | +++ docs/api/modules.rst 1970-01-01 00:00:00 +0000 | |||
3455 | @@ -1,7 +0,0 @@ | |||
3456 | 1 | charmhelpers | ||
3457 | 2 | ============ | ||
3458 | 3 | |||
3459 | 4 | .. toctree:: | ||
3460 | 5 | :maxdepth: 4 | ||
3461 | 6 | |||
3462 | 7 | charmhelpers | ||
3463 | 8 | 0 | ||
3464 | === modified file 'docs/conf.py' | |||
3465 | --- docs/conf.py 2014-09-23 16:34:54 +0000 | |||
3466 | +++ docs/conf.py 2015-08-13 08:33:21 +0000 | |||
3467 | @@ -19,6 +19,7 @@ | |||
3468 | 19 | # add these directories to sys.path here. If the directory is relative to the | 19 | # add these directories to sys.path here. If the directory is relative to the |
3469 | 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. | 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. |
3470 | 21 | sys.path.insert(0, os.path.abspath('../')) | 21 | sys.path.insert(0, os.path.abspath('../')) |
3471 | 22 | sys.path.append(os.path.abspath('_extensions/')) | ||
3472 | 22 | 23 | ||
3473 | 23 | # -- General configuration ------------------------------------------------ | 24 | # -- General configuration ------------------------------------------------ |
3474 | 24 | 25 | ||
3475 | @@ -30,6 +31,8 @@ | |||
3476 | 30 | # ones. | 31 | # ones. |
3477 | 31 | extensions = [ | 32 | extensions = [ |
3478 | 32 | 'sphinx.ext.autodoc', | 33 | 'sphinx.ext.autodoc', |
3479 | 34 | 'sphinx.ext.autosummary', | ||
3480 | 35 | 'automembersummary', | ||
3481 | 33 | ] | 36 | ] |
3482 | 34 | 37 | ||
3483 | 35 | # Add any paths that contain templates here, relative to this directory. | 38 | # Add any paths that contain templates here, relative to this directory. |
3484 | @@ -72,7 +75,7 @@ | |||
3485 | 72 | 75 | ||
3486 | 73 | # List of patterns, relative to source directory, that match files and | 76 | # List of patterns, relative to source directory, that match files and |
3487 | 74 | # directories to ignore when looking for source files. | 77 | # directories to ignore when looking for source files. |
3489 | 75 | exclude_patterns = ['_build'] | 78 | exclude_patterns = ['_build', '_extensions'] |
3490 | 76 | 79 | ||
3491 | 77 | # The reST default role (used for this markup: `text`) to use for all | 80 | # The reST default role (used for this markup: `text`) to use for all |
3492 | 78 | # documents. | 81 | # documents. |
3493 | 79 | 82 | ||
3494 | === modified file 'setup.py' | |||
3495 | --- setup.py 2015-03-04 16:15:18 +0000 | |||
3496 | +++ setup.py 2015-08-13 08:33:21 +0000 | |||
3497 | @@ -14,6 +14,13 @@ | |||
3498 | 14 | 'author': "Ubuntu Developers", | 14 | 'author': "Ubuntu Developers", |
3499 | 15 | 'author_email': "ubuntu-devel-discuss@lists.ubuntu.com", | 15 | 'author_email': "ubuntu-devel-discuss@lists.ubuntu.com", |
3500 | 16 | 'url': "https://code.launchpad.net/charm-helpers", | 16 | 'url': "https://code.launchpad.net/charm-helpers", |
3501 | 17 | 'install_requires': [ | ||
3502 | 18 | 'netaddr', | ||
3503 | 19 | 'PyYAML', | ||
3504 | 20 | 'Tempita', | ||
3505 | 21 | 'Jinja2', | ||
3506 | 22 | 'six', | ||
3507 | 23 | ], | ||
3508 | 17 | 'packages': [ | 24 | 'packages': [ |
3509 | 18 | "charmhelpers", | 25 | "charmhelpers", |
3510 | 19 | "charmhelpers.cli", | 26 | "charmhelpers.cli", |
3511 | @@ -22,13 +29,27 @@ | |||
3512 | 22 | "charmhelpers.fetch", | 29 | "charmhelpers.fetch", |
3513 | 23 | "charmhelpers.payload", | 30 | "charmhelpers.payload", |
3514 | 24 | "charmhelpers.contrib", | 31 | "charmhelpers.contrib", |
3515 | 32 | "charmhelpers.contrib.amulet", | ||
3516 | 25 | "charmhelpers.contrib.ansible", | 33 | "charmhelpers.contrib.ansible", |
3517 | 26 | "charmhelpers.contrib.benchmark", | 34 | "charmhelpers.contrib.benchmark", |
3518 | 27 | "charmhelpers.contrib.charmhelpers", | 35 | "charmhelpers.contrib.charmhelpers", |
3519 | 28 | "charmhelpers.contrib.charmsupport", | 36 | "charmhelpers.contrib.charmsupport", |
3520 | 37 | "charmhelpers.contrib.database", | ||
3521 | 38 | "charmhelpers.contrib.hahelpers", | ||
3522 | 39 | "charmhelpers.contrib.network", | ||
3523 | 40 | "charmhelpers.contrib.network.ovs", | ||
3524 | 41 | "charmhelpers.contrib.openstack", | ||
3525 | 42 | "charmhelpers.contrib.openstack.amulet", | ||
3526 | 43 | "charmhelpers.contrib.openstack.files", | ||
3527 | 44 | "charmhelpers.contrib.openstack.templates", | ||
3528 | 45 | "charmhelpers.contrib.peerstorage", | ||
3529 | 46 | "charmhelpers.contrib.python", | ||
3530 | 29 | "charmhelpers.contrib.saltstack", | 47 | "charmhelpers.contrib.saltstack", |
3532 | 30 | "charmhelpers.contrib.hahelpers", | 48 | "charmhelpers.contrib.ssl", |
3533 | 49 | "charmhelpers.contrib.storage", | ||
3534 | 50 | "charmhelpers.contrib.storage.linux", | ||
3535 | 31 | "charmhelpers.contrib.templating", | 51 | "charmhelpers.contrib.templating", |
3536 | 52 | "charmhelpers.contrib.unison", | ||
3537 | 32 | ], | 53 | ], |
3538 | 33 | 'scripts': [ | 54 | 'scripts': [ |
3539 | 34 | "bin/chlp", | 55 | "bin/chlp", |
3540 | 35 | 56 | ||
3541 | === modified file 'test_requirements.txt' | |||
3542 | --- test_requirements.txt 2014-11-25 15:07:02 +0000 | |||
3543 | +++ test_requirements.txt 2015-08-13 08:33:21 +0000 | |||
3544 | @@ -3,10 +3,12 @@ | |||
3545 | 3 | pip | 3 | pip |
3546 | 4 | distribute | 4 | distribute |
3547 | 5 | coverage>=3.6 | 5 | coverage>=3.6 |
3549 | 6 | mock>=1.0.1 | 6 | mock>=1.0.1,<1.1.0 |
3550 | 7 | nose>=1.3.1 | 7 | nose>=1.3.1 |
3551 | 8 | flake8 | 8 | flake8 |
3552 | 9 | testtools==0.9.14 # Before dependent on modern 'six' | 9 | testtools==0.9.14 # Before dependent on modern 'six' |
3553 | 10 | amulet | ||
3554 | 11 | distro-info | ||
3555 | 10 | # | 12 | # |
3556 | 11 | # Specify precise versions of runtime dependencies where possible. | 13 | # Specify precise versions of runtime dependencies where possible. |
3557 | 12 | netaddr==0.7.10 # trusty. precise is 0.7.5, but not in pypi. | 14 | netaddr==0.7.10 # trusty. precise is 0.7.5, but not in pypi. |
3558 | 13 | 15 | ||
3559 | === modified file 'tests/cli/test_cmdline.py' | |||
3560 | --- tests/cli/test_cmdline.py 2014-11-25 15:04:52 +0000 | |||
3561 | +++ tests/cli/test_cmdline.py 2015-08-13 08:33:21 +0000 | |||
3562 | @@ -5,6 +5,7 @@ | |||
3563 | 5 | from mock import ( | 5 | from mock import ( |
3564 | 6 | patch, | 6 | patch, |
3565 | 7 | MagicMock, | 7 | MagicMock, |
3566 | 8 | ANY, | ||
3567 | 8 | ) | 9 | ) |
3568 | 9 | import json | 10 | import json |
3569 | 10 | from pprint import pformat | 11 | from pprint import pformat |
3570 | @@ -87,15 +88,61 @@ | |||
3571 | 87 | @self.cl.subcommand() | 88 | @self.cl.subcommand() |
3572 | 88 | def bar(x, y=None, *vargs): | 89 | def bar(x, y=None, *vargs): |
3573 | 89 | "A function that does work." | 90 | "A function that does work." |
3583 | 90 | self.bar_called = True | 91 | self.assertEqual(x, 'baz') |
3584 | 91 | return "qux" | 92 | self.assertEqual(y, 'why') |
3585 | 92 | 93 | self.assertEqual(vargs, ('mux', 'zob')) | |
3586 | 93 | args = ['foo', 'bar', 'baz'] | 94 | self.bar_called = True |
3587 | 94 | self.cl.formatter = MagicMock() | 95 | return "qux" |
3588 | 95 | with patch("sys.argv", args): | 96 | |
3589 | 96 | self.cl.run() | 97 | args = ['chlp', 'bar', '--y', 'why', 'baz', 'mux', 'zob'] |
3590 | 97 | self.assertTrue(self.bar_called) | 98 | self.cl.formatter = MagicMock() |
3591 | 98 | self.assertTrue(self.cl.formatter.format_output.called) | 99 | with patch("sys.argv", args): |
3592 | 100 | with patch("charmhelpers.core.unitdata._KV") as _KV: | ||
3593 | 101 | self.cl.run() | ||
3594 | 102 | assert _KV.flush.called | ||
3595 | 103 | self.assertTrue(self.bar_called) | ||
3596 | 104 | self.cl.formatter.format_output.assert_called_once_with('qux', ANY) | ||
3597 | 105 | |||
3598 | 106 | def test_no_output(self): | ||
3599 | 107 | self.bar_called = False | ||
3600 | 108 | |||
3601 | 109 | @self.cl.subcommand() | ||
3602 | 110 | @self.cl.no_output | ||
3603 | 111 | def bar(x, y=None, *vargs): | ||
3604 | 112 | "A function that does work." | ||
3605 | 113 | self.bar_called = True | ||
3606 | 114 | return "qux" | ||
3607 | 115 | |||
3608 | 116 | args = ['foo', 'bar', 'baz'] | ||
3609 | 117 | self.cl.formatter = MagicMock() | ||
3610 | 118 | with patch("sys.argv", args): | ||
3611 | 119 | self.cl.run() | ||
3612 | 120 | self.assertTrue(self.bar_called) | ||
3613 | 121 | self.cl.formatter.format_output.assert_called_once_with('', ANY) | ||
3614 | 122 | |||
3615 | 123 | def test_test_command(self): | ||
3616 | 124 | self.bar_called = False | ||
3617 | 125 | self.bar_result = True | ||
3618 | 126 | |||
3619 | 127 | @self.cl.subcommand() | ||
3620 | 128 | @self.cl.test_command | ||
3621 | 129 | def bar(x, y=None, *vargs): | ||
3622 | 130 | "A function that does work." | ||
3623 | 131 | self.bar_called = True | ||
3624 | 132 | return self.bar_result | ||
3625 | 133 | |||
3626 | 134 | args = ['foo', 'bar', 'baz'] | ||
3627 | 135 | self.cl.formatter = MagicMock() | ||
3628 | 136 | with patch("sys.argv", args): | ||
3629 | 137 | self.cl.run() | ||
3630 | 138 | self.assertTrue(self.bar_called) | ||
3631 | 139 | self.assertEqual(self.cl.exit_code, 0) | ||
3632 | 140 | self.cl.formatter.format_output.assert_called_once_with('', ANY) | ||
3633 | 141 | |||
3634 | 142 | self.bar_result = False | ||
3635 | 143 | with patch("sys.argv", args): | ||
3636 | 144 | self.cl.run() | ||
3637 | 145 | self.assertEqual(self.cl.exit_code, 1) | ||
3638 | 99 | 146 | ||
3639 | 100 | 147 | ||
3640 | 101 | class OutputFormatterTest(TestCase): | 148 | class OutputFormatterTest(TestCase): |
3641 | 102 | 149 | ||
3642 | === added directory 'tests/contrib/amulet' | |||
3643 | === added file 'tests/contrib/amulet/test_utils.py' | |||
3644 | --- tests/contrib/amulet/test_utils.py 1970-01-01 00:00:00 +0000 | |||
3645 | +++ tests/contrib/amulet/test_utils.py 2015-08-13 08:33:21 +0000 | |||
3646 | @@ -0,0 +1,105 @@ | |||
3647 | 1 | # Copyright 2015 Canonical Ltd. | ||
3648 | 2 | # | ||
3649 | 3 | # Authors: | ||
3650 | 4 | # Adam Collard <adam.collard@canonical.com> | ||
3651 | 5 | |||
3652 | 6 | import unittest | ||
3653 | 7 | |||
3654 | 8 | from charmhelpers.contrib.amulet.utils import AmuletUtils | ||
3655 | 9 | |||
3656 | 10 | |||
3657 | 11 | class FakeSentry(object): | ||
3658 | 12 | |||
3659 | 13 | commands = {} | ||
3660 | 14 | |||
3661 | 15 | info = {"unit_name": "foo"} | ||
3662 | 16 | |||
3663 | 17 | def run(self, command): | ||
3664 | 18 | return self.commands[command] | ||
3665 | 19 | |||
3666 | 20 | |||
3667 | 21 | class ValidateServicesByNameTestCase(unittest.TestCase): | ||
3668 | 22 | |||
3669 | 23 | def setUp(self): | ||
3670 | 24 | self.utils = AmuletUtils() | ||
3671 | 25 | self.sentry_unit = FakeSentry() | ||
3672 | 26 | |||
3673 | 27 | def test_errors_for_unknown_upstart_service(self): | ||
3674 | 28 | """ | ||
3675 | 29 | Returns a message if the Upstart service is unknown. | ||
3676 | 30 | """ | ||
3677 | 31 | self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0 | ||
3678 | 32 | self.sentry_unit.commands["sudo status foo"] = ( | ||
3679 | 33 | "status: Unknown job: foo", 1) | ||
3680 | 34 | |||
3681 | 35 | result = self.utils.validate_services_by_name( | ||
3682 | 36 | {self.sentry_unit: ["foo"]}) | ||
3683 | 37 | self.assertIsNotNone(result) | ||
3684 | 38 | |||
3685 | 39 | def test_none_for_started_upstart_service(self): | ||
3686 | 40 | """ | ||
3687 | 41 | Returns None if the Upstart service is running. | ||
3688 | 42 | """ | ||
3689 | 43 | self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0 | ||
3690 | 44 | self.sentry_unit.commands["sudo status foo"] = ( | ||
3691 | 45 | "foo start/running, process 42", 0) | ||
3692 | 46 | |||
3693 | 47 | result = self.utils.validate_services_by_name( | ||
3694 | 48 | {self.sentry_unit: ["foo"]}) | ||
3695 | 49 | self.assertIsNone(result) | ||
3696 | 50 | |||
3697 | 51 | def test_errors_for_stopped_upstart_service(self): | ||
3698 | 52 | """ | ||
3699 | 53 | Returns a message if the Upstart service is stopped. | ||
3700 | 54 | """ | ||
3701 | 55 | self.sentry_unit.commands["lsb_release -cs"] = "trusty", 0 | ||
3702 | 56 | self.sentry_unit.commands["sudo status foo"] = "foo stop/waiting", 0 | ||
3703 | 57 | |||
3704 | 58 | result = self.utils.validate_services_by_name( | ||
3705 | 59 | {self.sentry_unit: ["foo"]}) | ||
3706 | 60 | self.assertIsNotNone(result) | ||
3707 | 61 | |||
3708 | 62 | def test_errors_for_unknown_systemd_service(self): | ||
3709 | 63 | """ | ||
3710 | 64 | Returns a message if a systemd service is unknown. | ||
3711 | 65 | """ | ||
3712 | 66 | self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0 | ||
3713 | 67 | self.sentry_unit.commands["sudo service foo status"] = (u"""\ | ||
3714 | 68 | \u25cf foo.service | ||
3715 | 69 | Loaded: not-found (Reason: No such file or directory) | ||
3716 | 70 | Active: inactive (dead) | ||
3717 | 71 | """, 3) | ||
3718 | 72 | |||
3719 | 73 | result = self.utils.validate_services_by_name({ | ||
3720 | 74 | self.sentry_unit: ["foo"]}) | ||
3721 | 75 | self.assertIsNotNone(result) | ||
3722 | 76 | |||
3723 | 77 | def test_none_for_started_systemd_service(self): | ||
3724 | 78 | """ | ||
3725 | 79 | Returns None if a systemd service is running. | ||
3726 | 80 | """ | ||
3727 | 81 | self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0 | ||
3728 | 82 | self.sentry_unit.commands["sudo service foo status"] = (u"""\ | ||
3729 | 83 | \u25cf foo.service - Foo | ||
3730 | 84 | Loaded: loaded (/lib/systemd/system/foo.service; enabled) | ||
3731 | 85 | Active: active (exited) since Thu 1970-01-01 00:00:00 UTC; 42h 42min ago | ||
3732 | 86 | Main PID: 3 (code=exited, status=0/SUCCESS) | ||
3733 | 87 | CGroup: /system.slice/foo.service | ||
3734 | 88 | """, 0) | ||
3735 | 89 | result = self.utils.validate_services_by_name( | ||
3736 | 90 | {self.sentry_unit: ["foo"]}) | ||
3737 | 91 | self.assertIsNone(result) | ||
3738 | 92 | |||
3739 | 93 | def test_errors_for_stopped_systemd_service(self): | ||
3740 | 94 | """ | ||
3741 | 95 | Returns a message if a systemd service is stopped. | ||
3742 | 96 | """ | ||
3743 | 97 | self.sentry_unit.commands["lsb_release -cs"] = "vivid", 0 | ||
3744 | 98 | self.sentry_unit.commands["sudo service foo status"] = (u"""\ | ||
3745 | 99 | \u25cf foo.service - Foo | ||
3746 | 100 | Loaded: loaded (/lib/systemd/system/foo.service; disabled) | ||
3747 | 101 | Active: inactive (dead) | ||
3748 | 102 | """, 3) | ||
3749 | 103 | result = self.utils.validate_services_by_name( | ||
3750 | 104 | {self.sentry_unit: ["foo"]}) | ||
3751 | 105 | self.assertIsNotNone(result) | ||
3752 | 0 | 106 | ||
3753 | === modified file 'tests/contrib/benchmark/test_benchmark.py' | |||
3754 | --- tests/contrib/benchmark/test_benchmark.py 2015-04-24 16:07:29 +0000 | |||
3755 | +++ tests/contrib/benchmark/test_benchmark.py 2015-08-13 08:33:21 +0000 | |||
3756 | @@ -1,3 +1,8 @@ | |||
3757 | 1 | from functools import partial | ||
3758 | 2 | from os.path import join | ||
3759 | 3 | from tempfile import mkdtemp | ||
3760 | 4 | from shutil import rmtree | ||
3761 | 5 | |||
3762 | 1 | import mock | 6 | import mock |
3763 | 2 | from testtools import TestCase | 7 | from testtools import TestCase |
3764 | 3 | # import unittest | 8 | # import unittest |
3765 | @@ -33,7 +38,8 @@ | |||
3766 | 33 | self.fake_relation = FakeRelation(FAKE_RELATION) | 38 | self.fake_relation = FakeRelation(FAKE_RELATION) |
3767 | 34 | # self.hook_name.return_value = 'benchmark-relation-changed' | 39 | # self.hook_name.return_value = 'benchmark-relation-changed' |
3768 | 35 | 40 | ||
3770 | 36 | self.relation_get.side_effect = self.fake_relation.get | 41 | self.relation_get.side_effect = partial( |
3771 | 42 | self.fake_relation.get, rid="benchmark:0", unit="benchmark/0") | ||
3772 | 37 | self.relation_ids.side_effect = self.fake_relation.relation_ids | 43 | self.relation_ids.side_effect = self.fake_relation.relation_ids |
3773 | 38 | 44 | ||
3774 | 39 | def _patch(self, method): | 45 | def _patch(self, method): |
3775 | @@ -87,34 +93,32 @@ | |||
3776 | 87 | check_call.assert_any_call(['action-set', 'baz.foo=1']) | 93 | check_call.assert_any_call(['action-set', 'baz.foo=1']) |
3777 | 88 | check_call.assert_any_call(['action-set', 'baz.bar=2']) | 94 | check_call.assert_any_call(['action-set', 'baz.bar=2']) |
3778 | 89 | 95 | ||
3779 | 90 | @mock.patch('charmhelpers.contrib.benchmark.relation_get') | ||
3780 | 91 | @mock.patch('charmhelpers.contrib.benchmark.relation_set') | ||
3781 | 92 | @mock.patch('charmhelpers.contrib.benchmark.relation_ids') | 96 | @mock.patch('charmhelpers.contrib.benchmark.relation_ids') |
3782 | 93 | @mock.patch('charmhelpers.contrib.benchmark.in_relation_hook') | 97 | @mock.patch('charmhelpers.contrib.benchmark.in_relation_hook') |
3784 | 94 | def test_benchmark_init(self, in_relation_hook, relation_ids, relation_set, relation_get): | 98 | def test_benchmark_init(self, in_relation_hook, relation_ids): |
3785 | 95 | 99 | ||
3786 | 96 | in_relation_hook.return_value = True | 100 | in_relation_hook.return_value = True |
3787 | 97 | relation_ids.return_value = ['benchmark:0'] | 101 | relation_ids.return_value = ['benchmark:0'] |
3788 | 98 | actions = ['asdf', 'foobar'] | 102 | actions = ['asdf', 'foobar'] |
3789 | 99 | 103 | ||
3791 | 100 | with patch_open() as (_open, _file): | 104 | tempdir = mkdtemp(prefix=self.__class__.__name__) |
3792 | 105 | self.addCleanup(rmtree, tempdir) | ||
3793 | 106 | conf_path = join(tempdir, "benchmark.conf") | ||
3794 | 107 | with mock.patch.object(Benchmark, "BENCHMARK_CONF", conf_path): | ||
3795 | 101 | b = Benchmark(actions) | 108 | b = Benchmark(actions) |
3796 | 102 | 109 | ||
3797 | 103 | self.assertIsInstance(b, Benchmark) | 110 | self.assertIsInstance(b, Benchmark) |
3798 | 104 | 111 | ||
3801 | 105 | self.assertTrue(relation_get.called) | 112 | self.assertTrue(self.relation_get.called) |
3802 | 106 | self.assertTrue(relation_set.called) | 113 | self.assertTrue(self.relation_set.called) |
3803 | 107 | 114 | ||
3804 | 108 | relation_ids.assert_called_once_with('benchmark') | 115 | relation_ids.assert_called_once_with('benchmark') |
3805 | 109 | 116 | ||
3810 | 110 | for key in b.required_keys: | 117 | self.relation_set.assert_called_once_with( |
3807 | 111 | relation_get.assert_any_call(key) | ||
3808 | 112 | |||
3809 | 113 | relation_set.assert_called_once_with( | ||
3811 | 114 | relation_id='benchmark:0', | 118 | relation_id='benchmark:0', |
3812 | 115 | relation_settings={'benchmarks': ",".join(actions)} | 119 | relation_settings={'benchmarks': ",".join(actions)} |
3813 | 116 | ) | 120 | ) |
3814 | 117 | 121 | ||
3816 | 118 | _open.assert_called_with('/etc/benchmark.conf', 'w') | 122 | conf_contents = open(conf_path).readlines() |
3817 | 119 | for key, val in iter(FAKE_RELATION['benchmark:0']['benchmark/0'].items()): | 123 | for key, val in iter(FAKE_RELATION['benchmark:0']['benchmark/0'].items()): |
3819 | 120 | _file.write.assert_any_called("%s=%s\n" % (key, val)) | 124 | self.assertIn("%s=%s\n" % (key, val), conf_contents) |
3820 | 121 | 125 | ||
3821 | === modified file 'tests/contrib/hahelpers/test_apache_utils.py' | |||
3822 | --- tests/contrib/hahelpers/test_apache_utils.py 2014-09-24 09:42:52 +0000 | |||
3823 | +++ tests/contrib/hahelpers/test_apache_utils.py 2015-08-13 08:33:21 +0000 | |||
3824 | @@ -115,4 +115,4 @@ | |||
3825 | 115 | apache_utils.install_ca_cert(cert) | 115 | apache_utils.install_ca_cert(cert) |
3826 | 116 | _open.assert_called_with('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', 'w') | 116 | _open.assert_called_with('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', 'w') |
3827 | 117 | _file.write.assert_called_with(cert) | 117 | _file.write.assert_called_with(cert) |
3829 | 118 | self.subprocess.assertCalledWith(['update-ca-certificates', '--fresh']) | 118 | self.subprocess.check_call.assert_called_with(['update-ca-certificates', '--fresh']) |
3830 | 119 | 119 | ||
3831 | === modified file 'tests/contrib/network/test_ufw.py' | |||
3832 | --- tests/contrib/network/test_ufw.py 2015-02-12 20:08:28 +0000 | |||
3833 | +++ tests/contrib/network/test_ufw.py 2015-08-13 08:33:21 +0000 | |||
3834 | @@ -31,6 +31,12 @@ | |||
3835 | 31 | xt_LOG 17702 0 | 31 | xt_LOG 17702 0 |
3836 | 32 | xt_limit 12711 0 | 32 | xt_limit 12711 0 |
3837 | 33 | """ | 33 | """ |
3838 | 34 | DEFAULT_POLICY_OUTPUT = """Default incoming policy changed to 'deny' | ||
3839 | 35 | (be sure to update your rules accordingly) | ||
3840 | 36 | """ | ||
3841 | 37 | DEFAULT_POLICY_OUTPUT_OUTGOING = """Default outgoing policy changed to 'allow' | ||
3842 | 38 | (be sure to update your rules accordingly) | ||
3843 | 39 | """ | ||
3844 | 34 | 40 | ||
3845 | 35 | 41 | ||
3846 | 36 | class TestUFW(unittest.TestCase): | 42 | class TestUFW(unittest.TestCase): |
3847 | @@ -194,6 +200,24 @@ | |||
3848 | 194 | @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') | 200 | @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') |
3849 | 195 | @mock.patch('charmhelpers.core.hookenv.log') | 201 | @mock.patch('charmhelpers.core.hookenv.log') |
3850 | 196 | @mock.patch('subprocess.Popen') | 202 | @mock.patch('subprocess.Popen') |
3851 | 203 | def test_modify_access_with_index(self, popen, log, is_enabled): | ||
3852 | 204 | is_enabled.return_value = True | ||
3853 | 205 | p = mock.Mock() | ||
3854 | 206 | p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), | ||
3855 | 207 | 'returncode': 0}) | ||
3856 | 208 | popen.return_value = p | ||
3857 | 209 | |||
3858 | 210 | ufw.modify_access('127.0.0.1', dst='127.0.0.1', port='80', index=1) | ||
3859 | 211 | popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from', | ||
3860 | 212 | '127.0.0.1', 'to', '127.0.0.1', 'port', '80'], | ||
3861 | 213 | stdout=subprocess.PIPE) | ||
3862 | 214 | log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 ' | ||
3863 | 215 | 'to 127.0.0.1 port 80'), level='DEBUG') | ||
3864 | 216 | log.assert_any_call('stdout', level='INFO') | ||
3865 | 217 | |||
3866 | 218 | @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') | ||
3867 | 219 | @mock.patch('charmhelpers.core.hookenv.log') | ||
3868 | 220 | @mock.patch('subprocess.Popen') | ||
3869 | 197 | def test_grant_access(self, popen, log, is_enabled): | 221 | def test_grant_access(self, popen, log, is_enabled): |
3870 | 198 | is_enabled.return_value = True | 222 | is_enabled.return_value = True |
3871 | 199 | p = mock.Mock() | 223 | p = mock.Mock() |
3872 | @@ -212,6 +236,24 @@ | |||
3873 | 212 | @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') | 236 | @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') |
3874 | 213 | @mock.patch('charmhelpers.core.hookenv.log') | 237 | @mock.patch('charmhelpers.core.hookenv.log') |
3875 | 214 | @mock.patch('subprocess.Popen') | 238 | @mock.patch('subprocess.Popen') |
3876 | 239 | def test_grant_access_with_index(self, popen, log, is_enabled): | ||
3877 | 240 | is_enabled.return_value = True | ||
3878 | 241 | p = mock.Mock() | ||
3879 | 242 | p.configure_mock(**{'communicate.return_value': ('stdout', 'stderr'), | ||
3880 | 243 | 'returncode': 0}) | ||
3881 | 244 | popen.return_value = p | ||
3882 | 245 | |||
3883 | 246 | ufw.grant_access('127.0.0.1', dst='127.0.0.1', port='80', index=1) | ||
3884 | 247 | popen.assert_any_call(['ufw', 'insert', '1', 'allow', 'from', | ||
3885 | 248 | '127.0.0.1', 'to', '127.0.0.1', 'port', '80'], | ||
3886 | 249 | stdout=subprocess.PIPE) | ||
3887 | 250 | log.assert_any_call(('ufw allow: ufw insert 1 allow from 127.0.0.1 ' | ||
3888 | 251 | 'to 127.0.0.1 port 80'), level='DEBUG') | ||
3889 | 252 | log.assert_any_call('stdout', level='INFO') | ||
3890 | 253 | |||
3891 | 254 | @mock.patch('charmhelpers.contrib.network.ufw.is_enabled') | ||
3892 | 255 | @mock.patch('charmhelpers.core.hookenv.log') | ||
3893 | 256 | @mock.patch('subprocess.Popen') | ||
3894 | 215 | def test_revoke_access(self, popen, log, is_enabled): | 257 | def test_revoke_access(self, popen, log, is_enabled): |
3895 | 216 | is_enabled.return_value = True | 258 | is_enabled.return_value = True |
3896 | 217 | p = mock.Mock() | 259 | p = mock.Mock() |
3897 | @@ -366,3 +408,33 @@ | |||
3898 | 366 | is_enabled.return_value = False | 408 | is_enabled.return_value = False |
3899 | 367 | isdir.return_value = True | 409 | isdir.return_value = True |
3900 | 368 | ufw.enable() | 410 | ufw.enable() |
3901 | 411 | |||
3902 | 412 | @mock.patch('charmhelpers.core.hookenv.log') | ||
3903 | 413 | @mock.patch('subprocess.check_output') | ||
3904 | 414 | def test_change_default_policy(self, check_output, log): | ||
3905 | 415 | check_output.return_value = DEFAULT_POLICY_OUTPUT | ||
3906 | 416 | self.assertTrue(ufw.default_policy()) | ||
3907 | 417 | check_output.assert_any_call(['ufw', 'default', 'deny', 'incoming']) | ||
3908 | 418 | |||
3909 | 419 | @mock.patch('charmhelpers.core.hookenv.log') | ||
3910 | 420 | @mock.patch('subprocess.check_output') | ||
3911 | 421 | def test_change_default_policy_allow_outgoing(self, check_output, log): | ||
3912 | 422 | check_output.return_value = DEFAULT_POLICY_OUTPUT_OUTGOING | ||
3913 | 423 | self.assertTrue(ufw.default_policy('allow', 'outgoing')) | ||
3914 | 424 | check_output.assert_any_call(['ufw', 'default', 'allow', 'outgoing']) | ||
3915 | 425 | |||
3916 | 426 | @mock.patch('charmhelpers.core.hookenv.log') | ||
3917 | 427 | @mock.patch('subprocess.check_output') | ||
3918 | 428 | def test_change_default_policy_unexpected_output(self, check_output, log): | ||
3919 | 429 | check_output.return_value = "asdf" | ||
3920 | 430 | self.assertFalse(ufw.default_policy()) | ||
3921 | 431 | |||
3922 | 432 | @mock.patch('charmhelpers.core.hookenv.log') | ||
3923 | 433 | @mock.patch('subprocess.check_output') | ||
3924 | 434 | def test_change_default_policy_wrong_policy(self, check_output, log): | ||
3925 | 435 | self.assertRaises(ufw.UFWError, ufw.default_policy, 'asdf') | ||
3926 | 436 | |||
3927 | 437 | @mock.patch('charmhelpers.core.hookenv.log') | ||
3928 | 438 | @mock.patch('subprocess.check_output') | ||
3929 | 439 | def test_change_default_policy_wrong_direction(self, check_output, log): | ||
3930 | 440 | self.assertRaises(ufw.UFWError, ufw.default_policy, 'allow', 'asdf') | ||
3931 | 369 | 441 | ||
3932 | === modified file 'tests/contrib/openstack/test_openstack_utils.py' | |||
3933 | --- tests/contrib/openstack/test_openstack_utils.py 2015-05-11 18:53:44 +0000 | |||
3934 | +++ tests/contrib/openstack/test_openstack_utils.py 2015-08-13 08:33:21 +0000 | |||
3935 | @@ -26,6 +26,16 @@ | |||
3936 | 26 | } | 26 | } |
3937 | 27 | 27 | ||
3938 | 28 | FAKE_REPO = { | 28 | FAKE_REPO = { |
3939 | 29 | 'neutron-common': { | ||
3940 | 30 | 'pkg_vers': '2:7.0.0-0ubuntu1', | ||
3941 | 31 | 'os_release': 'liberty', | ||
3942 | 32 | 'os_version': '2015.2' | ||
3943 | 33 | }, | ||
3944 | 34 | 'nova-common': { | ||
3945 | 35 | 'pkg_vers': '2:12.0.0~b1-0ubuntu1', | ||
3946 | 36 | 'os_release': 'liberty', | ||
3947 | 37 | 'os_version': '2015.2' | ||
3948 | 38 | }, | ||
3949 | 29 | 'nova-common': { | 39 | 'nova-common': { |
3950 | 30 | 'pkg_vers': '2012.2.3-0ubuntu2.1', | 40 | 'pkg_vers': '2012.2.3-0ubuntu2.1', |
3951 | 31 | 'os_release': 'folsom', | 41 | 'os_release': 'folsom', |
3952 | @@ -489,8 +499,8 @@ | |||
3953 | 489 | expected_f = '/var/lib/juju/units/testing-foo-0/charm/scripts/scriptrc' | 499 | expected_f = '/var/lib/juju/units/testing-foo-0/charm/scripts/scriptrc' |
3954 | 490 | _open.assert_called_with(expected_f, 'wb') | 500 | _open.assert_called_with(expected_f, 'wb') |
3955 | 491 | _mkdir.assert_called_with(os.path.dirname(expected_f)) | 501 | _mkdir.assert_called_with(os.path.dirname(expected_f)) |
3958 | 492 | for line in scriptrc: | 502 | _file.__enter__().write.assert_has_calls( |
3959 | 493 | _file.__enter__().write.assert_has_calls(call(line)) | 503 | list(call(line) for line in scriptrc), any_order=True) |
3960 | 494 | 504 | ||
3961 | 495 | @patch.object(openstack, 'lsb_release') | 505 | @patch.object(openstack, 'lsb_release') |
3962 | 496 | @patch.object(openstack, 'get_os_version_package') | 506 | @patch.object(openstack, 'get_os_version_package') |
3963 | @@ -642,11 +652,13 @@ | |||
3964 | 642 | error_out.assert_called_with( | 652 | error_out.assert_called_with( |
3965 | 643 | 'openstack-origin-git key \'%s\' is missing' % key) | 653 | 'openstack-origin-git key \'%s\' is missing' % key) |
3966 | 644 | 654 | ||
3967 | 655 | @patch('os.path.join') | ||
3968 | 645 | @patch.object(openstack, 'error_out') | 656 | @patch.object(openstack, 'error_out') |
3969 | 646 | @patch.object(openstack, '_git_clone_and_install_single') | 657 | @patch.object(openstack, '_git_clone_and_install_single') |
3970 | 658 | @patch.object(openstack, 'pip_install') | ||
3971 | 647 | @patch.object(openstack, 'pip_create_virtualenv') | 659 | @patch.object(openstack, 'pip_create_virtualenv') |
3974 | 648 | def test_git_clone_and_install_errors(self, pip_venv, git_install_single, | 660 | def test_git_clone_and_install_errors(self, pip_venv, pip_install, |
3975 | 649 | error_out): | 661 | git_install_single, error_out, join): |
3976 | 650 | git_missing_repos = """ | 662 | git_missing_repos = """ |
3977 | 651 | repostories: | 663 | repostories: |
3978 | 652 | - {name: requirements, | 664 | - {name: requirements, |
3979 | @@ -704,19 +716,26 @@ | |||
3980 | 704 | openstack.git_clone_and_install(git_wrong_order_2, 'keystone', depth=1) | 716 | openstack.git_clone_and_install(git_wrong_order_2, 'keystone', depth=1) |
3981 | 705 | error_out.assert_called_with('requirements git repo must be specified first') | 717 | error_out.assert_called_with('requirements git repo must be specified first') |
3982 | 706 | 718 | ||
3983 | 719 | @patch('os.path.join') | ||
3984 | 707 | @patch.object(openstack, 'charm_dir') | 720 | @patch.object(openstack, 'charm_dir') |
3985 | 708 | @patch.object(openstack, 'error_out') | 721 | @patch.object(openstack, 'error_out') |
3986 | 709 | @patch.object(openstack, '_git_clone_and_install_single') | 722 | @patch.object(openstack, '_git_clone_and_install_single') |
3987 | 723 | @patch.object(openstack, 'pip_install') | ||
3988 | 710 | @patch.object(openstack, 'pip_create_virtualenv') | 724 | @patch.object(openstack, 'pip_create_virtualenv') |
3991 | 711 | def test_git_clone_and_install_success(self, pip_venv, _git_install_single, | 725 | def test_git_clone_and_install_success(self, pip_venv, pip_install, |
3992 | 712 | error_out, charm_dir): | 726 | _git_install_single, error_out, |
3993 | 727 | charm_dir, join): | ||
3994 | 713 | proj = 'keystone' | 728 | proj = 'keystone' |
3995 | 714 | charm_dir.return_value = '/var/lib/juju/units/testing-foo-0/charm' | 729 | charm_dir.return_value = '/var/lib/juju/units/testing-foo-0/charm' |
3996 | 715 | # the following sets the global requirements_dir | 730 | # the following sets the global requirements_dir |
3997 | 716 | _git_install_single.return_value = '/mnt/openstack-git/requirements' | 731 | _git_install_single.return_value = '/mnt/openstack-git/requirements' |
3998 | 732 | join.return_value = '/mnt/openstack-git/venv' | ||
3999 | 717 | 733 | ||
4000 | 718 | openstack.git_clone_and_install(openstack_origin_git, proj, depth=1) | 734 | openstack.git_clone_and_install(openstack_origin_git, proj, depth=1) |
4001 | 719 | self.assertTrue(pip_venv.called) | 735 | self.assertTrue(pip_venv.called) |
4002 | 736 | pip_install.assert_called_with('setuptools', upgrade=True, | ||
4003 | 737 | proxy=None, | ||
4004 | 738 | venv='/mnt/openstack-git/venv') | ||
4005 | 720 | self.assertTrue(_git_install_single.call_count == 2) | 739 | self.assertTrue(_git_install_single.call_count == 2) |
4006 | 721 | expected = [ | 740 | expected = [ |
4007 | 722 | call('git://git.openstack.org/openstack/requirements', | 741 | call('git://git.openstack.org/openstack/requirements', |
4008 | @@ -775,6 +794,7 @@ | |||
4009 | 775 | parent_dir = '/mnt/openstack-git/' | 794 | parent_dir = '/mnt/openstack-git/' |
4010 | 776 | http_proxy = 'http://squid-proxy-url' | 795 | http_proxy = 'http://squid-proxy-url' |
4011 | 777 | dest_dir = '/mnt/openstack-git' | 796 | dest_dir = '/mnt/openstack-git' |
4012 | 797 | venv_dir = '/mnt/openstack-git' | ||
4013 | 778 | reqs_dir = '/mnt/openstack-git/requirements-dir' | 798 | reqs_dir = '/mnt/openstack-git/requirements-dir' |
4014 | 779 | join.return_value = dest_dir | 799 | join.return_value = dest_dir |
4015 | 780 | openstack.requirements_dir = reqs_dir | 800 | openstack.requirements_dir = reqs_dir |
4016 | @@ -786,23 +806,27 @@ | |||
4017 | 786 | mkdir.assert_called_with(parent_dir) | 806 | mkdir.assert_called_with(parent_dir) |
4018 | 787 | install_remote.assert_called_with(repo, dest=parent_dir, depth=1, | 807 | install_remote.assert_called_with(repo, dest=parent_dir, depth=1, |
4019 | 788 | branch=branch) | 808 | branch=branch) |
4021 | 789 | _git_update_reqs.assert_called_with(dest_dir, reqs_dir) | 809 | _git_update_reqs.assert_called_with(venv_dir, dest_dir, reqs_dir) |
4022 | 790 | pip_install.assert_called_with(dest_dir, venv='/mnt/openstack-git', | 810 | pip_install.assert_called_with(dest_dir, venv='/mnt/openstack-git', |
4023 | 791 | proxy='http://squid-proxy-url') | 811 | proxy='http://squid-proxy-url') |
4024 | 792 | 812 | ||
4025 | 813 | @patch('os.path.join') | ||
4026 | 793 | @patch('os.getcwd') | 814 | @patch('os.getcwd') |
4027 | 794 | @patch('os.chdir') | 815 | @patch('os.chdir') |
4028 | 795 | @patch('subprocess.check_call') | 816 | @patch('subprocess.check_call') |
4030 | 796 | def test_git_update_requirements(self, check_call, chdir, getcwd): | 817 | def test_git_update_requirements(self, check_call, chdir, getcwd, join): |
4031 | 797 | pkg_dir = '/mnt/openstack-git/repo-dir' | 818 | pkg_dir = '/mnt/openstack-git/repo-dir' |
4032 | 798 | reqs_dir = '/mnt/openstack-git/reqs-dir' | 819 | reqs_dir = '/mnt/openstack-git/reqs-dir' |
4033 | 799 | orig_dir = '/var/lib/juju/units/testing-foo-0/charm' | 820 | orig_dir = '/var/lib/juju/units/testing-foo-0/charm' |
4034 | 821 | venv_dir = '/mnt/openstack-git/venv' | ||
4035 | 800 | getcwd.return_value = orig_dir | 822 | getcwd.return_value = orig_dir |
4036 | 823 | join.return_value = '/mnt/openstack-git/venv/python' | ||
4037 | 801 | 824 | ||
4039 | 802 | openstack._git_update_requirements(pkg_dir, reqs_dir) | 825 | openstack._git_update_requirements(venv_dir, pkg_dir, reqs_dir) |
4040 | 803 | expected = [call(reqs_dir), call(orig_dir)] | 826 | expected = [call(reqs_dir), call(orig_dir)] |
4041 | 804 | self.assertEquals(expected, chdir.call_args_list) | 827 | self.assertEquals(expected, chdir.call_args_list) |
4043 | 805 | check_call.assert_called_with(['python', 'update.py', pkg_dir]) | 828 | check_call.assert_called_with(['/mnt/openstack-git/venv/python', |
4044 | 829 | 'update.py', pkg_dir]) | ||
4045 | 806 | 830 | ||
4046 | 807 | @patch('os.path.join') | 831 | @patch('os.path.join') |
4047 | 808 | @patch('subprocess.check_call') | 832 | @patch('subprocess.check_call') |
4048 | 809 | 833 | ||
4049 | === modified file 'tests/contrib/openstack/test_os_contexts.py' | |||
4050 | --- tests/contrib/openstack/test_os_contexts.py 2015-04-29 12:52:18 +0000 | |||
4051 | +++ tests/contrib/openstack/test_os_contexts.py 2015-08-13 08:33:21 +0000 | |||
4052 | @@ -73,7 +73,11 @@ | |||
4053 | 73 | return None | 73 | return None |
4054 | 74 | 74 | ||
4055 | 75 | def relation_ids(self, relation): | 75 | def relation_ids(self, relation): |
4057 | 76 | return self.relation_data.keys() | 76 | rids = [] |
4058 | 77 | for rid in self.relation_data.keys(): | ||
4059 | 78 | if relation + ':' in rid: | ||
4060 | 79 | rids.append(rid) | ||
4061 | 80 | return rids | ||
4062 | 77 | 81 | ||
4063 | 78 | def relation_units(self, relation_id): | 82 | def relation_units(self, relation_id): |
4064 | 79 | if relation_id not in self.relation_data: | 83 | if relation_id not in self.relation_data: |
4065 | @@ -325,6 +329,25 @@ | |||
4066 | 325 | - [glance-key2, value2] | 329 | - [glance-key2, value2] |
4067 | 326 | """ | 330 | """ |
4068 | 327 | 331 | ||
4069 | 332 | NOVA_SUB_CONFIG1 = """ | ||
4070 | 333 | nova: | ||
4071 | 334 | /etc/nova/nova.conf: | ||
4072 | 335 | sections: | ||
4073 | 336 | DEFAULT: | ||
4074 | 337 | - [nova-key1, value1] | ||
4075 | 338 | - [nova-key2, value2] | ||
4076 | 339 | """ | ||
4077 | 340 | |||
4078 | 341 | |||
4079 | 342 | NOVA_SUB_CONFIG2 = """ | ||
4080 | 343 | nova-compute: | ||
4081 | 344 | /etc/nova/nova.conf: | ||
4082 | 345 | sections: | ||
4083 | 346 | DEFAULT: | ||
4084 | 347 | - [nova-key3, value3] | ||
4085 | 348 | - [nova-key4, value4] | ||
4086 | 349 | """ | ||
4087 | 350 | |||
4088 | 328 | CINDER_SUB_CONFIG1 = """ | 351 | CINDER_SUB_CONFIG1 = """ |
4089 | 329 | cinder: | 352 | cinder: |
4090 | 330 | /etc/cinder/cinder.conf: | 353 | /etc/cinder/cinder.conf: |
4091 | @@ -376,6 +399,21 @@ | |||
4092 | 376 | }, | 399 | }, |
4093 | 377 | } | 400 | } |
4094 | 378 | 401 | ||
4095 | 402 | SUB_CONFIG_RELATION2 = { | ||
4096 | 403 | 'nova-ceilometer:6': { | ||
4097 | 404 | 'ceilometer-agent/0': { | ||
4098 | 405 | 'private-address': 'nova_node1', | ||
4099 | 406 | 'subordinate_configuration': json.dumps(yaml.load(NOVA_SUB_CONFIG1)), | ||
4100 | 407 | }, | ||
4101 | 408 | }, | ||
4102 | 409 | 'neutron-plugin:3': { | ||
4103 | 410 | 'neutron-ovs-plugin/0': { | ||
4104 | 411 | 'private-address': 'nova_node1', | ||
4105 | 412 | 'subordinate_configuration': json.dumps(yaml.load(NOVA_SUB_CONFIG2)), | ||
4106 | 413 | }, | ||
4107 | 414 | } | ||
4108 | 415 | } | ||
4109 | 416 | |||
4110 | 379 | NONET_CONFIG = { | 417 | NONET_CONFIG = { |
4111 | 380 | 'vip': 'cinderhost1vip', | 418 | 'vip': 'cinderhost1vip', |
4112 | 381 | 'os-internal-network': None, | 419 | 'os-internal-network': None, |
4113 | @@ -2053,6 +2091,27 @@ | |||
4114 | 2053 | # subordinate supplies bad input | 2091 | # subordinate supplies bad input |
4115 | 2054 | self.assertEquals(foo_sub_ctxt(), {'sections': {}}) | 2092 | self.assertEquals(foo_sub_ctxt(), {'sections': {}}) |
4116 | 2055 | 2093 | ||
4117 | 2094 | def test_os_subordinate_config_context_multiple(self): | ||
4118 | 2095 | relation = FakeRelation(relation_data=SUB_CONFIG_RELATION2) | ||
4119 | 2096 | self.relation_get.side_effect = relation.get | ||
4120 | 2097 | self.relation_ids.side_effect = relation.relation_ids | ||
4121 | 2098 | self.related_units.side_effect = relation.relation_units | ||
4122 | 2099 | nova_sub_ctxt = context.SubordinateConfigContext( | ||
4123 | 2100 | service=['nova', 'nova-compute'], | ||
4124 | 2101 | config_file='/etc/nova/nova.conf', | ||
4125 | 2102 | interface=['nova-ceilometer', 'neutron-plugin'], | ||
4126 | 2103 | ) | ||
4127 | 2104 | self.assertEquals( | ||
4128 | 2105 | nova_sub_ctxt(), | ||
4129 | 2106 | {'sections': { | ||
4130 | 2107 | 'DEFAULT': [ | ||
4131 | 2108 | ['nova-key1', 'value1'], | ||
4132 | 2109 | ['nova-key2', 'value2'], | ||
4133 | 2110 | ['nova-key3', 'value3'], | ||
4134 | 2111 | ['nova-key4', 'value4']] | ||
4135 | 2112 | }} | ||
4136 | 2113 | ) | ||
4137 | 2114 | |||
4138 | 2056 | def test_syslog_context(self): | 2115 | def test_syslog_context(self): |
4139 | 2057 | self.config.side_effect = fake_config({'use-syslog': 'foo'}) | 2116 | self.config.side_effect = fake_config({'use-syslog': 'foo'}) |
4140 | 2058 | syslog = context.SyslogContext() | 2117 | syslog = context.SyslogContext() |
4141 | 2059 | 2118 | ||
4142 | === modified file 'tests/contrib/peerstorage/test_peerstorage.py' | |||
4143 | --- tests/contrib/peerstorage/test_peerstorage.py 2015-06-03 14:46:50 +0000 | |||
4144 | +++ tests/contrib/peerstorage/test_peerstorage.py 2015-08-13 08:33:21 +0000 | |||
4145 | @@ -202,7 +202,7 @@ | |||
4146 | 202 | l_settings = {'s3': 3} | 202 | l_settings = {'s3': 3} |
4147 | 203 | r_settings = {'s1': 1, 's2': 2} | 203 | r_settings = {'s1': 1, 's2': 2} |
4148 | 204 | 204 | ||
4150 | 205 | def mock_relation_get(attribute=None, unit=None): | 205 | def mock_relation_get(attribute=None, unit=None, rid=None): |
4151 | 206 | if attribute: | 206 | if attribute: |
4152 | 207 | if attribute in r_settings: | 207 | if attribute in r_settings: |
4153 | 208 | return r_settings.get(attribute) | 208 | return r_settings.get(attribute) |
4154 | @@ -237,11 +237,11 @@ | |||
4155 | 237 | self.assertEqual(_dicta, _dictb) | 237 | self.assertEqual(_dicta, _dictb) |
4156 | 238 | 238 | ||
4157 | 239 | migration_key = '__leader_get_migrated_settings__' | 239 | migration_key = '__leader_get_migrated_settings__' |
4159 | 240 | self.relation_get.side_effect = mock_relation_get | 240 | self._relation_get.side_effect = mock_relation_get |
4160 | 241 | self._leader_get.side_effect = mock_leader_get | 241 | self._leader_get.side_effect = mock_leader_get |
4161 | 242 | self.leader_set.side_effect = mock_leader_set | 242 | self.leader_set.side_effect = mock_leader_set |
4162 | 243 | 243 | ||
4164 | 244 | self.assertEqual({'s1': 1, 's2': 2}, peerstorage.relation_get()) | 244 | self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get()) |
4165 | 245 | self.assertEqual({'s3': 3}, peerstorage._leader_get()) | 245 | self.assertEqual({'s3': 3}, peerstorage._leader_get()) |
4166 | 246 | self.assertEqual({'s1': 1, 's2': 2, 's3': 3}, peerstorage.leader_get()) | 246 | self.assertEqual({'s1': 1, 's2': 2, 's3': 3}, peerstorage.leader_get()) |
4167 | 247 | check_leader_db({'s1': 1, 's2': 2, 's3': 3, | 247 | check_leader_db({'s1': 1, 's2': 2, 's3': 3, |
4168 | @@ -274,7 +274,7 @@ | |||
4169 | 274 | 274 | ||
4170 | 275 | peerstorage.leader_set.reset_mock() | 275 | peerstorage.leader_set.reset_mock() |
4171 | 276 | self.assertEqual({'s1': 1, 's2': 2, 's3': 2, 's4': 3}, | 276 | self.assertEqual({'s1': 1, 's2': 2, 's3': 2, 's4': 3}, |
4173 | 277 | peerstorage.relation_get()) | 277 | peerstorage._relation_get()) |
4174 | 278 | check_leader_db({'s1': 1, 's3': 3, 's4': 4, | 278 | check_leader_db({'s1': 1, 's3': 3, 's4': 4, |
4175 | 279 | migration_key: '["s1", "s4"]'}, | 279 | migration_key: '["s1", "s4"]'}, |
4176 | 280 | peerstorage._leader_get()) | 280 | peerstorage._leader_get()) |
4177 | @@ -290,7 +290,7 @@ | |||
4178 | 290 | l_settings = {'s3': 3} | 290 | l_settings = {'s3': 3} |
4179 | 291 | r_settings = {'s1': 1, 's2': 2} | 291 | r_settings = {'s1': 1, 's2': 2} |
4180 | 292 | 292 | ||
4182 | 293 | def mock_relation_get(attribute=None, unit=None): | 293 | def mock_relation_get(attribute=None, unit=None, rid=None): |
4183 | 294 | if attribute: | 294 | if attribute: |
4184 | 295 | if attribute in r_settings: | 295 | if attribute in r_settings: |
4185 | 296 | return r_settings.get(attribute) | 296 | return r_settings.get(attribute) |
4186 | @@ -314,10 +314,10 @@ | |||
4187 | 314 | 314 | ||
4188 | 315 | l_settings.update(kwargs) | 315 | l_settings.update(kwargs) |
4189 | 316 | 316 | ||
4191 | 317 | self.relation_get.side_effect = mock_relation_get | 317 | self._relation_get.side_effect = mock_relation_get |
4192 | 318 | self._leader_get.side_effect = mock_leader_get | 318 | self._leader_get.side_effect = mock_leader_get |
4193 | 319 | self.leader_set.side_effect = mock_leader_set | 319 | self.leader_set.side_effect = mock_leader_set |
4195 | 320 | self.assertEqual({'s1': 1, 's2': 2}, peerstorage.relation_get()) | 320 | self.assertEqual({'s1': 1, 's2': 2}, peerstorage._relation_get()) |
4196 | 321 | self.assertEqual({'s3': 3}, peerstorage._leader_get()) | 321 | self.assertEqual({'s3': 3}, peerstorage._leader_get()) |
4197 | 322 | self.assertEqual({'s3': 3}, peerstorage.leader_get()) | 322 | self.assertEqual({'s3': 3}, peerstorage.leader_get()) |
4198 | 323 | self.assertEqual({'s3': 3}, l_settings) | 323 | self.assertEqual({'s3': 3}, l_settings) |
4199 | 324 | 324 | ||
4200 | === modified file 'tests/contrib/python/test_debug.py' (properties changed: -x to +x) | |||
4201 | --- tests/contrib/python/test_debug.py 2015-02-11 21:41:57 +0000 | |||
4202 | +++ tests/contrib/python/test_debug.py 2015-08-13 08:33:21 +0000 | |||
4203 | @@ -51,4 +51,4 @@ | |||
4204 | 51 | """ | 51 | """ |
4205 | 52 | self.set_trace() | 52 | self.set_trace() |
4206 | 53 | self.Rpdb.set_trace.side_effect = Exception() | 53 | self.Rpdb.set_trace.side_effect = Exception() |
4208 | 54 | self._error.assert_called_once() | 54 | self.assertTrue(self._error.called) |
4209 | 55 | 55 | ||
4210 | === modified file 'tests/contrib/storage/test_linux_ceph.py' | |||
4211 | --- tests/contrib/storage/test_linux_ceph.py 2015-01-13 11:17:57 +0000 | |||
4212 | +++ tests/contrib/storage/test_linux_ceph.py 2015-08-13 08:33:21 +0000 | |||
4213 | @@ -62,7 +62,7 @@ | |||
4214 | 62 | '''It creates a new ceph keyring''' | 62 | '''It creates a new ceph keyring''' |
4215 | 63 | _exists.return_value = True | 63 | _exists.return_value = True |
4216 | 64 | ceph_utils.create_keyring('cinder', 'cephkey') | 64 | ceph_utils.create_keyring('cinder', 'cephkey') |
4218 | 65 | self.log.assert_called() | 65 | self.assertTrue(self.log.called) |
4219 | 66 | self.check_call.assert_not_called() | 66 | self.check_call.assert_not_called() |
4220 | 67 | 67 | ||
4221 | 68 | @patch('os.remove') | 68 | @patch('os.remove') |
4222 | @@ -72,7 +72,7 @@ | |||
4223 | 72 | _exists.return_value = True | 72 | _exists.return_value = True |
4224 | 73 | ceph_utils.delete_keyring('cinder') | 73 | ceph_utils.delete_keyring('cinder') |
4225 | 74 | _remove.assert_called_with('/etc/ceph/ceph.client.cinder.keyring') | 74 | _remove.assert_called_with('/etc/ceph/ceph.client.cinder.keyring') |
4227 | 75 | self.log.assert_called() | 75 | self.assertTrue(self.log.called) |
4228 | 76 | 76 | ||
4229 | 77 | @patch('os.remove') | 77 | @patch('os.remove') |
4230 | 78 | @patch('os.path.exists') | 78 | @patch('os.path.exists') |
4231 | @@ -80,7 +80,7 @@ | |||
4232 | 80 | '''It creates a new ceph keyring.''' | 80 | '''It creates a new ceph keyring.''' |
4233 | 81 | _exists.return_value = False | 81 | _exists.return_value = False |
4234 | 82 | ceph_utils.delete_keyring('cinder') | 82 | ceph_utils.delete_keyring('cinder') |
4236 | 83 | self.log.assert_called() | 83 | self.assertTrue(self.log.called) |
4237 | 84 | _remove.assert_not_called() | 84 | _remove.assert_not_called() |
4238 | 85 | 85 | ||
4239 | 86 | @patch('os.path.exists') | 86 | @patch('os.path.exists') |
4240 | @@ -90,14 +90,14 @@ | |||
4241 | 90 | with patch_open() as (_open, _file): | 90 | with patch_open() as (_open, _file): |
4242 | 91 | ceph_utils.create_key_file('cinder', 'cephkey') | 91 | ceph_utils.create_key_file('cinder', 'cephkey') |
4243 | 92 | _file.write.assert_called_with('cephkey') | 92 | _file.write.assert_called_with('cephkey') |
4245 | 93 | self.log.assert_called() | 93 | self.assertTrue(self.log.called) |
4246 | 94 | 94 | ||
4247 | 95 | @patch('os.path.exists') | 95 | @patch('os.path.exists') |
4248 | 96 | def test_create_key_file_already_exists(self, _exists): | 96 | def test_create_key_file_already_exists(self, _exists): |
4249 | 97 | '''It creates a new ceph keyring''' | 97 | '''It creates a new ceph keyring''' |
4250 | 98 | _exists.return_value = True | 98 | _exists.return_value = True |
4251 | 99 | ceph_utils.create_key_file('cinder', 'cephkey') | 99 | ceph_utils.create_key_file('cinder', 'cephkey') |
4253 | 100 | self.log.assert_called() | 100 | self.assertTrue(self.log.called) |
4254 | 101 | 101 | ||
4255 | 102 | @patch('os.mkdir') | 102 | @patch('os.mkdir') |
4256 | 103 | @patch.object(ceph_utils, 'apt_install') | 103 | @patch.object(ceph_utils, 'apt_install') |
4257 | @@ -171,7 +171,7 @@ | |||
4258 | 171 | self._patch('pool_exists') | 171 | self._patch('pool_exists') |
4259 | 172 | self.pool_exists.return_value = True | 172 | self.pool_exists.return_value = True |
4260 | 173 | ceph_utils.create_pool(service='cinder', name='foo') | 173 | ceph_utils.create_pool(service='cinder', name='foo') |
4262 | 174 | self.log.assert_called() | 174 | self.assertTrue(self.log.called) |
4263 | 175 | self.check_call.assert_not_called() | 175 | self.check_call.assert_not_called() |
4264 | 176 | 176 | ||
4265 | 177 | def test_keyring_path(self): | 177 | def test_keyring_path(self): |
4266 | @@ -202,14 +202,14 @@ | |||
4267 | 202 | def test_rbd_exists(self): | 202 | def test_rbd_exists(self): |
4268 | 203 | self.check_output.return_value = LS_RBDS | 203 | self.check_output.return_value = LS_RBDS |
4269 | 204 | self.assertTrue(ceph_utils.rbd_exists('service', 'pool', 'rbd1')) | 204 | self.assertTrue(ceph_utils.rbd_exists('service', 'pool', 'rbd1')) |
4271 | 205 | self.check_output.assert_call_with( | 205 | self.check_output.assert_called_with( |
4272 | 206 | ['rbd', 'list', '--id', 'service', '--pool', 'pool'] | 206 | ['rbd', 'list', '--id', 'service', '--pool', 'pool'] |
4273 | 207 | ) | 207 | ) |
4274 | 208 | 208 | ||
4275 | 209 | def test_rbd_does_not_exist(self): | 209 | def test_rbd_does_not_exist(self): |
4276 | 210 | self.check_output.return_value = LS_RBDS | 210 | self.check_output.return_value = LS_RBDS |
4277 | 211 | self.assertFalse(ceph_utils.rbd_exists('service', 'pool', 'rbd4')) | 211 | self.assertFalse(ceph_utils.rbd_exists('service', 'pool', 'rbd4')) |
4279 | 212 | self.check_output.assert_call_with( | 212 | self.check_output.assert_called_with( |
4280 | 213 | ['rbd', 'list', '--id', 'service', '--pool', 'pool'] | 213 | ['rbd', 'list', '--id', 'service', '--pool', 'pool'] |
4281 | 214 | ) | 214 | ) |
4282 | 215 | 215 | ||
4283 | @@ -304,7 +304,7 @@ | |||
4284 | 304 | _file.read.return_value = 'anothermod\n' | 304 | _file.read.return_value = 'anothermod\n' |
4285 | 305 | ceph_utils.modprobe('mymod') | 305 | ceph_utils.modprobe('mymod') |
4286 | 306 | _open.assert_called_with('/etc/modules', 'r+') | 306 | _open.assert_called_with('/etc/modules', 'r+') |
4288 | 307 | _file.read.assert_called() | 307 | _file.read.assert_called_with() |
4289 | 308 | _file.write.assert_called_with('mymod') | 308 | _file.write.assert_called_with('mymod') |
4290 | 309 | self.check_call.assert_called_with(['modprobe', 'mymod']) | 309 | self.check_call.assert_called_with(['modprobe', 'mymod']) |
4291 | 310 | 310 | ||
4292 | @@ -318,14 +318,14 @@ | |||
4293 | 318 | def test_make_filesystem(self, _exists): | 318 | def test_make_filesystem(self, _exists): |
4294 | 319 | _exists.return_value = True | 319 | _exists.return_value = True |
4295 | 320 | ceph_utils.make_filesystem('/dev/sdd') | 320 | ceph_utils.make_filesystem('/dev/sdd') |
4297 | 321 | self.log.assert_called() | 321 | self.assertTrue(self.log.called) |
4298 | 322 | self.check_call.assert_called_with(['mkfs', '-t', 'ext4', '/dev/sdd']) | 322 | self.check_call.assert_called_with(['mkfs', '-t', 'ext4', '/dev/sdd']) |
4299 | 323 | 323 | ||
4300 | 324 | @patch('os.path.exists') | 324 | @patch('os.path.exists') |
4301 | 325 | def test_make_filesystem_xfs(self, _exists): | 325 | def test_make_filesystem_xfs(self, _exists): |
4302 | 326 | _exists.return_value = True | 326 | _exists.return_value = True |
4303 | 327 | ceph_utils.make_filesystem('/dev/sdd', 'xfs') | 327 | ceph_utils.make_filesystem('/dev/sdd', 'xfs') |
4305 | 328 | self.log.assert_called() | 328 | self.assertTrue(self.log.called) |
4306 | 329 | self.check_call.assert_called_with(['mkfs', '-t', 'xfs', '/dev/sdd']) | 329 | self.check_call.assert_called_with(['mkfs', '-t', 'xfs', '/dev/sdd']) |
4307 | 330 | 330 | ||
4308 | 331 | @patch('os.chown') | 331 | @patch('os.chown') |
4309 | 332 | 332 | ||
4310 | === modified file 'tests/contrib/storage/test_linux_storage_utils.py' | |||
4311 | --- tests/contrib/storage/test_linux_storage_utils.py 2014-11-25 13:38:01 +0000 | |||
4312 | +++ tests/contrib/storage/test_linux_storage_utils.py 2015-08-13 08:33:21 +0000 | |||
4313 | @@ -16,8 +16,9 @@ | |||
4314 | 16 | '''It calls sgdisk correctly to zap disk''' | 16 | '''It calls sgdisk correctly to zap disk''' |
4315 | 17 | check_output.return_value = b'200\n' | 17 | check_output.return_value = b'200\n' |
4316 | 18 | storage_utils.zap_disk('/dev/foo') | 18 | storage_utils.zap_disk('/dev/foo') |
4319 | 19 | call.assert_any_call(['sgdisk', '--zap-all', '--mbrtogpt', | 19 | call.assert_any_call(['sgdisk', '--zap-all', '--', '/dev/foo']) |
4320 | 20 | '--clear', '/dev/foo']) | 20 | call.assert_any_call(['sgdisk', '--clear', '--mbrtogpt', |
4321 | 21 | '--', '/dev/foo']) | ||
4322 | 21 | check_output.assert_any_call(['blockdev', '--getsz', '/dev/foo']) | 22 | check_output.assert_any_call(['blockdev', '--getsz', '/dev/foo']) |
4323 | 22 | check_call.assert_any_call(['dd', 'if=/dev/zero', 'of=/dev/foo', | 23 | check_call.assert_any_call(['dd', 'if=/dev/zero', 'of=/dev/foo', |
4324 | 23 | 'bs=1M', 'count=1']) | 24 | 'bs=1M', 'count=1']) |
4325 | @@ -88,6 +89,14 @@ | |||
4326 | 88 | self.assertFalse(result) | 89 | self.assertFalse(result) |
4327 | 89 | 90 | ||
4328 | 90 | @patch(STORAGE_LINUX_UTILS + '.check_output') | 91 | @patch(STORAGE_LINUX_UTILS + '.check_output') |
4329 | 92 | def test_is_device_mounted_full_disks(self, check_output): | ||
4330 | 93 | '''It detects mounted full disks as mounted.''' | ||
4331 | 94 | check_output.return_value = ( | ||
4332 | 95 | b"/dev/sda on / type ext4 (rw,errors=remount-ro)\n") | ||
4333 | 96 | result = storage_utils.is_device_mounted('/dev/sda') | ||
4334 | 97 | self.assertTrue(result) | ||
4335 | 98 | |||
4336 | 99 | @patch(STORAGE_LINUX_UTILS + '.check_output') | ||
4337 | 91 | def test_is_device_mounted_cciss(self, check_output): | 100 | def test_is_device_mounted_cciss(self, check_output): |
4338 | 92 | '''It detects mounted cciss partitions as mounted.''' | 101 | '''It detects mounted cciss partitions as mounted.''' |
4339 | 93 | check_output.return_value = ( | 102 | check_output.return_value = ( |
4340 | 94 | 103 | ||
4341 | === modified file 'tests/contrib/unison/test_unison.py' | |||
4342 | --- tests/contrib/unison/test_unison.py 2015-04-03 15:23:46 +0000 | |||
4343 | +++ tests/contrib/unison/test_unison.py 2015-08-13 08:33:21 +0000 | |||
4344 | @@ -74,7 +74,7 @@ | |||
4345 | 74 | self.assertIn(call(_call), self.check_call.call_args_list) | 74 | self.assertIn(call(_call), self.check_call.call_args_list) |
4346 | 75 | 75 | ||
4347 | 76 | @patch('os.path.isfile') | 76 | @patch('os.path.isfile') |
4349 | 77 | def test_create_private_key(self, isfile): | 77 | def test_create_private_key_rsa(self, isfile): |
4350 | 78 | create_cmd = [ | 78 | create_cmd = [ |
4351 | 79 | 'ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', | 79 | 'ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', |
4352 | 80 | '-f', '/home/foo/.ssh/id_rsa'] | 80 | '-f', '/home/foo/.ssh/id_rsa'] |
4353 | @@ -100,6 +100,36 @@ | |||
4354 | 100 | _ensure_perms() | 100 | _ensure_perms() |
4355 | 101 | 101 | ||
4356 | 102 | @patch('os.path.isfile') | 102 | @patch('os.path.isfile') |
4357 | 103 | def test_create_private_key_ecdsa(self, isfile): | ||
4358 | 104 | create_cmd = [ | ||
4359 | 105 | 'ssh-keygen', '-q', '-N', '', '-t', 'ecdsa', '-b', '521', | ||
4360 | 106 | '-f', '/home/foo/.ssh/id_ecdsa'] | ||
4361 | 107 | |||
4362 | 108 | def _ensure_perms(): | ||
4363 | 109 | cmds = [ | ||
4364 | 110 | ['chown', 'foo', '/home/foo/.ssh/id_ecdsa'], | ||
4365 | 111 | ['chmod', '0600', '/home/foo/.ssh/id_ecdsa'], | ||
4366 | 112 | ] | ||
4367 | 113 | self._ensure_calls_in(cmds) | ||
4368 | 114 | |||
4369 | 115 | isfile.return_value = False | ||
4370 | 116 | unison.create_private_key( | ||
4371 | 117 | user='foo', | ||
4372 | 118 | priv_key_path='/home/foo/.ssh/id_ecdsa', | ||
4373 | 119 | key_type='ecdsa') | ||
4374 | 120 | self.assertIn(call(create_cmd), self.check_call.call_args_list) | ||
4375 | 121 | _ensure_perms() | ||
4376 | 122 | self.check_call.call_args_list = [] | ||
4377 | 123 | |||
4378 | 124 | isfile.return_value = True | ||
4379 | 125 | unison.create_private_key( | ||
4380 | 126 | user='foo', | ||
4381 | 127 | priv_key_path='/home/foo/.ssh/id_ecdsa', | ||
4382 | 128 | key_type='ecdsa') | ||
4383 | 129 | self.assertNotIn(call(create_cmd), self.check_call.call_args_list) | ||
4384 | 130 | _ensure_perms() | ||
4385 | 131 | |||
4386 | 132 | @patch('os.path.isfile') | ||
4387 | 103 | def test_create_public_key(self, isfile): | 133 | def test_create_public_key(self, isfile): |
4388 | 104 | create_cmd = ['ssh-keygen', '-y', '-f', '/home/foo/.ssh/id_rsa'] | 134 | create_cmd = ['ssh-keygen', '-y', '-f', '/home/foo/.ssh/id_rsa'] |
4389 | 105 | isfile.return_value = True | 135 | isfile.return_value = True |
4390 | @@ -273,6 +303,33 @@ | |||
4391 | 273 | write_hosts.assert_called_with('foo', ['host1', 'host2']) | 303 | write_hosts.assert_called_with('foo', ['host1', 'host2']) |
4392 | 274 | self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2') | 304 | self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2') |
4393 | 275 | 305 | ||
4394 | 306 | @patch.object(unison, 'write_known_hosts') | ||
4395 | 307 | @patch.object(unison, 'write_authorized_keys') | ||
4396 | 308 | @patch.object(unison, 'get_keypair') | ||
4397 | 309 | @patch.object(unison, 'ensure_user') | ||
4398 | 310 | def test_ssh_auth_peer_departed(self, ensure_user, get_keypair, | ||
4399 | 311 | write_keys, write_hosts): | ||
4400 | 312 | get_keypair.return_value = ('privkey', 'pubkey') | ||
4401 | 313 | |||
4402 | 314 | self.hook_name.return_value = 'cluster-relation-departed' | ||
4403 | 315 | |||
4404 | 316 | self.relation_get.side_effect = [ | ||
4405 | 317 | 'key1', | ||
4406 | 318 | 'host1', | ||
4407 | 319 | 'key2', | ||
4408 | 320 | 'host2', | ||
4409 | 321 | '', '' | ||
4410 | 322 | ] | ||
4411 | 323 | unison.ssh_authorized_peers(peer_interface='cluster', | ||
4412 | 324 | user='foo', group='foo', | ||
4413 | 325 | ensure_local_user=True) | ||
4414 | 326 | |||
4415 | 327 | ensure_user.assert_called_with('foo', 'foo') | ||
4416 | 328 | get_keypair.assert_called_with('foo') | ||
4417 | 329 | write_keys.assert_called_with('foo', ['key1', 'key2']) | ||
4418 | 330 | write_hosts.assert_called_with('foo', ['host1', 'host2']) | ||
4419 | 331 | self.relation_set.assert_called_with(ssh_authorized_hosts='host1:host2') | ||
4420 | 332 | |||
4421 | 276 | def test_collect_authed_hosts(self): | 333 | def test_collect_authed_hosts(self): |
4422 | 277 | # only one of the hosts in fake environment has auth'd | 334 | # only one of the hosts in fake environment has auth'd |
4423 | 278 | # the local peer | 335 | # the local peer |
4424 | 279 | 336 | ||
4425 | === added directory 'tests/coordinator' | |||
4426 | === added file 'tests/coordinator/__init__.py' | |||
4427 | === added file 'tests/coordinator/test_coordinator.py' | |||
4428 | --- tests/coordinator/test_coordinator.py 1970-01-01 00:00:00 +0000 | |||
4429 | +++ tests/coordinator/test_coordinator.py 2015-08-13 08:33:21 +0000 | |||
4430 | @@ -0,0 +1,535 @@ | |||
4431 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
4432 | 2 | # | ||
4433 | 3 | # This file is part of charm-helpers. | ||
4434 | 4 | # | ||
4435 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
4436 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
4437 | 7 | # published by the Free Software Foundation. | ||
4438 | 8 | # | ||
4439 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
4440 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
4441 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
4442 | 12 | # GNU Lesser General Public License for more details. | ||
4443 | 13 | # | ||
4444 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
4445 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
4446 | 16 | from datetime import datetime, timedelta | ||
4447 | 17 | import json | ||
4448 | 18 | import tempfile | ||
4449 | 19 | import unittest | ||
4450 | 20 | from mock import call, MagicMock, patch, sentinel | ||
4451 | 21 | |||
4452 | 22 | from charmhelpers import coordinator | ||
4453 | 23 | from charmhelpers.core import hookenv | ||
4454 | 24 | |||
4455 | 25 | |||
4456 | 26 | class TestCoordinator(unittest.TestCase): | ||
4457 | 27 | |||
4458 | 28 | def setUp(self): | ||
4459 | 29 | del hookenv._atstart[:] | ||
4460 | 30 | del hookenv._atexit[:] | ||
4461 | 31 | hookenv.cache.clear() | ||
4462 | 32 | coordinator.Singleton._instances.clear() | ||
4463 | 33 | |||
4464 | 34 | def install(patch): | ||
4465 | 35 | patch.start() | ||
4466 | 36 | self.addCleanup(patch.stop) | ||
4467 | 37 | |||
4468 | 38 | install(patch.object(hookenv, 'local_unit', return_value='foo/1')) | ||
4469 | 39 | install(patch.object(hookenv, 'is_leader', return_value=False)) | ||
4470 | 40 | install(patch.object(hookenv, 'metadata', | ||
4471 | 41 | return_value={'peers': {'cluster': None}})) | ||
4472 | 42 | install(patch.object(hookenv, 'log')) | ||
4473 | 43 | |||
4474 | 44 | # Ensure _timestamp always increases. | ||
4475 | 45 | install(patch.object(coordinator, '_utcnow', | ||
4476 | 46 | side_effect=self._utcnow)) | ||
4477 | 47 | |||
4478 | 48 | _last_utcnow = datetime(2015, 1, 1, 00, 00) | ||
4479 | 49 | |||
4480 | 50 | def _utcnow(self, ts=coordinator._timestamp): | ||
4481 | 51 | self._last_utcnow += timedelta(minutes=1) | ||
4482 | 52 | return self._last_utcnow | ||
4483 | 53 | |||
4484 | 54 | def test_is_singleton(self): | ||
4485 | 55 | # BaseCoordinator and subclasses are singletons. Placing this | ||
4486 | 56 | # burden on charm authors is impractical, particularly if | ||
4487 | 57 | # libraries start wanting to use coordinator instances. | ||
4488 | 58 | # With singletons, we don't need to worry about sharing state | ||
4489 | 59 | # between instances or have them stomping on each other when they | ||
4490 | 60 | # need to serialize their state. | ||
4491 | 61 | self.assertTrue(coordinator.BaseCoordinator() | ||
4492 | 62 | is coordinator.BaseCoordinator()) | ||
4493 | 63 | self.assertTrue(coordinator.Serial() is coordinator.Serial()) | ||
4494 | 64 | self.assertFalse(coordinator.BaseCoordinator() is coordinator.Serial()) | ||
4495 | 65 | |||
4496 | 66 | @patch.object(hookenv, 'atstart') | ||
4497 | 67 | def test_implicit_initialize_and_handle(self, atstart): | ||
4498 | 68 | # When you construct a BaseCoordinator(), its initialize() and | ||
4499 | 69 | # handle() method are invoked automatically every hook. This | ||
4500 | 70 | # is done using hookenv.atstart | ||
4501 | 71 | c = coordinator.BaseCoordinator() | ||
4502 | 72 | atstart.assert_has_calls([call(c.initialize), call(c.handle)]) | ||
4503 | 73 | |||
4504 | 74 | @patch.object(hookenv, 'has_juju_version', return_value=False) | ||
4505 | 75 | def test_initialize_enforces_juju_version(self, has_juju_version): | ||
4506 | 76 | c = coordinator.BaseCoordinator() | ||
4507 | 77 | with self.assertRaises(AssertionError): | ||
4508 | 78 | c.initialize() | ||
4509 | 79 | has_juju_version.assert_called_once_with('1.23') | ||
4510 | 80 | |||
4511 | 81 | @patch.object(hookenv, 'atexit') | ||
4512 | 82 | @patch.object(hookenv, 'has_juju_version', return_value=True) | ||
4513 | 83 | @patch.object(hookenv, 'relation_ids') | ||
4514 | 84 | def test_initialize(self, relation_ids, ver, atexit): | ||
4515 | 85 | # First initialization are done before there is a peer relation. | ||
4516 | 86 | relation_ids.return_value = [] | ||
4517 | 87 | c = coordinator.BaseCoordinator() | ||
4518 | 88 | |||
4519 | 89 | with patch.object(c, '_load_state') as _load_state, \ | ||
4520 | 90 | patch.object(c, '_emit_state') as _emit_state: # IGNORE: E127 | ||
4521 | 91 | c.initialize() | ||
4522 | 92 | _load_state.assert_called_once_with() | ||
4523 | 93 | _emit_state.assert_called_once_with() | ||
4524 | 94 | |||
4525 | 95 | self.assertEqual(c.relname, 'cluster') | ||
4526 | 96 | self.assertIsNone(c.relid) | ||
4527 | 97 | relation_ids.assert_called_once_with('cluster') | ||
4528 | 98 | |||
4529 | 99 | # Methods installed to save state and release locks if the | ||
4530 | 100 | # hook is successful. | ||
4531 | 101 | atexit.assert_has_calls([call(c._save_state), | ||
4532 | 102 | call(c._release_granted)]) | ||
4533 | 103 | |||
4534 | 104 | # If we have a peer relation, the id is stored. | ||
4535 | 105 | relation_ids.return_value = ['cluster:1'] | ||
4536 | 106 | c = coordinator.BaseCoordinator() | ||
4537 | 107 | with patch.object(c, '_load_state'), patch.object(c, '_emit_state'): | ||
4538 | 108 | c.initialize() | ||
4539 | 109 | self.assertEqual(c.relid, 'cluster:1') | ||
4540 | 110 | |||
4541 | 111 | # If we are already initialized, nothing happens. | ||
4542 | 112 | c.grants = {} | ||
4543 | 113 | c.requests = {} | ||
4544 | 114 | c.initialize() | ||
4545 | 115 | |||
4546 | 116 | def test_acquire(self): | ||
4547 | 117 | c = coordinator.BaseCoordinator() | ||
4548 | 118 | lock = 'mylock' | ||
4549 | 119 | c.grants = {} | ||
4550 | 120 | c.requests = {hookenv.local_unit(): {}} | ||
4551 | 121 | |||
4552 | 122 | # We are not the leader, so first acquire will return False. | ||
4553 | 123 | self.assertFalse(c.acquire(lock)) | ||
4554 | 124 | |||
4555 | 125 | # But the request is in the queue. | ||
4556 | 126 | self.assertTrue(c.requested(lock)) | ||
4557 | 127 | ts = c.request_timestamp(lock) | ||
4558 | 128 | |||
4559 | 129 | # A further attempts at acquiring the lock do nothing, | ||
4560 | 130 | # and the timestamp of the request remains unchanged. | ||
4561 | 131 | self.assertFalse(c.acquire(lock)) | ||
4562 | 132 | self.assertEqual(ts, c.request_timestamp(lock)) | ||
4563 | 133 | |||
4564 | 134 | # Once the leader has granted the lock, acquire returns True. | ||
4565 | 135 | with patch.object(c, 'granted') as granted: | ||
4566 | 136 | granted.return_value = True | ||
4567 | 137 | self.assertTrue(c.acquire(lock)) | ||
4568 | 138 | granted.assert_called_once_with(lock) | ||
4569 | 139 | |||
4570 | 140 | def test_acquire_leader(self): | ||
4571 | 141 | # When acquire() is called by the leader, it needs | ||
4572 | 142 | # to make a grant decision immediately. It can't defer | ||
4573 | 143 | # making the decision until a future hook, as no future | ||
4574 | 144 | # hooks will be triggered. | ||
4575 | 145 | hookenv.is_leader.return_value = True | ||
4576 | 146 | c = coordinator.Serial() # Not Base. Test hooks into default_grant. | ||
4577 | 147 | lock = 'mylock' | ||
4578 | 148 | unit = hookenv.local_unit() | ||
4579 | 149 | c.grants = {} | ||
4580 | 150 | c.requests = {unit: {}} | ||
4581 | 151 | with patch.object(c, 'default_grant') as default_grant: | ||
4582 | 152 | default_grant.side_effect = iter([False, True]) | ||
4583 | 153 | |||
4584 | 154 | self.assertFalse(c.acquire(lock)) | ||
4585 | 155 | ts = c.request_timestamp(lock) | ||
4586 | 156 | |||
4587 | 157 | self.assertTrue(c.acquire(lock)) | ||
4588 | 158 | self.assertEqual(ts, c.request_timestamp(lock)) | ||
4589 | 159 | |||
4590 | 160 | # If it it granted, the leader doesn't make a decision again. | ||
4591 | 161 | self.assertTrue(c.acquire(lock)) | ||
4592 | 162 | self.assertEqual(ts, c.request_timestamp(lock)) | ||
4593 | 163 | |||
4594 | 164 | self.assertEqual(default_grant.call_count, 2) | ||
4595 | 165 | |||
4596 | 166 | def test_granted(self): | ||
4597 | 167 | c = coordinator.BaseCoordinator() | ||
4598 | 168 | unit = hookenv.local_unit() | ||
4599 | 169 | lock = 'mylock' | ||
4600 | 170 | ts = coordinator._timestamp() | ||
4601 | 171 | c.grants = {} | ||
4602 | 172 | |||
4603 | 173 | # Unit makes a request, but it isn't granted | ||
4604 | 174 | c.requests = {unit: {lock: ts}} | ||
4605 | 175 | self.assertFalse(c.granted(lock)) | ||
4606 | 176 | |||
4607 | 177 | # Once the leader has granted the request, all good. | ||
4608 | 178 | # It does this by mirroring the request timestamp. | ||
4609 | 179 | c.grants = {unit: {lock: ts}} | ||
4610 | 180 | self.assertTrue(c.granted(lock)) | ||
4611 | 181 | |||
4612 | 182 | # The unit releases the lock by removing the request. | ||
4613 | 183 | c.requests = {unit: {}} | ||
4614 | 184 | self.assertFalse(c.granted(lock)) | ||
4615 | 185 | |||
4616 | 186 | # If the unit makes a new request before the leader | ||
4617 | 187 | # has had a chance to do its housekeeping, the timestamps | ||
4618 | 188 | # do not match and the lock not considered granted. | ||
4619 | 189 | ts = coordinator._timestamp() | ||
4620 | 190 | c.requests = {unit: {lock: ts}} | ||
4621 | 191 | self.assertFalse(c.granted(lock)) | ||
4622 | 192 | |||
4623 | 193 | # Until the leader gets around to its duties. | ||
4624 | 194 | c.grants = {unit: {lock: ts}} | ||
4625 | 195 | self.assertTrue(c.granted(lock)) | ||
4626 | 196 | |||
4627 | 197 | def test_requested(self): | ||
4628 | 198 | c = coordinator.BaseCoordinator() | ||
4629 | 199 | lock = 'mylock' | ||
4630 | 200 | c.requests = {hookenv.local_unit(): {}} | ||
4631 | 201 | c.grants = {} | ||
4632 | 202 | |||
4633 | 203 | self.assertFalse(c.requested(lock)) | ||
4634 | 204 | c.acquire(lock) | ||
4635 | 205 | self.assertTrue(c.requested(lock)) | ||
4636 | 206 | |||
4637 | 207 | def test_request_timestamp(self): | ||
4638 | 208 | c = coordinator.BaseCoordinator() | ||
4639 | 209 | lock = 'mylock' | ||
4640 | 210 | unit = hookenv.local_unit() | ||
4641 | 211 | |||
4642 | 212 | c.requests = {unit: {}} | ||
4643 | 213 | c.grants = {} | ||
4644 | 214 | self.assertIsNone(c.request_timestamp(lock)) | ||
4645 | 215 | |||
4646 | 216 | now = datetime.utcnow() | ||
4647 | 217 | fmt = coordinator._timestamp_format | ||
4648 | 218 | c.requests = {hookenv.local_unit(): {lock: now.strftime(fmt)}} | ||
4649 | 219 | |||
4650 | 220 | self.assertEqual(c.request_timestamp(lock), now) | ||
4651 | 221 | |||
4652 | 222 | def test_handle_not_leader(self): | ||
4653 | 223 | c = coordinator.BaseCoordinator() | ||
4654 | 224 | # If we are not the leader, handle does nothing. We know this, | ||
4655 | 225 | # because without mocks or initialization it would otherwise crash. | ||
4656 | 226 | c.handle() | ||
4657 | 227 | |||
4658 | 228 | def test_handle(self): | ||
4659 | 229 | hookenv.is_leader.return_value = True | ||
4660 | 230 | lock = 'mylock' | ||
4661 | 231 | c = coordinator.BaseCoordinator() | ||
4662 | 232 | c.relid = 'cluster:1' | ||
4663 | 233 | |||
4664 | 234 | ts = coordinator._timestamp | ||
4665 | 235 | ts1, ts2, ts3 = ts(), ts(), ts() | ||
4666 | 236 | |||
4667 | 237 | # Grant one of these requests. | ||
4668 | 238 | requests = {'foo/1': {lock: ts1}, | ||
4669 | 239 | 'foo/2': {lock: ts2}, | ||
4670 | 240 | 'foo/3': {lock: ts3}} | ||
4671 | 241 | c.requests = requests.copy() | ||
4672 | 242 | # Because the existing grant should be released. | ||
4673 | 243 | c.grants = {'foo/2': {lock: ts()}} # No request, release. | ||
4674 | 244 | |||
4675 | 245 | with patch.object(c, 'grant') as grant: | ||
4676 | 246 | c.handle() | ||
4677 | 247 | |||
4678 | 248 | # The requests are unchanged. This is normally state on the | ||
4679 | 249 | # peer relation, and only the units themselves can change it. | ||
4680 | 250 | self.assertDictEqual(requests, c.requests) | ||
4681 | 251 | |||
4682 | 252 | # The grant without a corresponding requests was released. | ||
4683 | 253 | self.assertDictEqual({'foo/2': {}}, c.grants) | ||
4684 | 254 | |||
4685 | 255 | # A potential grant was made for each of the outstanding requests. | ||
4686 | 256 | grant.assert_has_calls([call(lock, 'foo/1'), | ||
4687 | 257 | call(lock, 'foo/2'), | ||
4688 | 258 | call(lock, 'foo/3')], any_order=True) | ||
4689 | 259 | |||
4690 | 260 | def test_grant_not_leader(self): | ||
4691 | 261 | c = coordinator.BaseCoordinator() | ||
4692 | 262 | c.grant(sentinel.whatever, sentinel.whatever) # Nothing happens. | ||
4693 | 263 | |||
4694 | 264 | def test_grant(self): | ||
4695 | 265 | hookenv.is_leader.return_value = True | ||
4696 | 266 | c = coordinator.BaseCoordinator() | ||
4697 | 267 | c.default_grant = MagicMock() | ||
4698 | 268 | c.grant_other = MagicMock() | ||
4699 | 269 | |||
4700 | 270 | ts = coordinator._timestamp | ||
4701 | 271 | ts1, ts2 = ts(), ts() | ||
4702 | 272 | |||
4703 | 273 | c.requests = {'foo/1': {'mylock': ts1, 'other': ts()}, | ||
4704 | 274 | 'foo/2': {'mylock': ts2}, | ||
4705 | 275 | 'foo/3': {'mylock': ts()}} | ||
4706 | 276 | grants = {'foo/1': {'mylock': ts1}} | ||
4707 | 277 | c.grants = grants.copy() | ||
4708 | 278 | |||
4709 | 279 | # foo/1 already has a granted mylock, so returns True. | ||
4710 | 280 | self.assertTrue(c.grant('mylock', 'foo/1')) | ||
4711 | 281 | |||
4712 | 282 | # foo/2 does not have a granted mylock. default_grant will | ||
4713 | 283 | # be called to make a decision (no) | ||
4714 | 284 | c.default_grant.return_value = False | ||
4715 | 285 | self.assertFalse(c.grant('mylock', 'foo/2')) | ||
4716 | 286 | self.assertDictEqual(grants, c.grants) | ||
4717 | 287 | c.default_grant.assert_called_once_with('mylock', 'foo/2', | ||
4718 | 288 | set(['foo/1']), | ||
4719 | 289 | ['foo/2', 'foo/3']) | ||
4720 | 290 | c.default_grant.reset_mock() | ||
4721 | 291 | |||
4722 | 292 | # Lets say yes. | ||
4723 | 293 | c.default_grant.return_value = True | ||
4724 | 294 | self.assertTrue(c.grant('mylock', 'foo/2')) | ||
4725 | 295 | grants = {'foo/1': {'mylock': ts1}, 'foo/2': {'mylock': ts2}} | ||
4726 | 296 | self.assertDictEqual(grants, c.grants) | ||
4727 | 297 | c.default_grant.assert_called_once_with('mylock', 'foo/2', | ||
4728 | 298 | set(['foo/1']), | ||
4729 | 299 | ['foo/2', 'foo/3']) | ||
4730 | 300 | |||
4731 | 301 | # The other lock has custom logic, in the form of the overridden | ||
4732 | 302 | # grant_other method. | ||
4733 | 303 | c.grant_other.return_value = False | ||
4734 | 304 | self.assertFalse(c.grant('other', 'foo/1')) | ||
4735 | 305 | c.grant_other.assert_called_once_with('other', 'foo/1', | ||
4736 | 306 | set(), ['foo/1']) | ||
4737 | 307 | |||
4738 | 308 | # If there is no request, grant returns False | ||
4739 | 309 | c.grant_other.return_value = True | ||
4740 | 310 | self.assertFalse(c.grant('other', 'foo/2')) | ||
4741 | 311 | |||
4742 | 312 | def test_released(self): | ||
4743 | 313 | c = coordinator.BaseCoordinator() | ||
4744 | 314 | with patch.object(c, 'msg') as msg: | ||
4745 | 315 | c.released('foo/2', 'mylock', coordinator._utcnow()) | ||
4746 | 316 | expected = 'Leader released mylock from foo/2, held 0:01:00' | ||
4747 | 317 | msg.assert_called_once_with(expected) | ||
4748 | 318 | |||
4749 | 319 | def test_require(self): | ||
4750 | 320 | c = coordinator.BaseCoordinator() | ||
4751 | 321 | c.acquire = MagicMock() | ||
4752 | 322 | c.granted = MagicMock() | ||
4753 | 323 | guard = MagicMock() | ||
4754 | 324 | |||
4755 | 325 | wrapped = MagicMock() | ||
4756 | 326 | |||
4757 | 327 | @c.require('mylock', guard) | ||
4758 | 328 | def func(*args, **kw): | ||
4759 | 329 | wrapped(*args, **kw) | ||
4760 | 330 | |||
4761 | 331 | # If the lock is granted, the wrapped function is called. | ||
4762 | 332 | c.granted.return_value = True | ||
4763 | 333 | func(arg=True) | ||
4764 | 334 | wrapped.assert_called_once_with(arg=True) | ||
4765 | 335 | wrapped.reset_mock() | ||
4766 | 336 | |||
4767 | 337 | # If the lock is not granted, and the guard returns False, | ||
4768 | 338 | # the lock is not acquired. | ||
4769 | 339 | c.acquire.return_value = False | ||
4770 | 340 | c.granted.return_value = False | ||
4771 | 341 | guard.return_value = False | ||
4772 | 342 | func() | ||
4773 | 343 | self.assertFalse(wrapped.called) | ||
4774 | 344 | self.assertFalse(c.acquire.called) | ||
4775 | 345 | |||
4776 | 346 | # If the lock is not granted, and the guard returns True, | ||
4777 | 347 | # the lock is acquired. But the function still isn't called if | ||
4778 | 348 | # it cannot be acquired immediately. | ||
4779 | 349 | guard.return_value = True | ||
4780 | 350 | func() | ||
4781 | 351 | self.assertFalse(wrapped.called) | ||
4782 | 352 | c.acquire.assert_called_once_with('mylock') | ||
4783 | 353 | |||
4784 | 354 | # Finally, if the lock is not granted, and the guard returns True, | ||
4785 | 355 | # and the lock acquired immediately, the function is called. | ||
4786 | 356 | c.acquire.return_value = True | ||
4787 | 357 | func(sentinel.arg) | ||
4788 | 358 | wrapped.assert_called_once_with(sentinel.arg) | ||
4789 | 359 | |||
4790 | 360 | def test_msg(self): | ||
4791 | 361 | c = coordinator.BaseCoordinator() | ||
4792 | 362 | # Just a wrapper around hookenv.log | ||
4793 | 363 | c.msg('hi') | ||
4794 | 364 | hookenv.log.assert_called_once_with('coordinator.BaseCoordinator hi', | ||
4795 | 365 | level=hookenv.INFO) | ||
4796 | 366 | |||
4797 | 367 | def test_name(self): | ||
4798 | 368 | # We use the class name in a few places to avoid conflicts. | ||
4799 | 369 | # We assume we won't be using multiple BaseCoordinator subclasses | ||
4800 | 370 | # with the same name at the same time. | ||
4801 | 371 | c = coordinator.BaseCoordinator() | ||
4802 | 372 | self.assertEqual(c._name(), 'BaseCoordinator') | ||
4803 | 373 | c = coordinator.Serial() | ||
4804 | 374 | self.assertEqual(c._name(), 'Serial') | ||
4805 | 375 | |||
4806 | 376 | @patch.object(hookenv, 'leader_get') | ||
4807 | 377 | def test_load_state(self, leader_get): | ||
4808 | 378 | c = coordinator.BaseCoordinator() | ||
4809 | 379 | unit = hookenv.local_unit() | ||
4810 | 380 | |||
4811 | 381 | # c.granted is just the leader_get decoded. | ||
4812 | 382 | leader_get.return_value = '{"json": true}' | ||
4813 | 383 | c._load_state() | ||
4814 | 384 | self.assertDictEqual(c.grants, {'json': True}) | ||
4815 | 385 | |||
4816 | 386 | # With no relid, there is no peer relation so request state | ||
4817 | 387 | # is pulled from a local stash. | ||
4818 | 388 | with patch.object(c, '_load_local_state') as loc_state: | ||
4819 | 389 | loc_state.return_value = {'local': True} | ||
4820 | 390 | c._load_state() | ||
4821 | 391 | self.assertDictEqual(c.requests, {unit: {'local': True}}) | ||
4822 | 392 | |||
4823 | 393 | # With a relid, request details are pulled from the peer relation. | ||
4824 | 394 | # If there is no data in the peer relation from the local unit, | ||
4825 | 395 | # we still pull it from the local stash as it means this is the | ||
4826 | 396 | # first time we have joined. | ||
4827 | 397 | c.relid = 'cluster:1' | ||
4828 | 398 | with patch.object(c, '_load_local_state') as loc_state, \ | ||
4829 | 399 | patch.object(c, '_load_peer_state') as peer_state: | ||
4830 | 400 | loc_state.return_value = {'local': True} | ||
4831 | 401 | peer_state.return_value = {'foo/2': {'mylock': 'whatever'}} | ||
4832 | 402 | c._load_state() | ||
4833 | 403 | self.assertDictEqual(c.requests, {unit: {'local': True}, | ||
4834 | 404 | 'foo/2': {'mylock': 'whatever'}}) | ||
4835 | 405 | |||
4836 | 406 | # If there are local details in the peer relation, the local | ||
4837 | 407 | # stash is ignored. | ||
4838 | 408 | with patch.object(c, '_load_local_state') as loc_state, \ | ||
4839 | 409 | patch.object(c, '_load_peer_state') as peer_state: | ||
4840 | 410 | loc_state.return_value = {'local': True} | ||
4841 | 411 | peer_state.return_value = {unit: {}, | ||
4842 | 412 | 'foo/2': {'mylock': 'whatever'}} | ||
4843 | 413 | c._load_state() | ||
4844 | 414 | self.assertDictEqual(c.requests, {unit: {}, | ||
4845 | 415 | 'foo/2': {'mylock': 'whatever'}}) | ||
4846 | 416 | |||
4847 | 417 | def test_emit_state(self): | ||
4848 | 418 | c = coordinator.BaseCoordinator() | ||
4849 | 419 | unit = hookenv.local_unit() | ||
4850 | 420 | c.requests = {unit: {'lock_a': sentinel.ts, | ||
4851 | 421 | 'lock_b': sentinel.ts, | ||
4852 | 422 | 'lock_c': sentinel.ts}} | ||
4853 | 423 | c.grants = {unit: {'lock_a': sentinel.ts, | ||
4854 | 424 | 'lock_b': sentinel.ts2}} | ||
4855 | 425 | with patch.object(c, 'msg') as msg: | ||
4856 | 426 | c._emit_state() | ||
4857 | 427 | msg.assert_has_calls([call('Granted lock_a'), | ||
4858 | 428 | call('Waiting on lock_b'), | ||
4859 | 429 | call('Waiting on lock_c')], | ||
4860 | 430 | any_order=True) | ||
4861 | 431 | |||
4862 | 432 | @patch.object(hookenv, 'relation_set') | ||
4863 | 433 | @patch.object(hookenv, 'leader_set') | ||
4864 | 434 | def test_save_state(self, leader_set, relation_set): | ||
4865 | 435 | c = coordinator.BaseCoordinator() | ||
4866 | 436 | unit = hookenv.local_unit() | ||
4867 | 437 | c.grants = {'directdump': True} | ||
4868 | 438 | c.requests = {unit: 'data1', 'foo/2': 'data2'} | ||
4869 | 439 | |||
4870 | 440 | # grants is dumped to leadership settings, if the unit is leader. | ||
4871 | 441 | with patch.object(c, '_save_local_state') as save_loc: | ||
4872 | 442 | c._save_state() | ||
4873 | 443 | self.assertFalse(leader_set.called) | ||
4874 | 444 | hookenv.is_leader.return_value = True | ||
4875 | 445 | c._save_state() | ||
4876 | 446 | leader_set.assert_called_once_with({c.key: '{"directdump": true}'}) | ||
4877 | 447 | |||
4878 | 448 | # If there is no relation id, the local units requests is dumped | ||
4879 | 449 | # to a local stash. | ||
4880 | 450 | with patch.object(c, '_save_local_state') as save_loc: | ||
4881 | 451 | c._save_state() | ||
4882 | 452 | save_loc.assert_called_once_with('data1') | ||
4883 | 453 | |||
4884 | 454 | # If there is a relation id, the local units requests is dumped | ||
4885 | 455 | # to the peer relation. | ||
4886 | 456 | with patch.object(c, '_save_local_state') as save_loc: | ||
4887 | 457 | c.relid = 'cluster:1' | ||
4888 | 458 | c._save_state() | ||
4889 | 459 | self.assertFalse(save_loc.called) | ||
4890 | 460 | relation_set.assert_called_once_with( | ||
4891 | 461 | c.relid, relation_settings={c.key: '"data1"'}) # JSON encoded | ||
4892 | 462 | |||
4893 | 463 | @patch.object(hookenv, 'relation_get') | ||
4894 | 464 | @patch.object(hookenv, 'related_units') | ||
4895 | 465 | def test_load_peer_state(self, related_units, relation_get): | ||
4896 | 466 | # Standard relation-get loops, decoding results from JSON. | ||
4897 | 467 | c = coordinator.BaseCoordinator() | ||
4898 | 468 | c.key = sentinel.key | ||
4899 | 469 | c.relid = sentinel.relid | ||
4900 | 470 | related_units.return_value = ['foo/2', 'foo/3'] | ||
4901 | 471 | d = {'foo/1': {'foo/1': True}, | ||
4902 | 472 | 'foo/2': {'foo/2': True}, | ||
4903 | 473 | 'foo/3': {'foo/3': True}} | ||
4904 | 474 | |||
4905 | 475 | def _get(key, unit, relid): | ||
4906 | 476 | assert key == sentinel.key | ||
4907 | 477 | assert relid == sentinel.relid | ||
4908 | 478 | return json.dumps(d[unit]) | ||
4909 | 479 | relation_get.side_effect = _get | ||
4910 | 480 | |||
4911 | 481 | self.assertDictEqual(c._load_peer_state(), d) | ||
4912 | 482 | |||
4913 | 483 | def test_local_state_filename(self): | ||
4914 | 484 | c = coordinator.BaseCoordinator() | ||
4915 | 485 | self.assertEqual(c._local_state_filename(), | ||
4916 | 486 | '.charmhelpers.coordinator.BaseCoordinator') | ||
4917 | 487 | |||
4918 | 488 | def test_load_local_state(self): | ||
4919 | 489 | c = coordinator.BaseCoordinator() | ||
4920 | 490 | with tempfile.NamedTemporaryFile(mode='w') as f: | ||
4921 | 491 | with patch.object(c, '_local_state_filename') as fn: | ||
4922 | 492 | fn.return_value = f.name | ||
4923 | 493 | d = 'some data' | ||
4924 | 494 | json.dump(d, f) | ||
4925 | 495 | f.flush() | ||
4926 | 496 | d2 = c._load_local_state() | ||
4927 | 497 | self.assertEqual(d, d2) | ||
4928 | 498 | |||
4929 | 499 | def test_save_local_state(self): | ||
4930 | 500 | c = coordinator.BaseCoordinator() | ||
4931 | 501 | with tempfile.NamedTemporaryFile(mode='r') as f: | ||
4932 | 502 | with patch.object(c, '_local_state_filename') as fn: | ||
4933 | 503 | fn.return_value = f.name | ||
4934 | 504 | c._save_local_state('some data') | ||
4935 | 505 | self.assertEqual(json.load(f), 'some data') | ||
4936 | 506 | |||
4937 | 507 | def test_release_granted(self): | ||
4938 | 508 | c = coordinator.BaseCoordinator() | ||
4939 | 509 | unit = hookenv.local_unit() | ||
4940 | 510 | c.requests = {unit: {'lock1': sentinel.ts, 'lock2': sentinel.ts}, | ||
4941 | 511 | 'foo/2': {'lock1': sentinel.ts}} | ||
4942 | 512 | c.grants = {unit: {'lock1': sentinel.ts}, | ||
4943 | 513 | 'foo/2': {'lock1': sentinel.ts}} | ||
4944 | 514 | # The granted lock for the local unit is released. | ||
4945 | 515 | c._release_granted() | ||
4946 | 516 | self.assertDictEqual(c.requests, {unit: {'lock2': sentinel.ts}, | ||
4947 | 517 | 'foo/2': {'lock1': sentinel.ts}}) | ||
4948 | 518 | |||
4949 | 519 | def test_implicit_peer_relation_name(self): | ||
4950 | 520 | self.assertEqual(coordinator._implicit_peer_relation_name(), | ||
4951 | 521 | 'cluster') | ||
4952 | 522 | |||
4953 | 523 | def test_default_grant(self): | ||
4954 | 524 | c = coordinator.Serial() | ||
4955 | 525 | # Lock not granted. First in the queue. | ||
4956 | 526 | self.assertTrue(c.default_grant(sentinel.lock, sentinel.u1, | ||
4957 | 527 | set(), [sentinel.u1, sentinel.u2])) | ||
4958 | 528 | |||
4959 | 529 | # Lock not granted. Later in the queue. | ||
4960 | 530 | self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1, | ||
4961 | 531 | set(), [sentinel.u2, sentinel.u1])) | ||
4962 | 532 | |||
4963 | 533 | # Lock already granted | ||
4964 | 534 | self.assertFalse(c.default_grant(sentinel.lock, sentinel.u1, | ||
4965 | 535 | set([sentinel.u2]), [sentinel.u1])) | ||
4966 | 0 | 536 | ||
4967 | === added file 'tests/core/test_files.py' | |||
4968 | --- tests/core/test_files.py 1970-01-01 00:00:00 +0000 | |||
4969 | +++ tests/core/test_files.py 2015-08-13 08:33:21 +0000 | |||
4970 | @@ -0,0 +1,32 @@ | |||
4971 | 1 | #!/usr/bin/env python | ||
4972 | 2 | # -*- coding: utf-8 -*- | ||
4973 | 3 | |||
4974 | 4 | from charmhelpers.core import files | ||
4975 | 5 | |||
4976 | 6 | import mock | ||
4977 | 7 | import unittest | ||
4978 | 8 | import tempfile | ||
4979 | 9 | import os | ||
4980 | 10 | |||
4981 | 11 | |||
4982 | 12 | class FileTests(unittest.TestCase): | ||
4983 | 13 | |||
4984 | 14 | @mock.patch("subprocess.check_call") | ||
4985 | 15 | def test_sed(self, check_call): | ||
4986 | 16 | files.sed("/tmp/test-sed-file", "replace", "this") | ||
4987 | 17 | check_call.assert_called_once_with( | ||
4988 | 18 | ['sed', '-i', '-r', '-e', 's/replace/this/g', | ||
4989 | 19 | '/tmp/test-sed-file'] | ||
4990 | 20 | ) | ||
4991 | 21 | |||
4992 | 22 | def test_sed_file(self): | ||
4993 | 23 | tmp = tempfile.NamedTemporaryFile(mode='w', delete=False) | ||
4994 | 24 | tmp.write("IPV6=yes") | ||
4995 | 25 | tmp.close() | ||
4996 | 26 | |||
4997 | 27 | files.sed(tmp.name, "IPV6=.*", "IPV6=no") | ||
4998 | 28 | |||
4999 | 29 | with open(tmp.name) as tmp: | ||
5000 | 30 | self.assertEquals(tmp.read(), "IPV6=no") |
The diff has been truncated for viewing.
lgtm