Merge lp:~saviq/charms/trusty/openstack-dashboard/simplify-settings into lp:~sdn-charmers/charms/trusty/openstack-dashboard/add-settings
- Trusty Tahr (14.04)
- simplify-settings
- Merge into add-settings
Proposed by
Michał Sawicz
Status: | Needs review |
---|---|
Proposed branch: | lp:~saviq/charms/trusty/openstack-dashboard/simplify-settings |
Merge into: | lp:~sdn-charmers/charms/trusty/openstack-dashboard/add-settings |
Diff against target: |
5131 lines (+2708/-679) 45 files modified
charm-helpers-hooks.yaml (+1/-0) config.yaml (+25/-18) hooks/charmhelpers/cli/__init__.py (+191/-0) hooks/charmhelpers/cli/benchmark.py (+36/-0) hooks/charmhelpers/cli/commands.py (+32/-0) hooks/charmhelpers/cli/hookenv.py (+23/-0) hooks/charmhelpers/cli/host.py (+31/-0) hooks/charmhelpers/cli/unitdata.py (+39/-0) hooks/charmhelpers/contrib/hahelpers/cluster.py (+47/-3) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+43/-6) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+361/-51) hooks/charmhelpers/contrib/openstack/context.py (+69/-48) hooks/charmhelpers/contrib/openstack/ip.py (+49/-44) hooks/charmhelpers/contrib/openstack/neutron.py (+40/-20) hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-6) hooks/charmhelpers/contrib/openstack/templating.py (+13/-98) hooks/charmhelpers/contrib/openstack/utils.py (+139/-38) hooks/charmhelpers/contrib/python/packages.py (+30/-5) hooks/charmhelpers/contrib/storage/linux/ceph.py (+6/-6) hooks/charmhelpers/contrib/storage/linux/utils.py (+4/-3) hooks/charmhelpers/core/files.py (+45/-0) hooks/charmhelpers/core/hookenv.py (+331/-42) hooks/charmhelpers/core/host.py (+119/-17) hooks/charmhelpers/core/hugepage.py (+62/-0) hooks/charmhelpers/core/services/base.py (+43/-19) hooks/charmhelpers/core/services/helpers.py (+18/-2) hooks/charmhelpers/core/unitdata.py (+61/-17) hooks/charmhelpers/fetch/__init__.py (+32/-15) hooks/charmhelpers/fetch/archiveurl.py (+7/-1) hooks/charmhelpers/fetch/giturl.py (+8/-6) hooks/horizon_contexts.py (+0/-26) hooks/horizon_hooks.py (+2/-3) hooks/horizon_utils.py (+40/-31) templates/git/dashboard.conf (+1/-0) templates/icehouse/_{}_juju_{}.py (+0/-2) tests/00-setup (+1/-0) tests/018-basic-utopic-juno (+0/-9) tests/052-basic-trusty-kilo-git (+12/-0) tests/basic_deployment.py (+52/-15) tests/charmhelpers/contrib/amulet/utils.py (+282/-9) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+43/-6) tests/charmhelpers/contrib/openstack/amulet/utils.py (+361/-51) 
unit_tests/test_horizon_contexts.py (+0/-28) unit_tests/test_horizon_hooks.py (+2/-2) unit_tests/test_horizon_utils.py (+1/-32) |
To merge this branch: | bzr merge lp:~saviq/charms/trusty/openstack-dashboard/simplify-settings |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
SDN Charmers | Pending | ||
Review via email: mp+269339@code.launchpad.net |
Commit message
Let plugin subordinates handle the plugin files themselves
Description of the change
To post a comment you must log in.
- 83. By Michał Sawicz
-
Drop empty line
- 84. By Michał Sawicz
-
Merge next
Unmerged revisions
- 84. By Michał Sawicz
-
Merge next
- 83. By Michał Sawicz
-
Drop empty line
- 82. By Michał Sawicz
-
Undo test changes
- 81. By Michał Sawicz
-
Revert support for plugin files, let subordinates write those themselves
- 80. By Michał Sawicz
-
Revert charmhelpers' pattern support
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'charm-helpers-hooks.yaml' |
2 | --- charm-helpers-hooks.yaml 2015-04-07 13:58:41 +0000 |
3 | +++ charm-helpers-hooks.yaml 2015-08-27 15:02:41 +0000 |
4 | @@ -2,6 +2,7 @@ |
5 | destination: hooks/charmhelpers |
6 | include: |
7 | - core |
8 | + - cli |
9 | - fetch |
10 | - contrib.openstack|inc=* |
11 | - contrib.storage.linux |
12 | |
13 | === modified file 'config.yaml' |
14 | --- config.yaml 2015-04-13 15:34:13 +0000 |
15 | +++ config.yaml 2015-08-27 15:02:41 +0000 |
16 | @@ -7,9 +7,7 @@ |
17 | type: boolean |
18 | default: False |
19 | description: | |
20 | - By default, all services will log into their corresponding log |
21 | - files. Setting this to True will force all services to log to the |
22 | - syslog. |
23 | + Setting this to True will allow supporting services to log to syslog. |
24 | openstack-origin: |
25 | default: distro |
26 | type: string |
27 | @@ -18,17 +16,27 @@ |
28 | distro (default), ppa:somecustom/ppa, a deb url sources entry, |
29 | or a supported Cloud Archive release pocket. |
30 | |
31 | - Supported Cloud Archive sources include: cloud:precise-folsom, |
32 | - cloud:precise-folsom/updates, cloud:precise-folsom/staging, |
33 | - cloud:precise-folsom/proposed. |
34 | - |
35 | - Note that updating this setting to a source that is known to |
36 | - provide a later version of OpenStack will trigger a software |
37 | - upgrade. |
38 | - |
39 | - Note that when openstack-origin-git is specified, openstack |
40 | - specific packages will be installed from source rather than |
41 | - from the openstack-origin repository. |
42 | + Supported Cloud Archive sources include: |
43 | + |
44 | + cloud:<series>-<openstack-release> |
45 | + cloud:<series>-<openstack-release>/updates |
46 | + cloud:<series>-<openstack-release>/staging |
47 | + cloud:<series>-<openstack-release>/proposed |
48 | + |
49 | + For series=Precise we support cloud archives for openstack-release: |
50 | + * icehouse |
51 | + |
52 | + For series=Trusty we support cloud archives for openstack-release: |
53 | + * juno |
54 | + * kilo |
55 | + * ... |
56 | + |
57 | + NOTE: updating this setting to a source that is known to provide |
58 | + a later version of OpenStack will trigger a software upgrade. |
59 | + |
60 | + NOTE: when openstack-origin-git is specified, openstack specific |
61 | + packages will be installed from source rather than from the |
62 | + openstack-origin repository. |
63 | openstack-origin-git: |
64 | default: |
65 | type: string |
66 | @@ -88,10 +96,9 @@ |
67 | default: |
68 | description: | |
69 | Base64-encoded SSL certificate to install and use for Horizon. |
70 | - . |
71 | - juju set openstack-dashbaord ssl_cert="$(cat cert| base64)" \ |
72 | - ssl_key="$(cat key| base64)" |
73 | - . |
74 | + |
75 | +       juju set openstack-dashboard ssl_cert="$(cat cert| base64)" \ |
76 | + ssl_key="$(cat key| base64)" |
77 | ssl_key: |
78 | type: string |
79 | default: |
80 | |
81 | === added directory 'hooks/charmhelpers/cli' |
82 | === added file 'hooks/charmhelpers/cli/__init__.py' |
83 | --- hooks/charmhelpers/cli/__init__.py 1970-01-01 00:00:00 +0000 |
84 | +++ hooks/charmhelpers/cli/__init__.py 2015-08-27 15:02:41 +0000 |
85 | @@ -0,0 +1,191 @@ |
86 | +# Copyright 2014-2015 Canonical Limited. |
87 | +# |
88 | +# This file is part of charm-helpers. |
89 | +# |
90 | +# charm-helpers is free software: you can redistribute it and/or modify |
91 | +# it under the terms of the GNU Lesser General Public License version 3 as |
92 | +# published by the Free Software Foundation. |
93 | +# |
94 | +# charm-helpers is distributed in the hope that it will be useful, |
95 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
96 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
97 | +# GNU Lesser General Public License for more details. |
98 | +# |
99 | +# You should have received a copy of the GNU Lesser General Public License |
100 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
101 | + |
102 | +import inspect |
103 | +import argparse |
104 | +import sys |
105 | + |
106 | +from six.moves import zip |
107 | + |
108 | +from charmhelpers.core import unitdata |
109 | + |
110 | + |
111 | +class OutputFormatter(object): |
112 | + def __init__(self, outfile=sys.stdout): |
113 | + self.formats = ( |
114 | + "raw", |
115 | + "json", |
116 | + "py", |
117 | + "yaml", |
118 | + "csv", |
119 | + "tab", |
120 | + ) |
121 | + self.outfile = outfile |
122 | + |
123 | + def add_arguments(self, argument_parser): |
124 | + formatgroup = argument_parser.add_mutually_exclusive_group() |
125 | + choices = self.supported_formats |
126 | + formatgroup.add_argument("--format", metavar='FMT', |
127 | + help="Select output format for returned data, " |
128 | + "where FMT is one of: {}".format(choices), |
129 | + choices=choices, default='raw') |
130 | + for fmt in self.formats: |
131 | + fmtfunc = getattr(self, fmt) |
132 | + formatgroup.add_argument("-{}".format(fmt[0]), |
133 | + "--{}".format(fmt), action='store_const', |
134 | + const=fmt, dest='format', |
135 | + help=fmtfunc.__doc__) |
136 | + |
137 | + @property |
138 | + def supported_formats(self): |
139 | + return self.formats |
140 | + |
141 | + def raw(self, output): |
142 | + """Output data as raw string (default)""" |
143 | + if isinstance(output, (list, tuple)): |
144 | + output = '\n'.join(map(str, output)) |
145 | + self.outfile.write(str(output)) |
146 | + |
147 | + def py(self, output): |
148 | + """Output data as a nicely-formatted python data structure""" |
149 | + import pprint |
150 | + pprint.pprint(output, stream=self.outfile) |
151 | + |
152 | + def json(self, output): |
153 | + """Output data in JSON format""" |
154 | + import json |
155 | + json.dump(output, self.outfile) |
156 | + |
157 | + def yaml(self, output): |
158 | + """Output data in YAML format""" |
159 | + import yaml |
160 | + yaml.safe_dump(output, self.outfile) |
161 | + |
162 | + def csv(self, output): |
163 | + """Output data as excel-compatible CSV""" |
164 | + import csv |
165 | + csvwriter = csv.writer(self.outfile) |
166 | + csvwriter.writerows(output) |
167 | + |
168 | + def tab(self, output): |
169 | + """Output data in excel-compatible tab-delimited format""" |
170 | + import csv |
171 | + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) |
172 | + csvwriter.writerows(output) |
173 | + |
174 | + def format_output(self, output, fmt='raw'): |
175 | + fmtfunc = getattr(self, fmt) |
176 | + fmtfunc(output) |
177 | + |
178 | + |
179 | +class CommandLine(object): |
180 | + argument_parser = None |
181 | + subparsers = None |
182 | + formatter = None |
183 | + exit_code = 0 |
184 | + |
185 | + def __init__(self): |
186 | + if not self.argument_parser: |
187 | + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') |
188 | + if not self.formatter: |
189 | + self.formatter = OutputFormatter() |
190 | + self.formatter.add_arguments(self.argument_parser) |
191 | + if not self.subparsers: |
192 | + self.subparsers = self.argument_parser.add_subparsers(help='Commands') |
193 | + |
194 | + def subcommand(self, command_name=None): |
195 | + """ |
196 | + Decorate a function as a subcommand. Use its arguments as the |
197 | + command-line arguments""" |
198 | + def wrapper(decorated): |
199 | + cmd_name = command_name or decorated.__name__ |
200 | + subparser = self.subparsers.add_parser(cmd_name, |
201 | + description=decorated.__doc__) |
202 | + for args, kwargs in describe_arguments(decorated): |
203 | + subparser.add_argument(*args, **kwargs) |
204 | + subparser.set_defaults(func=decorated) |
205 | + return decorated |
206 | + return wrapper |
207 | + |
208 | + def test_command(self, decorated): |
209 | + """ |
210 | + Subcommand is a boolean test function, so bool return values should be |
211 | + converted to a 0/1 exit code. |
212 | + """ |
213 | + decorated._cli_test_command = True |
214 | + return decorated |
215 | + |
216 | + def no_output(self, decorated): |
217 | + """ |
218 | + Subcommand is not expected to return a value, so don't print a spurious None. |
219 | + """ |
220 | + decorated._cli_no_output = True |
221 | + return decorated |
222 | + |
223 | + def subcommand_builder(self, command_name, description=None): |
224 | + """ |
225 | + Decorate a function that builds a subcommand. Builders should accept a |
226 | + single argument (the subparser instance) and return the function to be |
227 | + run as the command.""" |
228 | + def wrapper(decorated): |
229 | + subparser = self.subparsers.add_parser(command_name) |
230 | + func = decorated(subparser) |
231 | + subparser.set_defaults(func=func) |
232 | + subparser.description = description or func.__doc__ |
233 | + return wrapper |
234 | + |
235 | + def run(self): |
236 | + "Run cli, processing arguments and executing subcommands." |
237 | + arguments = self.argument_parser.parse_args() |
238 | + argspec = inspect.getargspec(arguments.func) |
239 | + vargs = [] |
240 | + for arg in argspec.args: |
241 | + vargs.append(getattr(arguments, arg)) |
242 | + if argspec.varargs: |
243 | + vargs.extend(getattr(arguments, argspec.varargs)) |
244 | + output = arguments.func(*vargs) |
245 | + if getattr(arguments.func, '_cli_test_command', False): |
246 | + self.exit_code = 0 if output else 1 |
247 | + output = '' |
248 | + if getattr(arguments.func, '_cli_no_output', False): |
249 | + output = '' |
250 | + self.formatter.format_output(output, arguments.format) |
251 | + if unitdata._KV: |
252 | + unitdata._KV.flush() |
253 | + |
254 | + |
255 | +cmdline = CommandLine() |
256 | + |
257 | + |
258 | +def describe_arguments(func): |
259 | + """ |
260 | + Analyze a function's signature and return a data structure suitable for |
261 | + passing in as arguments to an argparse parser's add_argument() method.""" |
262 | + |
263 | + argspec = inspect.getargspec(func) |
264 | + # we should probably raise an exception somewhere if func includes **kwargs |
265 | + if argspec.defaults: |
266 | + positional_args = argspec.args[:-len(argspec.defaults)] |
267 | + keyword_names = argspec.args[-len(argspec.defaults):] |
268 | + for arg, default in zip(keyword_names, argspec.defaults): |
269 | + yield ('--{}'.format(arg),), {'default': default} |
270 | + else: |
271 | + positional_args = argspec.args |
272 | + |
273 | + for arg in positional_args: |
274 | + yield (arg,), {} |
275 | + if argspec.varargs: |
276 | + yield (argspec.varargs,), {'nargs': '*'} |
277 | |
278 | === added file 'hooks/charmhelpers/cli/benchmark.py' |
279 | --- hooks/charmhelpers/cli/benchmark.py 1970-01-01 00:00:00 +0000 |
280 | +++ hooks/charmhelpers/cli/benchmark.py 2015-08-27 15:02:41 +0000 |
281 | @@ -0,0 +1,36 @@ |
282 | +# Copyright 2014-2015 Canonical Limited. |
283 | +# |
284 | +# This file is part of charm-helpers. |
285 | +# |
286 | +# charm-helpers is free software: you can redistribute it and/or modify |
287 | +# it under the terms of the GNU Lesser General Public License version 3 as |
288 | +# published by the Free Software Foundation. |
289 | +# |
290 | +# charm-helpers is distributed in the hope that it will be useful, |
291 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
292 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
293 | +# GNU Lesser General Public License for more details. |
294 | +# |
295 | +# You should have received a copy of the GNU Lesser General Public License |
296 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
297 | + |
298 | +from . import cmdline |
299 | +from charmhelpers.contrib.benchmark import Benchmark |
300 | + |
301 | + |
302 | +@cmdline.subcommand(command_name='benchmark-start') |
303 | +def start(): |
304 | + Benchmark.start() |
305 | + |
306 | + |
307 | +@cmdline.subcommand(command_name='benchmark-finish') |
308 | +def finish(): |
309 | + Benchmark.finish() |
310 | + |
311 | + |
312 | +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") |
313 | +def service(subparser): |
314 | + subparser.add_argument("value", help="The composite score.") |
315 | + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") |
316 | + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") |
317 | + return Benchmark.set_composite_score |
318 | |
319 | === added file 'hooks/charmhelpers/cli/commands.py' |
320 | --- hooks/charmhelpers/cli/commands.py 1970-01-01 00:00:00 +0000 |
321 | +++ hooks/charmhelpers/cli/commands.py 2015-08-27 15:02:41 +0000 |
322 | @@ -0,0 +1,32 @@ |
323 | +# Copyright 2014-2015 Canonical Limited. |
324 | +# |
325 | +# This file is part of charm-helpers. |
326 | +# |
327 | +# charm-helpers is free software: you can redistribute it and/or modify |
328 | +# it under the terms of the GNU Lesser General Public License version 3 as |
329 | +# published by the Free Software Foundation. |
330 | +# |
331 | +# charm-helpers is distributed in the hope that it will be useful, |
332 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
333 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
334 | +# GNU Lesser General Public License for more details. |
335 | +# |
336 | +# You should have received a copy of the GNU Lesser General Public License |
337 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
338 | + |
339 | +""" |
340 | +This module loads sub-modules into the python runtime so they can be |
341 | +discovered via the inspect module. In order to prevent flake8 from (rightfully) |
342 | +telling us these are unused modules, throw a ' # noqa' at the end of each import |
343 | +so that the warning is suppressed. |
344 | +""" |
345 | + |
346 | +from . import CommandLine # noqa |
347 | + |
348 | +""" |
349 | +Import the sub-modules which have decorated subcommands to register with chlp. |
350 | +""" |
351 | +from . import host # noqa |
352 | +from . import benchmark # noqa |
353 | +from . import unitdata # noqa |
354 | +from . import hookenv # noqa |
355 | |
356 | === added file 'hooks/charmhelpers/cli/hookenv.py' |
357 | --- hooks/charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000 |
358 | +++ hooks/charmhelpers/cli/hookenv.py 2015-08-27 15:02:41 +0000 |
359 | @@ -0,0 +1,23 @@ |
360 | +# Copyright 2014-2015 Canonical Limited. |
361 | +# |
362 | +# This file is part of charm-helpers. |
363 | +# |
364 | +# charm-helpers is free software: you can redistribute it and/or modify |
365 | +# it under the terms of the GNU Lesser General Public License version 3 as |
366 | +# published by the Free Software Foundation. |
367 | +# |
368 | +# charm-helpers is distributed in the hope that it will be useful, |
369 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
370 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
371 | +# GNU Lesser General Public License for more details. |
372 | +# |
373 | +# You should have received a copy of the GNU Lesser General Public License |
374 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
375 | + |
376 | +from . import cmdline |
377 | +from charmhelpers.core import hookenv |
378 | + |
379 | + |
380 | +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) |
381 | +cmdline.subcommand('service-name')(hookenv.service_name) |
382 | +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) |
383 | |
384 | === added file 'hooks/charmhelpers/cli/host.py' |
385 | --- hooks/charmhelpers/cli/host.py 1970-01-01 00:00:00 +0000 |
386 | +++ hooks/charmhelpers/cli/host.py 2015-08-27 15:02:41 +0000 |
387 | @@ -0,0 +1,31 @@ |
388 | +# Copyright 2014-2015 Canonical Limited. |
389 | +# |
390 | +# This file is part of charm-helpers. |
391 | +# |
392 | +# charm-helpers is free software: you can redistribute it and/or modify |
393 | +# it under the terms of the GNU Lesser General Public License version 3 as |
394 | +# published by the Free Software Foundation. |
395 | +# |
396 | +# charm-helpers is distributed in the hope that it will be useful, |
397 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
398 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
399 | +# GNU Lesser General Public License for more details. |
400 | +# |
401 | +# You should have received a copy of the GNU Lesser General Public License |
402 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
403 | + |
404 | +from . import cmdline |
405 | +from charmhelpers.core import host |
406 | + |
407 | + |
408 | +@cmdline.subcommand() |
409 | +def mounts(): |
410 | + "List mounts" |
411 | + return host.mounts() |
412 | + |
413 | + |
414 | +@cmdline.subcommand_builder('service', description="Control system services") |
415 | +def service(subparser): |
416 | + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") |
417 | + subparser.add_argument("service_name", help="Name of the service to control") |
418 | + return host.service |
419 | |
420 | === added file 'hooks/charmhelpers/cli/unitdata.py' |
421 | --- hooks/charmhelpers/cli/unitdata.py 1970-01-01 00:00:00 +0000 |
422 | +++ hooks/charmhelpers/cli/unitdata.py 2015-08-27 15:02:41 +0000 |
423 | @@ -0,0 +1,39 @@ |
424 | +# Copyright 2014-2015 Canonical Limited. |
425 | +# |
426 | +# This file is part of charm-helpers. |
427 | +# |
428 | +# charm-helpers is free software: you can redistribute it and/or modify |
429 | +# it under the terms of the GNU Lesser General Public License version 3 as |
430 | +# published by the Free Software Foundation. |
431 | +# |
432 | +# charm-helpers is distributed in the hope that it will be useful, |
433 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
434 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
435 | +# GNU Lesser General Public License for more details. |
436 | +# |
437 | +# You should have received a copy of the GNU Lesser General Public License |
438 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
439 | + |
440 | +from . import cmdline |
441 | +from charmhelpers.core import unitdata |
442 | + |
443 | + |
444 | +@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") |
445 | +def unitdata_cmd(subparser): |
446 | + nested = subparser.add_subparsers() |
447 | + get_cmd = nested.add_parser('get', help='Retrieve data') |
448 | + get_cmd.add_argument('key', help='Key to retrieve the value of') |
449 | + get_cmd.set_defaults(action='get', value=None) |
450 | + set_cmd = nested.add_parser('set', help='Store data') |
451 | + set_cmd.add_argument('key', help='Key to set') |
452 | + set_cmd.add_argument('value', help='Value to store') |
453 | + set_cmd.set_defaults(action='set') |
454 | + |
455 | + def _unitdata_cmd(action, key, value): |
456 | + if action == 'get': |
457 | + return unitdata.kv().get(key) |
458 | + elif action == 'set': |
459 | + unitdata.kv().set(key, value) |
460 | + unitdata.kv().flush() |
461 | + return '' |
462 | + return _unitdata_cmd |
463 | |
464 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' |
465 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-02-26 10:11:26 +0000 |
466 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-08-27 15:02:41 +0000 |
467 | @@ -44,6 +44,7 @@ |
468 | ERROR, |
469 | WARNING, |
470 | unit_get, |
471 | + is_leader as juju_is_leader |
472 | ) |
473 | from charmhelpers.core.decorators import ( |
474 | retry_on_exception, |
475 | @@ -52,6 +53,8 @@ |
476 | bool_from_string, |
477 | ) |
478 | |
479 | +DC_RESOURCE_NAME = 'DC' |
480 | + |
481 | |
482 | class HAIncompleteConfig(Exception): |
483 | pass |
484 | @@ -61,17 +64,30 @@ |
485 | pass |
486 | |
487 | |
488 | +class CRMDCNotFound(Exception): |
489 | + pass |
490 | + |
491 | + |
492 | def is_elected_leader(resource): |
493 | """ |
494 | Returns True if the charm executing this is the elected cluster leader. |
495 | |
496 | It relies on two mechanisms to determine leadership: |
497 | - 1. If the charm is part of a corosync cluster, call corosync to |
498 | + 1. If juju is sufficiently new and leadership election is supported, |
499 | + the is_leader command will be used. |
500 | + 2. If the charm is part of a corosync cluster, call corosync to |
501 | determine leadership. |
502 | - 2. If the charm is not part of a corosync cluster, the leader is |
503 | + 3. If the charm is not part of a corosync cluster, the leader is |
504 |        determined as being "the alive unit with the lowest unit number". In |
505 | other words, the oldest surviving unit. |
506 | """ |
507 | + try: |
508 | + return juju_is_leader() |
509 | + except NotImplementedError: |
510 | + log('Juju leadership election feature not enabled' |
511 | + ', using fallback support', |
512 | + level=WARNING) |
513 | + |
514 | if is_clustered(): |
515 | if not is_crm_leader(resource): |
516 | log('Deferring action to CRM leader.', level=INFO) |
517 | @@ -95,7 +111,33 @@ |
518 | return False |
519 | |
520 | |
521 | -@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) |
522 | +def is_crm_dc(): |
523 | + """ |
524 | + Determine leadership by querying the pacemaker Designated Controller |
525 | + """ |
526 | + cmd = ['crm', 'status'] |
527 | + try: |
528 | + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
529 | + if not isinstance(status, six.text_type): |
530 | + status = six.text_type(status, "utf-8") |
531 | + except subprocess.CalledProcessError as ex: |
532 | + raise CRMDCNotFound(str(ex)) |
533 | + |
534 | + current_dc = '' |
535 | + for line in status.split('\n'): |
536 | + if line.startswith('Current DC'): |
537 | + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum |
538 | + current_dc = line.split(':')[1].split()[0] |
539 | + if current_dc == get_unit_hostname(): |
540 | + return True |
541 | + elif current_dc == 'NONE': |
542 | + raise CRMDCNotFound('Current DC: NONE') |
543 | + |
544 | + return False |
545 | + |
546 | + |
547 | +@retry_on_exception(5, base_delay=2, |
548 | + exc_type=(CRMResourceNotFound, CRMDCNotFound)) |
549 | def is_crm_leader(resource, retry=False): |
550 | """ |
551 | Returns True if the charm calling this is the elected corosync leader, |
552 | @@ -104,6 +146,8 @@ |
553 | We allow this operation to be retried to avoid the possibility of getting a |
554 | false negative. See LP #1396246 for more info. |
555 | """ |
556 | + if resource == DC_RESOURCE_NAME: |
557 | + return is_crm_dc() |
558 | cmd = ['crm', 'resource', 'show', resource] |
559 | try: |
560 | status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
561 | |
562 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' |
563 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-04-23 14:53:21 +0000 |
564 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-27 15:02:41 +0000 |
565 | @@ -44,7 +44,7 @@ |
566 | Determine if the local branch being tested is derived from its |
567 |            stable or next (dev) branch, and based on this, use the corresponding |
568 | stable or next branches for the other_services.""" |
569 | - base_charms = ['mysql', 'mongodb'] |
570 | + base_charms = ['mysql', 'mongodb', 'nrpe'] |
571 | |
572 | if self.series in ['precise', 'trusty']: |
573 | base_series = self.series |
574 | @@ -79,9 +79,9 @@ |
575 | services.append(this_service) |
576 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
577 | 'ceph-osd', 'ceph-radosgw'] |
578 | - # Openstack subordinate charms do not expose an origin option as that |
579 | - # is controlled by the principle |
580 | - ignore = ['neutron-openvswitch'] |
581 | + # Most OpenStack subordinate charms do not expose an origin option |
582 | +        # as that is controlled by the principal. |
583 | + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
584 | |
585 | if self.openstack: |
586 | for svc in services: |
587 | @@ -110,7 +110,8 @@ |
588 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, |
589 | self.precise_havana, self.precise_icehouse, |
590 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
591 | - self.trusty_kilo, self.vivid_kilo) = range(10) |
592 | + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
593 | + self.wily_liberty) = range(12) |
594 | |
595 | releases = { |
596 | ('precise', None): self.precise_essex, |
597 | @@ -121,8 +122,10 @@ |
598 | ('trusty', None): self.trusty_icehouse, |
599 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
600 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
601 | + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
602 | ('utopic', None): self.utopic_juno, |
603 | - ('vivid', None): self.vivid_kilo} |
604 | + ('vivid', None): self.vivid_kilo, |
605 | + ('wily', None): self.wily_liberty} |
606 | return releases[(self.series, self.openstack)] |
607 | |
608 | def _get_openstack_release_string(self): |
609 | @@ -138,9 +141,43 @@ |
610 | ('trusty', 'icehouse'), |
611 | ('utopic', 'juno'), |
612 | ('vivid', 'kilo'), |
613 | + ('wily', 'liberty'), |
614 | ]) |
615 | if self.openstack: |
616 | os_origin = self.openstack.split(':')[1] |
617 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
618 | else: |
619 | return releases[self.series] |
620 | + |
621 | + def get_ceph_expected_pools(self, radosgw=False): |
622 | + """Return a list of expected ceph pools in a ceph + cinder + glance |
623 | + test scenario, based on OpenStack release and whether ceph radosgw |
624 | + is flagged as present or not.""" |
625 | + |
626 | + if self._get_openstack_release() >= self.trusty_kilo: |
627 | + # Kilo or later |
628 | + pools = [ |
629 | + 'rbd', |
630 | + 'cinder', |
631 | + 'glance' |
632 | + ] |
633 | + else: |
634 | + # Juno or earlier |
635 | + pools = [ |
636 | + 'data', |
637 | + 'metadata', |
638 | + 'rbd', |
639 | + 'cinder', |
640 | + 'glance' |
641 | + ] |
642 | + |
643 | + if radosgw: |
644 | + pools.extend([ |
645 | + '.rgw.root', |
646 | + '.rgw.control', |
647 | + '.rgw', |
648 | + '.rgw.gc', |
649 | + '.users.uid' |
650 | + ]) |
651 | + |
652 | + return pools |
653 | |
654 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' |
655 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-01-26 09:46:38 +0000 |
656 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-08-27 15:02:41 +0000 |
657 | @@ -14,16 +14,20 @@ |
658 | # You should have received a copy of the GNU Lesser General Public License |
659 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
660 | |
661 | +import amulet |
662 | +import json |
663 | import logging |
664 | import os |
665 | +import six |
666 | import time |
667 | import urllib |
668 | |
669 | +import cinderclient.v1.client as cinder_client |
670 | import glanceclient.v1.client as glance_client |
671 | +import heatclient.v1.client as heat_client |
672 | import keystoneclient.v2_0 as keystone_client |
673 | import novaclient.v1_1.client as nova_client |
674 | - |
675 | -import six |
676 | +import swiftclient |
677 | |
678 | from charmhelpers.contrib.amulet.utils import ( |
679 | AmuletUtils |
680 | @@ -37,7 +41,7 @@ |
681 | """OpenStack amulet utilities. |
682 | |
683 | This class inherits from AmuletUtils and has additional support |
684 | - that is specifically for use by OpenStack charms. |
685 | + that is specifically for use by OpenStack charm tests. |
686 | """ |
687 | |
688 | def __init__(self, log_level=ERROR): |
689 | @@ -51,6 +55,8 @@ |
690 | Validate actual endpoint data vs expected endpoint data. The ports |
691 | are used to find the matching endpoint. |
692 | """ |
693 | + self.log.debug('Validating endpoint data...') |
694 | + self.log.debug('actual: {}'.format(repr(endpoints))) |
695 | found = False |
696 | for ep in endpoints: |
697 | self.log.debug('endpoint: {}'.format(repr(ep))) |
698 | @@ -77,6 +83,7 @@ |
699 | Validate a list of actual service catalog endpoints vs a list of |
700 | expected service catalog endpoints. |
701 | """ |
702 | + self.log.debug('Validating service catalog endpoint data...') |
703 | self.log.debug('actual: {}'.format(repr(actual))) |
704 | for k, v in six.iteritems(expected): |
705 | if k in actual: |
706 | @@ -93,6 +100,7 @@ |
707 | Validate a list of actual tenant data vs list of expected tenant |
708 | data. |
709 | """ |
710 | + self.log.debug('Validating tenant data...') |
711 | self.log.debug('actual: {}'.format(repr(actual))) |
712 | for e in expected: |
713 | found = False |
714 | @@ -114,6 +122,7 @@ |
715 | Validate a list of actual role data vs a list of expected role |
716 | data. |
717 | """ |
718 | + self.log.debug('Validating role data...') |
719 | self.log.debug('actual: {}'.format(repr(actual))) |
720 | for e in expected: |
721 | found = False |
722 | @@ -134,6 +143,7 @@ |
723 | Validate a list of actual user data vs a list of expected user |
724 | data. |
725 | """ |
726 | + self.log.debug('Validating user data...') |
727 | self.log.debug('actual: {}'.format(repr(actual))) |
728 | for e in expected: |
729 | found = False |
730 | @@ -155,17 +165,30 @@ |
731 | |
732 | Validate a list of actual flavors vs a list of expected flavors. |
733 | """ |
734 | + self.log.debug('Validating flavor data...') |
735 | self.log.debug('actual: {}'.format(repr(actual))) |
736 | act = [a.name for a in actual] |
737 | return self._validate_list_data(expected, act) |
738 | |
739 | def tenant_exists(self, keystone, tenant): |
740 | """Return True if tenant exists.""" |
741 | + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
742 | return tenant in [t.name for t in keystone.tenants.list()] |
743 | |
744 | + def authenticate_cinder_admin(self, keystone_sentry, username, |
745 | + password, tenant): |
746 | + """Authenticates admin user with cinder.""" |
747 | + # NOTE(beisner): cinder python client doesn't accept tokens. |
748 | + service_ip = \ |
749 | + keystone_sentry.relation('shared-db', |
750 | + 'mysql:shared-db')['private-address'] |
751 | + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
752 | + return cinder_client.Client(username, password, tenant, ept) |
753 | + |
754 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
755 | tenant): |
756 | """Authenticates admin user with the keystone admin endpoint.""" |
757 | + self.log.debug('Authenticating keystone admin...') |
758 | unit = keystone_sentry |
759 | service_ip = unit.relation('shared-db', |
760 | 'mysql:shared-db')['private-address'] |
761 | @@ -175,6 +198,7 @@ |
762 | |
763 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
764 | """Authenticates a regular user with the keystone public endpoint.""" |
765 | + self.log.debug('Authenticating keystone user ({})...'.format(user)) |
766 | ep = keystone.service_catalog.url_for(service_type='identity', |
767 | endpoint_type='publicURL') |
768 | return keystone_client.Client(username=user, password=password, |
769 | @@ -182,19 +206,49 @@ |
770 | |
771 | def authenticate_glance_admin(self, keystone): |
772 | """Authenticates admin user with glance.""" |
773 | + self.log.debug('Authenticating glance admin...') |
774 | ep = keystone.service_catalog.url_for(service_type='image', |
775 | endpoint_type='adminURL') |
776 | return glance_client.Client(ep, token=keystone.auth_token) |
777 | |
778 | + def authenticate_heat_admin(self, keystone): |
779 | + """Authenticates the admin user with heat.""" |
780 | + self.log.debug('Authenticating heat admin...') |
781 | + ep = keystone.service_catalog.url_for(service_type='orchestration', |
782 | + endpoint_type='publicURL') |
783 | + return heat_client.Client(endpoint=ep, token=keystone.auth_token) |
784 | + |
785 | def authenticate_nova_user(self, keystone, user, password, tenant): |
786 | """Authenticates a regular user with nova-api.""" |
787 | + self.log.debug('Authenticating nova user ({})...'.format(user)) |
788 | ep = keystone.service_catalog.url_for(service_type='identity', |
789 | endpoint_type='publicURL') |
790 | return nova_client.Client(username=user, api_key=password, |
791 | project_id=tenant, auth_url=ep) |
792 | |
793 | + def authenticate_swift_user(self, keystone, user, password, tenant): |
794 | + """Authenticates a regular user with swift api.""" |
795 | + self.log.debug('Authenticating swift user ({})...'.format(user)) |
796 | + ep = keystone.service_catalog.url_for(service_type='identity', |
797 | + endpoint_type='publicURL') |
798 | + return swiftclient.Connection(authurl=ep, |
799 | + user=user, |
800 | + key=password, |
801 | + tenant_name=tenant, |
802 | + auth_version='2.0') |
803 | + |
804 | def create_cirros_image(self, glance, image_name): |
805 | - """Download the latest cirros image and upload it to glance.""" |
806 | + """Download the latest cirros image and upload it to glance, |
807 | + validate and return a resource pointer. |
808 | + |
809 | + :param glance: pointer to authenticated glance connection |
810 | + :param image_name: display name for new image |
811 | + :returns: glance image pointer |
812 | + """ |
813 | + self.log.debug('Creating glance cirros image ' |
814 | + '({})...'.format(image_name)) |
815 | + |
816 | + # Download cirros image |
817 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
818 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
819 | if http_proxy: |
820 | @@ -203,57 +257,67 @@ |
821 | else: |
822 | opener = urllib.FancyURLopener() |
823 | |
824 | - f = opener.open("http://download.cirros-cloud.net/version/released") |
825 | + f = opener.open('http://download.cirros-cloud.net/version/released') |
826 | version = f.read().strip() |
827 | - cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
828 | + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
829 | local_path = os.path.join('tests', cirros_img) |
830 | |
831 | if not os.path.exists(local_path): |
832 | - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
833 | + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
834 | version, cirros_img) |
835 | opener.retrieve(cirros_url, local_path) |
836 | f.close() |
837 | |
838 | + # Create glance image |
839 | with open(local_path) as f: |
840 | image = glance.images.create(name=image_name, is_public=True, |
841 | disk_format='qcow2', |
842 | container_format='bare', data=f) |
843 | - count = 1 |
844 | - status = image.status |
845 | - while status != 'active' and count < 10: |
846 | - time.sleep(3) |
847 | - image = glance.images.get(image.id) |
848 | - status = image.status |
849 | - self.log.debug('image status: {}'.format(status)) |
850 | - count += 1 |
851 | - |
852 | - if status != 'active': |
853 | - self.log.error('image creation timed out') |
854 | - return None |
855 | + |
856 | + # Wait for image to reach active status |
857 | + img_id = image.id |
858 | + ret = self.resource_reaches_status(glance.images, img_id, |
859 | + expected_stat='active', |
860 | + msg='Image status wait') |
861 | + if not ret: |
862 | + msg = 'Glance image failed to reach expected state.' |
863 | + amulet.raise_status(amulet.FAIL, msg=msg) |
864 | + |
865 | + # Re-validate new image |
866 | + self.log.debug('Validating image attributes...') |
867 | + val_img_name = glance.images.get(img_id).name |
868 | + val_img_stat = glance.images.get(img_id).status |
869 | + val_img_pub = glance.images.get(img_id).is_public |
870 | + val_img_cfmt = glance.images.get(img_id).container_format |
871 | + val_img_dfmt = glance.images.get(img_id).disk_format |
872 | + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
873 | + 'container fmt:{} disk fmt:{}'.format( |
874 | + val_img_name, val_img_pub, img_id, |
875 | + val_img_stat, val_img_cfmt, val_img_dfmt)) |
876 | + |
877 | + if val_img_name == image_name and val_img_stat == 'active' \ |
878 | + and val_img_pub is True and val_img_cfmt == 'bare' \ |
879 | + and val_img_dfmt == 'qcow2': |
880 | + self.log.debug(msg_attr) |
881 | + else: |
882 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
883 | + amulet.raise_status(amulet.FAIL, msg=msg) |
884 | |
885 | return image |
886 | |
887 | def delete_image(self, glance, image): |
888 | """Delete the specified image.""" |
889 | - num_before = len(list(glance.images.list())) |
890 | - glance.images.delete(image) |
891 | - |
892 | - count = 1 |
893 | - num_after = len(list(glance.images.list())) |
894 | - while num_after != (num_before - 1) and count < 10: |
895 | - time.sleep(3) |
896 | - num_after = len(list(glance.images.list())) |
897 | - self.log.debug('number of images: {}'.format(num_after)) |
898 | - count += 1 |
899 | - |
900 | - if num_after != (num_before - 1): |
901 | - self.log.error('image deletion timed out') |
902 | - return False |
903 | - |
904 | - return True |
905 | + |
906 | + # /!\ DEPRECATION WARNING |
907 | + self.log.warn('/!\\ DEPRECATION WARNING: use ' |
908 | + 'delete_resource instead of delete_image.') |
909 | + self.log.debug('Deleting glance image ({})...'.format(image)) |
910 | + return self.delete_resource(glance.images, image, msg='glance image') |
911 | |
912 | def create_instance(self, nova, image_name, instance_name, flavor): |
913 | """Create the specified instance.""" |
914 | + self.log.debug('Creating instance ' |
915 | + '({}|{}|{})'.format(instance_name, image_name, flavor)) |
916 | image = nova.images.find(name=image_name) |
917 | flavor = nova.flavors.find(name=flavor) |
918 | instance = nova.servers.create(name=instance_name, image=image, |
919 | @@ -276,19 +340,265 @@ |
920 | |
921 | def delete_instance(self, nova, instance): |
922 | """Delete the specified instance.""" |
923 | - num_before = len(list(nova.servers.list())) |
924 | - nova.servers.delete(instance) |
925 | - |
926 | - count = 1 |
927 | - num_after = len(list(nova.servers.list())) |
928 | - while num_after != (num_before - 1) and count < 10: |
929 | - time.sleep(3) |
930 | - num_after = len(list(nova.servers.list())) |
931 | - self.log.debug('number of instances: {}'.format(num_after)) |
932 | - count += 1 |
933 | - |
934 | - if num_after != (num_before - 1): |
935 | - self.log.error('instance deletion timed out') |
936 | - return False |
937 | - |
938 | - return True |
939 | + |
940 | + # /!\ DEPRECATION WARNING |
941 | + self.log.warn('/!\\ DEPRECATION WARNING: use ' |
942 | + 'delete_resource instead of delete_instance.') |
943 | + self.log.debug('Deleting instance ({})...'.format(instance)) |
944 | + return self.delete_resource(nova.servers, instance, |
945 | + msg='nova instance') |
946 | + |
947 | + def create_or_get_keypair(self, nova, keypair_name="testkey"): |
948 | + """Create a new keypair, or return pointer if it already exists.""" |
949 | + try: |
950 | + _keypair = nova.keypairs.get(keypair_name) |
951 | + self.log.debug('Keypair ({}) already exists, ' |
952 | + 'using it.'.format(keypair_name)) |
953 | + return _keypair |
954 | + except: |
955 | + self.log.debug('Keypair ({}) does not exist, ' |
956 | + 'creating it.'.format(keypair_name)) |
957 | + |
958 | + _keypair = nova.keypairs.create(name=keypair_name) |
959 | + return _keypair |
960 | + |
961 | + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
962 | + img_id=None, src_vol_id=None, snap_id=None): |
963 | + """Create cinder volume, optionally from a glance image, OR |
964 | + optionally as a clone of an existing volume, OR optionally |
965 | + from a snapshot. Wait for the new volume status to reach |
966 | + the expected status, validate and return a resource pointer. |
967 | + |
968 | + :param vol_name: cinder volume display name |
969 | + :param vol_size: size in gigabytes |
970 | + :param img_id: optional glance image id |
971 | + :param src_vol_id: optional source volume id to clone |
972 | + :param snap_id: optional snapshot id to use |
973 | + :returns: cinder volume pointer |
974 | + """ |
975 | + # Handle parameter input and avoid impossible combinations |
976 | + if img_id and not src_vol_id and not snap_id: |
977 | + # Create volume from image |
978 | + self.log.debug('Creating cinder volume from glance image...') |
979 | + bootable = 'true' |
980 | + elif src_vol_id and not img_id and not snap_id: |
981 | + # Clone an existing volume |
982 | + self.log.debug('Cloning cinder volume...') |
983 | + bootable = cinder.volumes.get(src_vol_id).bootable |
984 | + elif snap_id and not src_vol_id and not img_id: |
985 | + # Create volume from snapshot |
986 | + self.log.debug('Creating cinder volume from snapshot...') |
987 | + snap = cinder.volume_snapshots.find(id=snap_id) |
988 | + vol_size = snap.size |
989 | + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
990 | + bootable = cinder.volumes.get(snap_vol_id).bootable |
991 | + elif not img_id and not src_vol_id and not snap_id: |
992 | + # Create volume |
993 | + self.log.debug('Creating cinder volume...') |
994 | + bootable = 'false' |
995 | + else: |
996 | + # Impossible combination of parameters |
997 | + msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
998 | + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
999 | + img_id, src_vol_id, |
1000 | + snap_id)) |
1001 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1002 | + |
1003 | + # Create new volume |
1004 | + try: |
1005 | + vol_new = cinder.volumes.create(display_name=vol_name, |
1006 | + imageRef=img_id, |
1007 | + size=vol_size, |
1008 | + source_volid=src_vol_id, |
1009 | + snapshot_id=snap_id) |
1010 | + vol_id = vol_new.id |
1011 | + except Exception as e: |
1012 | + msg = 'Failed to create volume: {}'.format(e) |
1013 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1014 | + |
1015 | + # Wait for volume to reach available status |
1016 | + ret = self.resource_reaches_status(cinder.volumes, vol_id, |
1017 | + expected_stat="available", |
1018 | + msg="Volume status wait") |
1019 | + if not ret: |
1020 | + msg = 'Cinder volume failed to reach expected state.' |
1021 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1022 | + |
1023 | + # Re-validate new volume |
1024 | + self.log.debug('Validating volume attributes...') |
1025 | + val_vol_name = cinder.volumes.get(vol_id).display_name |
1026 | + val_vol_boot = cinder.volumes.get(vol_id).bootable |
1027 | + val_vol_stat = cinder.volumes.get(vol_id).status |
1028 | + val_vol_size = cinder.volumes.get(vol_id).size |
1029 | + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
1030 | + '{} size:{}'.format(val_vol_name, vol_id, |
1031 | + val_vol_stat, val_vol_boot, |
1032 | + val_vol_size)) |
1033 | + |
1034 | + if val_vol_boot == bootable and val_vol_stat == 'available' \ |
1035 | + and val_vol_name == vol_name and val_vol_size == vol_size: |
1036 | + self.log.debug(msg_attr) |
1037 | + else: |
1038 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
1039 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1040 | + |
1041 | + return vol_new |
1042 | + |
1043 | + def delete_resource(self, resource, resource_id, |
1044 | + msg="resource", max_wait=120): |
1045 | + """Delete one openstack resource, such as one instance, keypair, |
1046 | + image, volume, stack, etc., and confirm deletion within max wait time. |
1047 | + |
1048 | + :param resource: pointer to os resource type, ex:glance_client.images |
1049 | + :param resource_id: unique name or id for the openstack resource |
1050 | + :param msg: text to identify purpose in logging |
1051 | + :param max_wait: maximum wait time in seconds |
1052 | + :returns: True if successful, otherwise False |
1053 | + """ |
1054 | + self.log.debug('Deleting OpenStack resource ' |
1055 | + '{} ({})'.format(resource_id, msg)) |
1056 | + num_before = len(list(resource.list())) |
1057 | + resource.delete(resource_id) |
1058 | + |
1059 | + tries = 0 |
1060 | + num_after = len(list(resource.list())) |
1061 | + while num_after != (num_before - 1) and tries < (max_wait / 4): |
1062 | + self.log.debug('{} delete check: ' |
1063 | + '{} [{}:{}] {}'.format(msg, tries, |
1064 | + num_before, |
1065 | + num_after, |
1066 | + resource_id)) |
1067 | + time.sleep(4) |
1068 | + num_after = len(list(resource.list())) |
1069 | + tries += 1 |
1070 | + |
1071 | + self.log.debug('{}: expected, actual count = {}, ' |
1072 | + '{}'.format(msg, num_before - 1, num_after)) |
1073 | + |
1074 | + if num_after == (num_before - 1): |
1075 | + return True |
1076 | + else: |
1077 | + self.log.error('{} delete timed out'.format(msg)) |
1078 | + return False |
1079 | + |
1080 | + def resource_reaches_status(self, resource, resource_id, |
1081 | + expected_stat='available', |
1082 | + msg='resource', max_wait=120): |
1083 | + """Wait for an openstack resources status to reach an |
1084 | + expected status within a specified time. Useful to confirm that |
1085 | + nova instances, cinder vols, snapshots, glance images, heat stacks |
1086 | + and other resources eventually reach the expected status. |
1087 | + |
1088 | + :param resource: pointer to os resource type, ex: heat_client.stacks |
1089 | + :param resource_id: unique id for the openstack resource |
1090 | + :param expected_stat: status to expect resource to reach |
1091 | + :param msg: text to identify purpose in logging |
1092 | + :param max_wait: maximum wait time in seconds |
1093 | + :returns: True if successful, False if status is not reached |
1094 | + """ |
1095 | + |
1096 | + tries = 0 |
1097 | + resource_stat = resource.get(resource_id).status |
1098 | + while resource_stat != expected_stat and tries < (max_wait / 4): |
1099 | + self.log.debug('{} status check: ' |
1100 | + '{} [{}:{}] {}'.format(msg, tries, |
1101 | + resource_stat, |
1102 | + expected_stat, |
1103 | + resource_id)) |
1104 | + time.sleep(4) |
1105 | + resource_stat = resource.get(resource_id).status |
1106 | + tries += 1 |
1107 | + |
1108 | + self.log.debug('{}: expected, actual status = {}, ' |
1109 | + '{}'.format(msg, resource_stat, expected_stat)) |
1110 | + |
1111 | + if resource_stat == expected_stat: |
1112 | + return True |
1113 | + else: |
1114 | + self.log.debug('{} never reached expected status: ' |
1115 | + '{}'.format(resource_id, expected_stat)) |
1116 | + return False |
1117 | + |
1118 | + def get_ceph_osd_id_cmd(self, index): |
1119 | + """Produce a shell command that will return a ceph-osd id.""" |
1120 | + return ("`initctl list | grep 'ceph-osd ' | " |
1121 | + "awk 'NR=={} {{ print $2 }}' | " |
1122 | + "grep -o '[0-9]*'`".format(index + 1)) |
1123 | + |
1124 | + def get_ceph_pools(self, sentry_unit): |
1125 | + """Return a dict of ceph pools from a single ceph unit, with |
1126 | + pool name as keys, pool id as vals.""" |
1127 | + pools = {} |
1128 | + cmd = 'sudo ceph osd lspools' |
1129 | + output, code = sentry_unit.run(cmd) |
1130 | + if code != 0: |
1131 | + msg = ('{} `{}` returned {} ' |
1132 | + '{}'.format(sentry_unit.info['unit_name'], |
1133 | + cmd, code, output)) |
1134 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1135 | + |
1136 | + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
1137 | + for pool in str(output).split(','): |
1138 | + pool_id_name = pool.split(' ') |
1139 | + if len(pool_id_name) == 2: |
1140 | + pool_id = pool_id_name[0] |
1141 | + pool_name = pool_id_name[1] |
1142 | + pools[pool_name] = int(pool_id) |
1143 | + |
1144 | + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
1145 | + pools)) |
1146 | + return pools |
1147 | + |
1148 | + def get_ceph_df(self, sentry_unit): |
1149 | + """Return dict of ceph df json output, including ceph pool state. |
1150 | + |
1151 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1152 | + :returns: Dict of ceph df output |
1153 | + """ |
1154 | + cmd = 'sudo ceph df --format=json' |
1155 | + output, code = sentry_unit.run(cmd) |
1156 | + if code != 0: |
1157 | + msg = ('{} `{}` returned {} ' |
1158 | + '{}'.format(sentry_unit.info['unit_name'], |
1159 | + cmd, code, output)) |
1160 | + amulet.raise_status(amulet.FAIL, msg=msg) |
1161 | + return json.loads(output) |
1162 | + |
1163 | + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
1164 | + """Take a sample of attributes of a ceph pool, returning ceph |
1165 | + pool name, object count and disk space used for the specified |
1166 | + pool ID number. |
1167 | + |
1168 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
1169 | + :param pool_id: Ceph pool ID |
1170 | + :returns: List of pool name, object count, kb disk space used |
1171 | + """ |
1172 | + df = self.get_ceph_df(sentry_unit) |
1173 | + pool_name = df['pools'][pool_id]['name'] |
1174 | + obj_count = df['pools'][pool_id]['stats']['objects'] |
1175 | + kb_used = df['pools'][pool_id]['stats']['kb_used'] |
1176 | + self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
1177 | + '{} kb used'.format(pool_name, pool_id, |
1178 | + obj_count, kb_used)) |
1179 | + return pool_name, obj_count, kb_used |
1180 | + |
1181 | + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
1182 | + """Validate ceph pool samples taken over time, such as pool |
1183 | + object counts or pool kb used, before adding, after adding, and |
1184 | + after deleting items which affect those pool attributes. The |
1185 | + 2nd element is expected to be greater than the 1st; 3rd is expected |
1186 | + to be less than the 2nd. |
1187 | + |
1188 | + :param samples: List containing 3 data samples |
1189 | + :param sample_type: String for logging and usage context |
1190 | + :returns: None if successful, Failure message otherwise |
1191 | + """ |
1192 | + original, created, deleted = range(3) |
1193 | + if samples[created] <= samples[original] or \ |
1194 | + samples[deleted] >= samples[created]: |
1195 | + return ('Ceph {} samples ({}) ' |
1196 | + 'unexpected.'.format(sample_type, samples)) |
1197 | + else: |
1198 | + self.log.debug('Ceph {} samples (OK): ' |
1199 | + '{}'.format(sample_type, samples)) |
1200 | + return None |
1201 | |
1202 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
1203 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-06-02 12:12:49 +0000 |
1204 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-08-27 15:02:41 +0000 |
1205 | @@ -50,6 +50,8 @@ |
1206 | from charmhelpers.core.strutils import bool_from_string |
1207 | |
1208 | from charmhelpers.core.host import ( |
1209 | + get_bond_master, |
1210 | + is_phy_iface, |
1211 | list_nics, |
1212 | get_nic_hwaddr, |
1213 | mkdir, |
1214 | @@ -122,21 +124,24 @@ |
1215 | of specifying multiple key value pairs within the same string. For |
1216 | example, a string in the format of 'key1=value1, key2=value2' will |
1217 | return a dict of: |
1218 | - {'key1': 'value1', |
1219 | - 'key2': 'value2'}. |
1220 | + |
1221 | + {'key1': 'value1', |
1222 | + 'key2': 'value2'}. |
1223 | |
1224 | 2. A string in the above format, but supporting a comma-delimited list |
1225 | of values for the same key. For example, a string in the format of |
1226 | 'key1=value1, key2=value3,value4,value5' will return a dict of: |
1227 | - {'key1', 'value1', |
1228 | - 'key2', 'value2,value3,value4'} |
1229 | + |
1230 | + {'key1', 'value1', |
1231 | + 'key2', 'value2,value3,value4'} |
1232 | |
1233 | 3. A string containing a colon character (:) prior to an equal |
1234 | character (=) will be treated as yaml and parsed as such. This can be |
1235 | used to specify more complex key value pairs. For example, |
1236 | a string in the format of 'key1: subkey1=value1, subkey2=value2' will |
1237 | return a dict of: |
1238 | - {'key1', 'subkey1=value1, subkey2=value2'} |
1239 | + |
1240 | + {'key1', 'subkey1=value1, subkey2=value2'} |
1241 | |
1242 | The provided config_flags string may be a list of comma-separated values |
1243 | which themselves may be comma-separated list of values. |
1244 | @@ -194,15 +199,6 @@ |
1245 | raise NotImplementedError |
1246 | |
1247 | |
1248 | -class OSPatternContextGenerator(OSContextGenerator): |
1249 | - """Base class for pattern context generators. |
1250 | - |
1251 | - __call__ should return a dictionary of { tuple: dict }, where the tuple |
1252 | - will be used as input for format() for the filename pattern as registered |
1253 | - by OSConfigRenderer.register_pattern(). |
1254 | - """ |
1255 | - |
1256 | - |
1257 | class SharedDBContext(OSContextGenerator): |
1258 | interfaces = ['shared-db'] |
1259 | |
1260 | @@ -249,7 +245,7 @@ |
1261 | if self.relation_prefix: |
1262 | password_setting = self.relation_prefix + '_password' |
1263 | |
1264 | - for rid in relation_ids('shared-db'): |
1265 | + for rid in relation_ids(self.interfaces[0]): |
1266 | for unit in related_units(rid): |
1267 | rdata = relation_get(rid=rid, unit=unit) |
1268 | host = rdata.get('db_host') |
1269 | @@ -900,8 +896,6 @@ |
1270 | return ctxt |
1271 | |
1272 | def __call__(self): |
1273 | - self._ensure_packages() |
1274 | - |
1275 | if self.network_manager not in ['quantum', 'neutron']: |
1276 | return {} |
1277 | |
1278 | @@ -931,7 +925,6 @@ |
1279 | |
1280 | |
1281 | class NeutronPortContext(OSContextGenerator): |
1282 | - NIC_PREFIXES = ['eth', 'bond'] |
1283 | |
1284 | def resolve_ports(self, ports): |
1285 | """Resolve NICs not yet bound to bridge(s) |
1286 | @@ -943,7 +936,18 @@ |
1287 | |
1288 | hwaddr_to_nic = {} |
1289 | hwaddr_to_ip = {} |
1290 | - for nic in list_nics(self.NIC_PREFIXES): |
1291 | + for nic in list_nics(): |
1292 | + # Ignore virtual interfaces (bond masters will be identified from |
1293 | + # their slaves) |
1294 | + if not is_phy_iface(nic): |
1295 | + continue |
1296 | + |
1297 | + _nic = get_bond_master(nic) |
1298 | + if _nic: |
1299 | + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), |
1300 | + level=DEBUG) |
1301 | + nic = _nic |
1302 | + |
1303 | hwaddr = get_nic_hwaddr(nic) |
1304 | hwaddr_to_nic[hwaddr] = nic |
1305 | addresses = get_ipv4_addr(nic, fatal=False) |
1306 | @@ -969,7 +973,8 @@ |
1307 | # trust it to be the real external network). |
1308 | resolved.append(entry) |
1309 | |
1310 | - return resolved |
1311 | + # Ensure no duplicates |
1312 | + return list(set(resolved)) |
1313 | |
1314 | |
1315 | class OSConfigFlagContext(OSContextGenerator): |
1316 | @@ -1059,13 +1064,22 @@ |
1317 | :param config_file : Service's config file to query sections |
1318 | :param interface : Subordinate interface to inspect |
1319 | """ |
1320 | - self.service = service |
1321 | self.config_file = config_file |
1322 | - self.interface = interface |
1323 | + if isinstance(service, list): |
1324 | + self.services = service |
1325 | + else: |
1326 | + self.services = [service] |
1327 | + if isinstance(interface, list): |
1328 | + self.interfaces = interface |
1329 | + else: |
1330 | + self.interfaces = [interface] |
1331 | |
1332 | def __call__(self): |
1333 | ctxt = {'sections': {}} |
1334 | - for rid in relation_ids(self.interface): |
1335 | + rids = [] |
1336 | + for interface in self.interfaces: |
1337 | + rids.extend(relation_ids(interface)) |
1338 | + for rid in rids: |
1339 | for unit in related_units(rid): |
1340 | sub_config = relation_get('subordinate_configuration', |
1341 | rid=rid, unit=unit) |
1342 | @@ -1077,29 +1091,32 @@ |
1343 | 'setting from %s' % rid, level=ERROR) |
1344 | continue |
1345 | |
1346 | - if self.service not in sub_config: |
1347 | - log('Found subordinate_config on %s but it contained' |
1348 | - 'nothing for %s service' % (rid, self.service), |
1349 | - level=INFO) |
1350 | - continue |
1351 | - |
1352 | - sub_config = sub_config[self.service] |
1353 | - if self.config_file not in sub_config: |
1354 | - log('Found subordinate_config on %s but it contained' |
1355 | - 'nothing for %s' % (rid, self.config_file), |
1356 | - level=INFO) |
1357 | - continue |
1358 | - |
1359 | - sub_config = sub_config[self.config_file] |
1360 | - for k, v in six.iteritems(sub_config): |
1361 | - if k == 'sections': |
1362 | - for section, config_dict in six.iteritems(v): |
1363 | - log("adding section '%s'" % (section), |
1364 | - level=DEBUG) |
1365 | - ctxt[k][section] = config_dict |
1366 | - else: |
1367 | - ctxt[k] = v |
1368 | - |
1369 | + for service in self.services: |
1370 | + if service not in sub_config: |
1371 | + log('Found subordinate_config on %s but it contained' |
1372 | + 'nothing for %s service' % (rid, service), |
1373 | + level=INFO) |
1374 | + continue |
1375 | + |
1376 | + sub_config = sub_config[service] |
1377 | + if self.config_file not in sub_config: |
1378 | + log('Found subordinate_config on %s but it contained' |
1379 | + 'nothing for %s' % (rid, self.config_file), |
1380 | + level=INFO) |
1381 | + continue |
1382 | + |
1383 | + sub_config = sub_config[self.config_file] |
1384 | + for k, v in six.iteritems(sub_config): |
1385 | + if k == 'sections': |
1386 | + for section, config_list in six.iteritems(v): |
1387 | + log("adding section '%s'" % (section), |
1388 | + level=DEBUG) |
1389 | + if ctxt[k].get(section): |
1390 | + ctxt[k][section].extend(config_list) |
1391 | + else: |
1392 | + ctxt[k][section] = config_list |
1393 | + else: |
1394 | + ctxt[k] = v |
1395 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
1396 | return ctxt |
1397 | |
1398 | @@ -1276,15 +1293,19 @@ |
1399 | def __call__(self): |
1400 | ports = config('data-port') |
1401 | if ports: |
1402 | + # Map of {port/mac:bridge} |
1403 | portmap = parse_data_port_mappings(ports) |
1404 | - ports = portmap.values() |
1405 | + ports = portmap.keys() |
1406 | + # Resolve provided ports or mac addresses and filter out those |
1407 | + # already attached to a bridge. |
1408 | resolved = self.resolve_ports(ports) |
1409 | + # FIXME: is this necessary? |
1410 | normalized = {get_nic_hwaddr(port): port for port in resolved |
1411 | if port not in ports} |
1412 | normalized.update({port: port for port in resolved |
1413 | if port in ports}) |
1414 | if resolved: |
1415 | - return {bridge: normalized[port] for bridge, port in |
1416 | + return {bridge: normalized[port] for port, bridge in |
1417 | six.iteritems(portmap) if port in normalized.keys()} |
1418 | |
1419 | return None |
1420 | |
1421 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' |
1422 | --- hooks/charmhelpers/contrib/openstack/ip.py 2015-02-26 10:11:26 +0000 |
1423 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2015-08-27 15:02:41 +0000 |
1424 | @@ -17,6 +17,7 @@ |
1425 | from charmhelpers.core.hookenv import ( |
1426 | config, |
1427 | unit_get, |
1428 | + service_name, |
1429 | ) |
1430 | from charmhelpers.contrib.network.ip import ( |
1431 | get_address_in_network, |
1432 | @@ -26,8 +27,6 @@ |
1433 | ) |
1434 | from charmhelpers.contrib.hahelpers.cluster import is_clustered |
1435 | |
1436 | -from functools import partial |
1437 | - |
1438 | PUBLIC = 'public' |
1439 | INTERNAL = 'int' |
1440 | ADMIN = 'admin' |
1441 | @@ -35,15 +34,18 @@ |
1442 | ADDRESS_MAP = { |
1443 | PUBLIC: { |
1444 | 'config': 'os-public-network', |
1445 | - 'fallback': 'public-address' |
1446 | + 'fallback': 'public-address', |
1447 | + 'override': 'os-public-hostname', |
1448 | }, |
1449 | INTERNAL: { |
1450 | 'config': 'os-internal-network', |
1451 | - 'fallback': 'private-address' |
1452 | + 'fallback': 'private-address', |
1453 | + 'override': 'os-internal-hostname', |
1454 | }, |
1455 | ADMIN: { |
1456 | 'config': 'os-admin-network', |
1457 | - 'fallback': 'private-address' |
1458 | + 'fallback': 'private-address', |
1459 | + 'override': 'os-admin-hostname', |
1460 | } |
1461 | } |
1462 | |
1463 | @@ -57,15 +59,50 @@ |
1464 | :param endpoint_type: str endpoint type to resolve. |
1465 | :param returns: str base URL for services on the current service unit. |
1466 | """ |
1467 | - scheme = 'http' |
1468 | - if 'https' in configs.complete_contexts(): |
1469 | - scheme = 'https' |
1470 | + scheme = _get_scheme(configs) |
1471 | + |
1472 | address = resolve_address(endpoint_type) |
1473 | if is_ipv6(address): |
1474 | address = "[{}]".format(address) |
1475 | + |
1476 | return '%s://%s' % (scheme, address) |
1477 | |
1478 | |
1479 | +def _get_scheme(configs): |
1480 | + """Returns the scheme to use for the url (either http or https) |
1481 | + depending upon whether https is in the configs value. |
1482 | + |
1483 | + :param configs: OSTemplateRenderer config templating object to inspect |
1484 | + for a complete https context. |
1485 | + :returns: either 'http' or 'https' depending on whether https is |
1486 | + configured within the configs context. |
1487 | + """ |
1488 | + scheme = 'http' |
1489 | + if configs and 'https' in configs.complete_contexts(): |
1490 | + scheme = 'https' |
1491 | + return scheme |
1492 | + |
1493 | + |
1494 | +def _get_address_override(endpoint_type=PUBLIC): |
1495 | + """Returns any address overrides that the user has defined based on the |
1496 | + endpoint type. |
1497 | + |
1498 | + Note: this function allows for the service name to be inserted into the |
1499 | + address if the user specifies {service_name}.somehost.org. |
1500 | + |
1501 | + :param endpoint_type: the type of endpoint to retrieve the override |
1502 | + value for. |
1503 | + :returns: any endpoint address or hostname that the user has overridden |
1504 | + or None if an override is not present. |
1505 | + """ |
1506 | + override_key = ADDRESS_MAP[endpoint_type]['override'] |
1507 | + addr_override = config(override_key) |
1508 | + if not addr_override: |
1509 | + return None |
1510 | + else: |
1511 | + return addr_override.format(service_name=service_name()) |
1512 | + |
1513 | + |
1514 | def resolve_address(endpoint_type=PUBLIC): |
1515 | """Return unit address depending on net config. |
1516 | |
1517 | @@ -77,7 +114,10 @@ |
1518 | |
1519 | :param endpoint_type: Network endpoint type |
1520 | """ |
1521 | - resolved_address = None |
1522 | + resolved_address = _get_address_override(endpoint_type) |
1523 | + if resolved_address: |
1524 | + return resolved_address |
1525 | + |
1526 | vips = config('vip') |
1527 | if vips: |
1528 | vips = vips.split() |
1529 | @@ -109,38 +149,3 @@ |
1530 | "clustered=%s)" % (net_type, clustered)) |
1531 | |
1532 | return resolved_address |
1533 | - |
1534 | - |
1535 | -def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, |
1536 | - override=None): |
1537 | - """Returns the correct endpoint URL to advertise to Keystone. |
1538 | - |
1539 | - This method provides the correct endpoint URL which should be advertised to |
1540 | - the keystone charm for endpoint creation. This method allows for the url to |
1541 | - be overridden to force a keystone endpoint to have specific URL for any of |
1542 | - the defined scopes (admin, internal, public). |
1543 | - |
1544 | - :param configs: OSTemplateRenderer config templating object to inspect |
1545 | - for a complete https context. |
1546 | - :param url_template: str format string for creating the url template. Only |
1547 | - two values will be passed - the scheme+hostname |
1548 | - returned by the canonical_url and the port. |
1549 | - :param endpoint_type: str endpoint type to resolve. |
1550 | - :param override: str the name of the config option which overrides the |
1551 | - endpoint URL defined by the charm itself. None will |
1552 | - disable any overrides (default). |
1553 | - """ |
1554 | - if override: |
1555 | - # Return any user-defined overrides for the keystone endpoint URL. |
1556 | - user_value = config(override) |
1557 | - if user_value: |
1558 | - return user_value.strip() |
1559 | - |
1560 | - return url_template % (canonical_url(configs, endpoint_type), port) |
1561 | - |
1562 | - |
1563 | -public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC) |
1564 | - |
1565 | -internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL) |
1566 | - |
1567 | -admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN) |
1568 | |
1569 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' |
1570 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-04-16 20:24:28 +0000 |
1571 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-08-27 15:02:41 +0000 |
1572 | @@ -172,14 +172,16 @@ |
1573 | 'services': ['calico-felix', |
1574 | 'bird', |
1575 | 'neutron-dhcp-agent', |
1576 | - 'nova-api-metadata'], |
1577 | + 'nova-api-metadata', |
1578 | + 'etcd'], |
1579 | 'packages': [[headers_package()] + determine_dkms_package(), |
1580 | ['calico-compute', |
1581 | 'bird', |
1582 | 'neutron-dhcp-agent', |
1583 | - 'nova-api-metadata']], |
1584 | - 'server_packages': ['neutron-server', 'calico-control'], |
1585 | - 'server_services': ['neutron-server'] |
1586 | + 'nova-api-metadata', |
1587 | + 'etcd']], |
1588 | + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], |
1589 | + 'server_services': ['neutron-server', 'etcd'] |
1590 | }, |
1591 | 'vsp': { |
1592 | 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', |
1593 | @@ -253,14 +255,30 @@ |
1594 | return 'neutron' |
1595 | |
1596 | |
1597 | -def parse_mappings(mappings): |
1598 | +def parse_mappings(mappings, key_rvalue=False): |
1599 | + """By default mappings are lvalue keyed. |
1600 | + |
1601 | + If key_rvalue is True, the mapping will be reversed to allow multiple |
1602 | + configs for the same lvalue. |
1603 | + """ |
1604 | parsed = {} |
1605 | if mappings: |
1606 | - mappings = mappings.split(' ') |
1607 | + mappings = mappings.split() |
1608 | for m in mappings: |
1609 | p = m.partition(':') |
1610 | - if p[1] == ':': |
1611 | - parsed[p[0].strip()] = p[2].strip() |
1612 | + |
1613 | + if key_rvalue: |
1614 | + key_index = 2 |
1615 | + val_index = 0 |
1616 | + # if there is no rvalue skip to next |
1617 | + if not p[1]: |
1618 | + continue |
1619 | + else: |
1620 | + key_index = 0 |
1621 | + val_index = 2 |
1622 | + |
1623 | + key = p[key_index].strip() |
1624 | + parsed[key] = p[val_index].strip() |
1625 | |
1626 | return parsed |
1627 | |
1628 | @@ -278,25 +296,25 @@ |
1629 | def parse_data_port_mappings(mappings, default_bridge='br-data'): |
1630 | """Parse data port mappings. |
1631 | |
1632 | - Mappings must be a space-delimited list of bridge:port mappings. |
1633 | + Mappings must be a space-delimited list of port:bridge mappings. |
1634 | |
1635 | - Returns dict of the form {bridge:port}. |
1636 | + Returns dict of the form {port:bridge} where port may be a MAC address or |
1637 | + interface name. |
1638 | """ |
1639 | - _mappings = parse_mappings(mappings) |
1640 | - if not _mappings: |
1641 | + |
1642 | + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be |
1643 | + # proposed for <port> since it may be a MAC address which will differ |
1644 | + # across units, thus allowing first-known-good to be chosen. |
1645 | + _mappings = parse_mappings(mappings, key_rvalue=True) |
1646 | + if not _mappings or list(_mappings.values()) == ['']: |
1647 | if not mappings: |
1648 | return {} |
1649 | |
1650 | # For backwards-compatibility we need to support port-only provided in |
1651 | # config. |
1652 | - _mappings = {default_bridge: mappings.split(' ')[0]} |
1653 | - |
1654 | - bridges = _mappings.keys() |
1655 | - ports = _mappings.values() |
1656 | - if len(set(bridges)) != len(bridges): |
1657 | - raise Exception("It is not allowed to have more than one port " |
1658 | - "configured on the same bridge") |
1659 | - |
1660 | + _mappings = {mappings.split()[0]: default_bridge} |
1661 | + |
1662 | + ports = _mappings.keys() |
1663 | if len(set(ports)) != len(ports): |
1664 | raise Exception("It is not allowed to have the same port configured " |
1665 | "on more than one bridge") |
1666 | @@ -309,6 +327,8 @@ |
1667 | |
1668 | Mappings must be a space-delimited list of provider:start:end mappings. |
1669 | |
1670 | + The start:end range is optional and may be omitted. |
1671 | + |
1672 | Returns dict of the form {provider: (start, end)}. |
1673 | """ |
1674 | _mappings = parse_mappings(mappings) |
1675 | |
1676 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' |
1677 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-02-24 05:48:43 +0000 |
1678 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-08-27 15:02:41 +0000 |
1679 | @@ -5,11 +5,11 @@ |
1680 | ############################################################################### |
1681 | [global] |
1682 | {% if auth -%} |
1683 | - auth_supported = {{ auth }} |
1684 | - keyring = /etc/ceph/$cluster.$name.keyring |
1685 | - mon host = {{ mon_hosts }} |
1686 | +auth_supported = {{ auth }} |
1687 | +keyring = /etc/ceph/$cluster.$name.keyring |
1688 | +mon host = {{ mon_hosts }} |
1689 | {% endif -%} |
1690 | - log to syslog = {{ use_syslog }} |
1691 | - err to syslog = {{ use_syslog }} |
1692 | - clog to syslog = {{ use_syslog }} |
1693 | +log to syslog = {{ use_syslog }} |
1694 | +err to syslog = {{ use_syslog }} |
1695 | +clog to syslog = {{ use_syslog }} |
1696 | |
1697 | |
1698 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' |
1699 | --- hooks/charmhelpers/contrib/openstack/templating.py 2015-06-02 12:12:49 +0000 |
1700 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2015-08-27 15:02:41 +0000 |
1701 | @@ -14,9 +14,7 @@ |
1702 | # You should have received a copy of the GNU Lesser General Public License |
1703 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1704 | |
1705 | -import glob |
1706 | import os |
1707 | -import string |
1708 | |
1709 | import six |
1710 | |
1711 | @@ -27,13 +25,12 @@ |
1712 | INFO |
1713 | ) |
1714 | from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES |
1715 | -from charmhelpers.contrib.openstack.context import OSPatternContextGenerator |
1716 | |
1717 | try: |
1718 | from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
1719 | except ImportError: |
1720 | - # python-jinja2 may not be installed yet, or we're running unittests. |
1721 | - FileSystemLoader = ChoiceLoader = Environment = exceptions = None |
1722 | + apt_install('python-jinja2', fatal=True) |
1723 | + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions |
1724 | |
1725 | |
1726 | class OSConfigException(Exception): |
1727 | @@ -99,7 +96,7 @@ |
1728 | else: |
1729 | self.contexts = contexts |
1730 | |
1731 | - self._complete_contexts = set() |
1732 | + self._complete_contexts = [] |
1733 | |
1734 | def context(self): |
1735 | ctxt = {} |
1736 | @@ -108,7 +105,9 @@ |
1737 | if _ctxt: |
1738 | ctxt.update(_ctxt) |
1739 | # track interfaces for every complete context. |
1740 | - self._complete_contexts.update(context.interfaces) |
1741 | + [self._complete_contexts.append(interface) |
1742 | + for interface in context.interfaces |
1743 | + if interface not in self._complete_contexts] |
1744 | return ctxt |
1745 | |
1746 | def complete_contexts(self): |
1747 | @@ -121,41 +120,6 @@ |
1748 | return self._complete_contexts |
1749 | |
1750 | |
1751 | -class OSPatternConfigTemplate(OSConfigTemplate): |
1752 | - """ |
1753 | - Associates a config pattern template with a list of context generators. |
1754 | - Responsible for constructing a template context based on those generators. |
1755 | - """ |
1756 | - def __init__(self, pattern, contexts): |
1757 | - self.pattern = pattern |
1758 | - super(OSPatternConfigTemplate, self).__init__(config_file=None, |
1759 | - contexts=contexts) |
1760 | - |
1761 | - def context(self): |
1762 | - base_ctxt = {} |
1763 | - ctxt = {} |
1764 | - for context in self.contexts: |
1765 | - _ctxt = context() |
1766 | - if not _ctxt: |
1767 | - continue |
1768 | - elif isinstance(context, OSPatternContextGenerator): |
1769 | - # for each returned key initialize its context with base_ctxt |
1770 | - # if not defined before and update with new data |
1771 | - for k, v in _ctxt.items(): |
1772 | - if k not in ctxt: |
1773 | - ctxt[k] = base_ctxt.copy() |
1774 | - ctxt[k].update(v) |
1775 | - else: |
1776 | - # update the base context and all pre-existing file-specific |
1777 | - # contexts |
1778 | - base_ctxt.update(_ctxt) |
1779 | - for key in ctxt: |
1780 | - ctxt[key].update(_ctxt) |
1781 | - # track interfaces for every complete context. |
1782 | - self._complete_contexts.update(context.interfaces) |
1783 | - return ctxt |
1784 | - |
1785 | - |
1786 | class OSConfigRenderer(object): |
1787 | """ |
1788 | This class provides a common templating system to be used by OpenStack |
1789 | @@ -168,13 +132,6 @@ |
1790 | # import some common context generates from charmhelpers |
1791 | from charmhelpers.contrib.openstack import context |
1792 | |
1793 | - # or create your own |
1794 | - class SimpleContextGenerator(OSContextGenerator): |
1795 | - def __call__(): |
1796 | - return { |
1797 | - 'key': 'value' |
1798 | - } |
1799 | - |
1800 | # Create a renderer object for a specific OS release. |
1801 | configs = OSConfigRenderer(templates_dir='/tmp/templates', |
1802 | openstack_release='folsom') |
1803 | @@ -185,31 +142,12 @@ |
1804 | configs.register(config_file='/etc/nova/api-paste.ini', |
1805 | contexts=[context.IdentityServiceContext()]) |
1806 | configs.register(config_file='/etc/haproxy/haproxy.conf', |
1807 | - contexts=[context.HAProxyContext(), |
1808 | - SimpleContextGenerator()]) |
1809 | + contexts=[context.HAProxyContext()]) |
1810 | # write out a single config |
1811 | configs.write('/etc/nova/nova.conf') |
1812 | # write out all registered configs |
1813 | configs.write_all() |
1814 | |
1815 | - Using patterns:: |
1816 | - class DashboardContextGenerator(OSPatternContextGenerator): |
1817 | - def __call__(): |
1818 | - return { |
1819 | - (40, 'router'): { 'DISABLED': True } |
1820 | - } |
1821 | - |
1822 | - configs.register_pattern( |
1823 | - pattern='/usr/share/openstack-dashboard/openstack_dashboard' |
1824 | - '/enabled/_{}_juju_{}.py', |
1825 | - contexts=[ |
1826 | - DashboardContextGenerator() |
1827 | - ]) |
1828 | - # delete all files matching the pattern and |
1829 | - # write _40_juju_router.py anew |
1830 | - configs.write('/usr/share/openstack-dashboard/openstack_dashboard' |
1831 | - '/enabled/_{}_juju_{}.py') |
1832 | - |
1833 | **OpenStack Releases and template loading** |
1834 | |
1835 | When the object is instantiated, it is associated with a specific OS |
1836 | @@ -281,15 +219,6 @@ |
1837 | contexts=contexts) |
1838 | log('Registered config file: %s' % config_file, level=INFO) |
1839 | |
1840 | - def register_pattern(self, pattern, contexts): |
1841 | - """ |
1842 | - Register a config file name pattern with a list of context generators |
1843 | - to be called during rendering. Use standard format() specification. |
1844 | - """ |
1845 | - self.templates[pattern] = OSPatternConfigTemplate(pattern=pattern, |
1846 | - contexts=contexts) |
1847 | - log('Registered config pattern: %s' % pattern, level=INFO) |
1848 | - |
1849 | def _get_tmpl_env(self): |
1850 | if not self._tmpl_env: |
1851 | loader = get_loader(self.templates_dir, self.openstack_release) |
1852 | @@ -305,8 +234,7 @@ |
1853 | if config_file not in self.templates: |
1854 | log('Config not registered: %s' % config_file, level=ERROR) |
1855 | raise OSConfigException |
1856 | - ostemplate = self.templates[config_file] |
1857 | - ctxt = ostemplate.context() |
1858 | + ctxt = self.templates[config_file].context() |
1859 | |
1860 | _tmpl = os.path.basename(config_file) |
1861 | try: |
1862 | @@ -325,14 +253,7 @@ |
1863 | raise e |
1864 | |
1865 | log('Rendering from template: %s' % _tmpl, level=INFO) |
1866 | - |
1867 | - if not isinstance(ostemplate, OSPatternConfigTemplate): |
1868 | - ctxt = {(): ctxt} |
1869 | - |
1870 | - renders = {} |
1871 | - for args, file_ctxt in ctxt.items(): |
1872 | - renders[args] = template.render(file_ctxt) |
1873 | - return renders |
1874 | + return template.render(ctxt) |
1875 | |
1876 | def write(self, config_file): |
1877 | """ |
1878 | @@ -342,16 +263,10 @@ |
1879 | log('Config not registered: %s' % config_file, level=ERROR) |
1880 | raise OSConfigException |
1881 | |
1882 | - renders = self.render(config_file) |
1883 | - |
1884 | - files = glob.glob(''.join([t[0] + ('' if t[1] is None else '*') |
1885 | - for t in string.Formatter().parse(config_file)])) |
1886 | - for name in files: |
1887 | - os.unlink(name) |
1888 | - |
1889 | - for args, render in renders.items(): |
1890 | - with open(config_file.format(*args), 'wb') as out: |
1891 | - out.write(render) |
1892 | + _out = self.render(config_file) |
1893 | + |
1894 | + with open(config_file, 'wb') as out: |
1895 | + out.write(_out) |
1896 | |
1897 | log('Wrote template %s.' % config_file, level=INFO) |
1898 | |
1899 | |
1900 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
1901 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-04-16 20:24:28 +0000 |
1902 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-08-27 15:02:41 +0000 |
1903 | @@ -24,6 +24,7 @@ |
1904 | import json |
1905 | import os |
1906 | import sys |
1907 | +import re |
1908 | |
1909 | import six |
1910 | import yaml |
1911 | @@ -53,9 +54,13 @@ |
1912 | get_ipv6_addr |
1913 | ) |
1914 | |
1915 | +from charmhelpers.contrib.python.packages import ( |
1916 | + pip_create_virtualenv, |
1917 | + pip_install, |
1918 | +) |
1919 | + |
1920 | from charmhelpers.core.host import lsb_release, mounts, umount |
1921 | from charmhelpers.fetch import apt_install, apt_cache, install_remote |
1922 | -from charmhelpers.contrib.python.packages import pip_install |
1923 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
1924 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device |
1925 | |
1926 | @@ -65,7 +70,6 @@ |
1927 | DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' |
1928 | 'restricted main multiverse universe') |
1929 | |
1930 | - |
1931 | UBUNTU_OPENSTACK_RELEASE = OrderedDict([ |
1932 | ('oneiric', 'diablo'), |
1933 | ('precise', 'essex'), |
1934 | @@ -75,6 +79,7 @@ |
1935 | ('trusty', 'icehouse'), |
1936 | ('utopic', 'juno'), |
1937 | ('vivid', 'kilo'), |
1938 | + ('wily', 'liberty'), |
1939 | ]) |
1940 | |
1941 | |
1942 | @@ -87,6 +92,7 @@ |
1943 | ('2014.1', 'icehouse'), |
1944 | ('2014.2', 'juno'), |
1945 | ('2015.1', 'kilo'), |
1946 | + ('2015.2', 'liberty'), |
1947 | ]) |
1948 | |
1949 | # The ugly duckling |
1950 | @@ -109,8 +115,37 @@ |
1951 | ('2.2.0', 'juno'), |
1952 | ('2.2.1', 'kilo'), |
1953 | ('2.2.2', 'kilo'), |
1954 | + ('2.3.0', 'liberty'), |
1955 | ]) |
1956 | |
1957 | +# >= Liberty version->codename mapping |
1958 | +PACKAGE_CODENAMES = { |
1959 | + 'nova-common': OrderedDict([ |
1960 | + ('12.0.0', 'liberty'), |
1961 | + ]), |
1962 | + 'neutron-common': OrderedDict([ |
1963 | + ('7.0.0', 'liberty'), |
1964 | + ]), |
1965 | + 'cinder-common': OrderedDict([ |
1966 | + ('7.0.0', 'liberty'), |
1967 | + ]), |
1968 | + 'keystone': OrderedDict([ |
1969 | + ('8.0.0', 'liberty'), |
1970 | + ]), |
1971 | + 'horizon-common': OrderedDict([ |
1972 | + ('8.0.0', 'liberty'), |
1973 | + ]), |
1974 | + 'ceilometer-common': OrderedDict([ |
1975 | + ('5.0.0', 'liberty'), |
1976 | + ]), |
1977 | + 'heat-common': OrderedDict([ |
1978 | + ('5.0.0', 'liberty'), |
1979 | + ]), |
1980 | + 'glance-common': OrderedDict([ |
1981 | + ('11.0.0', 'liberty'), |
1982 | + ]), |
1983 | +} |
1984 | + |
1985 | DEFAULT_LOOPBACK_SIZE = '5G' |
1986 | |
1987 | |
1988 | @@ -194,20 +229,29 @@ |
1989 | error_out(e) |
1990 | |
1991 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
1992 | + match = re.match('^(\d)\.(\d)\.(\d)', vers) |
1993 | + if match: |
1994 | + vers = match.group(0) |
1995 | |
1996 | - try: |
1997 | - if 'swift' in pkg.name: |
1998 | - swift_vers = vers[:5] |
1999 | - if swift_vers not in SWIFT_CODENAMES: |
2000 | - # Deal with 1.10.0 upward |
2001 | - swift_vers = vers[:6] |
2002 | - return SWIFT_CODENAMES[swift_vers] |
2003 | - else: |
2004 | - vers = vers[:6] |
2005 | - return OPENSTACK_CODENAMES[vers] |
2006 | - except KeyError: |
2007 | - e = 'Could not determine OpenStack codename for version %s' % vers |
2008 | - error_out(e) |
2009 | + # >= Liberty independent project versions |
2010 | + if (package in PACKAGE_CODENAMES and |
2011 | + vers in PACKAGE_CODENAMES[package]): |
2012 | + return PACKAGE_CODENAMES[package][vers] |
2013 | + else: |
2014 | + # < Liberty co-ordinated project versions |
2015 | + try: |
2016 | + if 'swift' in pkg.name: |
2017 | + swift_vers = vers[:5] |
2018 | + if swift_vers not in SWIFT_CODENAMES: |
2019 | + # Deal with 1.10.0 upward |
2020 | + swift_vers = vers[:6] |
2021 | + return SWIFT_CODENAMES[swift_vers] |
2022 | + else: |
2023 | + vers = vers[:6] |
2024 | + return OPENSTACK_CODENAMES[vers] |
2025 | + except KeyError: |
2026 | + e = 'Could not determine OpenStack codename for version %s' % vers |
2027 | + error_out(e) |
2028 | |
2029 | |
2030 | def get_os_version_package(pkg, fatal=True): |
2031 | @@ -317,6 +361,9 @@ |
2032 | 'kilo': 'trusty-updates/kilo', |
2033 | 'kilo/updates': 'trusty-updates/kilo', |
2034 | 'kilo/proposed': 'trusty-proposed/kilo', |
2035 | + 'liberty': 'trusty-updates/liberty', |
2036 | + 'liberty/updates': 'trusty-updates/liberty', |
2037 | + 'liberty/proposed': 'trusty-proposed/liberty', |
2038 | } |
2039 | |
2040 | try: |
2041 | @@ -497,11 +544,22 @@ |
2042 | requirements_dir = None |
2043 | |
2044 | |
2045 | -def git_clone_and_install(projects_yaml, core_project): |
2046 | +def _git_yaml_load(projects_yaml): |
2047 | + """ |
2048 | + Load the specified yaml into a dictionary. |
2049 | + """ |
2050 | + if not projects_yaml: |
2051 | + return None |
2052 | + |
2053 | + return yaml.load(projects_yaml) |
2054 | + |
2055 | + |
2056 | +def git_clone_and_install(projects_yaml, core_project, depth=1): |
2057 | """ |
2058 | Clone/install all specified OpenStack repositories. |
2059 | |
2060 | The expected format of projects_yaml is: |
2061 | + |
2062 | repositories: |
2063 | - {name: keystone, |
2064 | repository: 'git://git.openstack.org/openstack/keystone.git', |
2065 | @@ -509,24 +567,25 @@ |
2066 | - {name: requirements, |
2067 | repository: 'git://git.openstack.org/openstack/requirements.git', |
2068 | branch: 'stable/icehouse'} |
2069 | + |
2070 | directory: /mnt/openstack-git |
2071 | - http_proxy: http://squid.internal:3128 |
2072 | - https_proxy: https://squid.internal:3128 |
2073 | - |
2074 | - The directory, http_proxy, and https_proxy keys are optional. |
2075 | + http_proxy: squid-proxy-url |
2076 | + https_proxy: squid-proxy-url |
2077 | + |
2078 | + The directory, http_proxy, and https_proxy keys are optional. |
2079 | + |
2080 | """ |
2081 | global requirements_dir |
2082 | parent_dir = '/mnt/openstack-git' |
2083 | - |
2084 | - if not projects_yaml: |
2085 | - return |
2086 | - |
2087 | - projects = yaml.load(projects_yaml) |
2088 | + http_proxy = None |
2089 | + |
2090 | + projects = _git_yaml_load(projects_yaml) |
2091 | _git_validate_projects_yaml(projects, core_project) |
2092 | |
2093 | old_environ = dict(os.environ) |
2094 | |
2095 | if 'http_proxy' in projects.keys(): |
2096 | + http_proxy = projects['http_proxy'] |
2097 | os.environ['http_proxy'] = projects['http_proxy'] |
2098 | if 'https_proxy' in projects.keys(): |
2099 | os.environ['https_proxy'] = projects['https_proxy'] |
2100 | @@ -534,15 +593,25 @@ |
2101 | if 'directory' in projects.keys(): |
2102 | parent_dir = projects['directory'] |
2103 | |
2104 | + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) |
2105 | + |
2106 | + # Upgrade setuptools and pip from default virtualenv versions. The default |
2107 | + # versions in trusty break master OpenStack branch deployments. |
2108 | + for p in ['pip', 'setuptools']: |
2109 | + pip_install(p, upgrade=True, proxy=http_proxy, |
2110 | + venv=os.path.join(parent_dir, 'venv')) |
2111 | + |
2112 | for p in projects['repositories']: |
2113 | repo = p['repository'] |
2114 | branch = p['branch'] |
2115 | if p['name'] == 'requirements': |
2116 | - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, |
2117 | + repo_dir = _git_clone_and_install_single(repo, branch, depth, |
2118 | + parent_dir, http_proxy, |
2119 | update_requirements=False) |
2120 | requirements_dir = repo_dir |
2121 | else: |
2122 | - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, |
2123 | + repo_dir = _git_clone_and_install_single(repo, branch, depth, |
2124 | + parent_dir, http_proxy, |
2125 | update_requirements=True) |
2126 | |
2127 | os.environ = old_environ |
2128 | @@ -574,7 +643,8 @@ |
2129 | error_out('openstack-origin-git key \'{}\' is missing'.format(key)) |
2130 | |
2131 | |
2132 | -def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): |
2133 | +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, |
2134 | + update_requirements): |
2135 | """ |
2136 | Clone and install a single git repository. |
2137 | """ |
2138 | @@ -587,23 +657,29 @@ |
2139 | |
2140 | if not os.path.exists(dest_dir): |
2141 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
2142 | - repo_dir = install_remote(repo, dest=parent_dir, branch=branch) |
2143 | + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, |
2144 | + depth=depth) |
2145 | else: |
2146 | repo_dir = dest_dir |
2147 | |
2148 | + venv = os.path.join(parent_dir, 'venv') |
2149 | + |
2150 | if update_requirements: |
2151 | if not requirements_dir: |
2152 | error_out('requirements repo must be cloned before ' |
2153 | 'updating from global requirements.') |
2154 | - _git_update_requirements(repo_dir, requirements_dir) |
2155 | + _git_update_requirements(venv, repo_dir, requirements_dir) |
2156 | |
2157 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
2158 | - pip_install(repo_dir) |
2159 | + if http_proxy: |
2160 | + pip_install(repo_dir, proxy=http_proxy, venv=venv) |
2161 | + else: |
2162 | + pip_install(repo_dir, venv=venv) |
2163 | |
2164 | return repo_dir |
2165 | |
2166 | |
2167 | -def _git_update_requirements(package_dir, reqs_dir): |
2168 | +def _git_update_requirements(venv, package_dir, reqs_dir): |
2169 | """ |
2170 | Update from global requirements. |
2171 | |
2172 | @@ -612,25 +688,38 @@ |
2173 | """ |
2174 | orig_dir = os.getcwd() |
2175 | os.chdir(reqs_dir) |
2176 | - cmd = ['python', 'update.py', package_dir] |
2177 | + python = os.path.join(venv, 'bin/python') |
2178 | + cmd = [python, 'update.py', package_dir] |
2179 | try: |
2180 | subprocess.check_call(cmd) |
2181 | except subprocess.CalledProcessError: |
2182 | package = os.path.basename(package_dir) |
2183 | - error_out("Error updating {} from global-requirements.txt".format(package)) |
2184 | + error_out("Error updating {} from " |
2185 | + "global-requirements.txt".format(package)) |
2186 | os.chdir(orig_dir) |
2187 | |
2188 | |
2189 | +def git_pip_venv_dir(projects_yaml): |
2190 | + """ |
2191 | + Return the pip virtualenv path. |
2192 | + """ |
2193 | + parent_dir = '/mnt/openstack-git' |
2194 | + |
2195 | + projects = _git_yaml_load(projects_yaml) |
2196 | + |
2197 | + if 'directory' in projects.keys(): |
2198 | + parent_dir = projects['directory'] |
2199 | + |
2200 | + return os.path.join(parent_dir, 'venv') |
2201 | + |
2202 | + |
2203 | def git_src_dir(projects_yaml, project): |
2204 | """ |
2205 | Return the directory where the specified project's source is located. |
2206 | """ |
2207 | parent_dir = '/mnt/openstack-git' |
2208 | |
2209 | - if not projects_yaml: |
2210 | - return |
2211 | - |
2212 | - projects = yaml.load(projects_yaml) |
2213 | + projects = _git_yaml_load(projects_yaml) |
2214 | |
2215 | if 'directory' in projects.keys(): |
2216 | parent_dir = projects['directory'] |
2217 | @@ -640,3 +729,15 @@ |
2218 | return os.path.join(parent_dir, os.path.basename(p['repository'])) |
2219 | |
2220 | return None |
2221 | + |
2222 | + |
2223 | +def git_yaml_value(projects_yaml, key): |
2224 | + """ |
2225 | + Return the value in projects_yaml for the specified key. |
2226 | + """ |
2227 | + projects = _git_yaml_load(projects_yaml) |
2228 | + |
2229 | + if key in projects.keys(): |
2230 | + return projects[key] |
2231 | + |
2232 | + return None |
2233 | |
2234 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' |
2235 | --- hooks/charmhelpers/contrib/python/packages.py 2015-02-26 10:11:26 +0000 |
2236 | +++ hooks/charmhelpers/contrib/python/packages.py 2015-08-27 15:02:41 +0000 |
2237 | @@ -17,8 +17,11 @@ |
2238 | # You should have received a copy of the GNU Lesser General Public License |
2239 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2240 | |
2241 | +import os |
2242 | +import subprocess |
2243 | + |
2244 | from charmhelpers.fetch import apt_install, apt_update |
2245 | -from charmhelpers.core.hookenv import log |
2246 | +from charmhelpers.core.hookenv import charm_dir, log |
2247 | |
2248 | try: |
2249 | from pip import main as pip_execute |
2250 | @@ -33,6 +36,8 @@ |
2251 | def parse_options(given, available): |
2252 | """Given a set of options, check if available""" |
2253 | for key, value in sorted(given.items()): |
2254 | + if not value: |
2255 | + continue |
2256 | if key in available: |
2257 | yield "--{0}={1}".format(key, value) |
2258 | |
2259 | @@ -51,11 +56,15 @@ |
2260 | pip_execute(command) |
2261 | |
2262 | |
2263 | -def pip_install(package, fatal=False, upgrade=False, **options): |
2264 | +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): |
2265 | """Install a python package""" |
2266 | - command = ["install"] |
2267 | + if venv: |
2268 | + venv_python = os.path.join(venv, 'bin/pip') |
2269 | + command = [venv_python, "install"] |
2270 | + else: |
2271 | + command = ["install"] |
2272 | |
2273 | - available_options = ('proxy', 'src', 'log', "index-url", ) |
2274 | + available_options = ('proxy', 'src', 'log', 'index-url', ) |
2275 | for option in parse_options(options, available_options): |
2276 | command.append(option) |
2277 | |
2278 | @@ -69,7 +78,10 @@ |
2279 | |
2280 | log("Installing {} package with options: {}".format(package, |
2281 | command)) |
2282 | - pip_execute(command) |
2283 | + if venv: |
2284 | + subprocess.check_call(command) |
2285 | + else: |
2286 | + pip_execute(command) |
2287 | |
2288 | |
2289 | def pip_uninstall(package, **options): |
2290 | @@ -94,3 +106,16 @@ |
2291 | """Returns the list of current python installed packages |
2292 | """ |
2293 | return pip_execute(["list"]) |
2294 | + |
2295 | + |
2296 | +def pip_create_virtualenv(path=None): |
2297 | + """Create an isolated Python environment.""" |
2298 | + apt_install('python-virtualenv') |
2299 | + |
2300 | + if path: |
2301 | + venv_path = path |
2302 | + else: |
2303 | + venv_path = os.path.join(charm_dir(), 'venv') |
2304 | + |
2305 | + if not os.path.exists(venv_path): |
2306 | + subprocess.check_call(['virtualenv', venv_path]) |
2307 | |
2308 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' |
2309 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-01-26 09:46:38 +0000 |
2310 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-08-27 15:02:41 +0000 |
2311 | @@ -60,12 +60,12 @@ |
2312 | KEYFILE = '/etc/ceph/ceph.client.{}.key' |
2313 | |
2314 | CEPH_CONF = """[global] |
2315 | - auth supported = {auth} |
2316 | - keyring = {keyring} |
2317 | - mon host = {mon_hosts} |
2318 | - log to syslog = {use_syslog} |
2319 | - err to syslog = {use_syslog} |
2320 | - clog to syslog = {use_syslog} |
2321 | +auth supported = {auth} |
2322 | +keyring = {keyring} |
2323 | +mon host = {mon_hosts} |
2324 | +log to syslog = {use_syslog} |
2325 | +err to syslog = {use_syslog} |
2326 | +clog to syslog = {use_syslog} |
2327 | """ |
2328 | |
2329 | |
2330 | |
2331 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' |
2332 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2015-01-26 09:46:38 +0000 |
2333 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-08-27 15:02:41 +0000 |
2334 | @@ -43,9 +43,10 @@ |
2335 | |
2336 | :param block_device: str: Full path of block device to clean. |
2337 | ''' |
2338 | + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b |
2339 | # sometimes sgdisk exits non-zero; this is OK, dd will clean up |
2340 | - call(['sgdisk', '--zap-all', '--mbrtogpt', |
2341 | - '--clear', block_device]) |
2342 | + call(['sgdisk', '--zap-all', '--', block_device]) |
2343 | + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) |
2344 | dev_end = check_output(['blockdev', '--getsz', |
2345 | block_device]).decode('UTF-8') |
2346 | gpt_end = int(dev_end.split()[0]) - 100 |
2347 | @@ -67,4 +68,4 @@ |
2348 | out = check_output(['mount']).decode('UTF-8') |
2349 | if is_partition: |
2350 | return bool(re.search(device + r"\b", out)) |
2351 | - return bool(re.search(device + r"[0-9]+\b", out)) |
2352 | + return bool(re.search(device + r"[0-9]*\b", out)) |
2353 | |
2354 | === added file 'hooks/charmhelpers/core/files.py' |
2355 | --- hooks/charmhelpers/core/files.py 1970-01-01 00:00:00 +0000 |
2356 | +++ hooks/charmhelpers/core/files.py 2015-08-27 15:02:41 +0000 |
2357 | @@ -0,0 +1,45 @@ |
2358 | +#!/usr/bin/env python |
2359 | +# -*- coding: utf-8 -*- |
2360 | + |
2361 | +# Copyright 2014-2015 Canonical Limited. |
2362 | +# |
2363 | +# This file is part of charm-helpers. |
2364 | +# |
2365 | +# charm-helpers is free software: you can redistribute it and/or modify |
2366 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2367 | +# published by the Free Software Foundation. |
2368 | +# |
2369 | +# charm-helpers is distributed in the hope that it will be useful, |
2370 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2371 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2372 | +# GNU Lesser General Public License for more details. |
2373 | +# |
2374 | +# You should have received a copy of the GNU Lesser General Public License |
2375 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2376 | + |
2377 | +__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>' |
2378 | + |
2379 | +import os |
2380 | +import subprocess |
2381 | + |
2382 | + |
2383 | +def sed(filename, before, after, flags='g'): |
2384 | + """ |
2385 | + Search and replaces the given pattern on filename. |
2386 | + |
2387 | + :param filename: relative or absolute file path. |
2388 | + :param before: expression to be replaced (see 'man sed') |
2389 | + :param after: expression to replace with (see 'man sed') |
2390 | + :param flags: sed-compatible regex flags, for example, to make |
2391 | + the search and replace case insensitive, specify ``flags="i"``. |
2392 | + The ``g`` flag is always specified regardless, so you do not |
2393 | + need to remember to include it when overriding this parameter. |
2394 | + :returns: If the sed command exit code was zero then return, |
2395 | + otherwise raise CalledProcessError. |
2396 | + """ |
2397 | + expression = r's/{0}/{1}/{2}'.format(before, |
2398 | + after, flags) |
2399 | + |
2400 | + return subprocess.check_call(["sed", "-i", "-r", "-e", |
2401 | + expression, |
2402 | + os.path.expanduser(filename)]) |
2403 | |
2404 | === modified file 'hooks/charmhelpers/core/hookenv.py' |
2405 | --- hooks/charmhelpers/core/hookenv.py 2015-04-16 20:24:28 +0000 |
2406 | +++ hooks/charmhelpers/core/hookenv.py 2015-08-27 15:02:41 +0000 |
2407 | @@ -21,12 +21,17 @@ |
2408 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
2409 | |
2410 | from __future__ import print_function |
2411 | +import copy |
2412 | +from distutils.version import LooseVersion |
2413 | +from functools import wraps |
2414 | +import glob |
2415 | import os |
2416 | import json |
2417 | import yaml |
2418 | import subprocess |
2419 | import sys |
2420 | import errno |
2421 | +import tempfile |
2422 | from subprocess import CalledProcessError |
2423 | |
2424 | import six |
2425 | @@ -58,15 +63,18 @@ |
2426 | |
2427 | will cache the result of unit_get + 'test' for future calls. |
2428 | """ |
2429 | + @wraps(func) |
2430 | def wrapper(*args, **kwargs): |
2431 | global cache |
2432 | key = str((func, args, kwargs)) |
2433 | try: |
2434 | return cache[key] |
2435 | except KeyError: |
2436 | - res = func(*args, **kwargs) |
2437 | - cache[key] = res |
2438 | - return res |
2439 | + pass # Drop out of the exception handler scope. |
2440 | + res = func(*args, **kwargs) |
2441 | + cache[key] = res |
2442 | + return res |
2443 | + wrapper._wrapped = func |
2444 | return wrapper |
2445 | |
2446 | |
2447 | @@ -166,9 +174,19 @@ |
2448 | return os.environ.get('JUJU_RELATION', None) |
2449 | |
2450 | |
2451 | -def relation_id(): |
2452 | - """The relation ID for the current relation hook""" |
2453 | - return os.environ.get('JUJU_RELATION_ID', None) |
2454 | +@cached |
2455 | +def relation_id(relation_name=None, service_or_unit=None): |
2456 | + """The relation ID for the current or a specified relation""" |
2457 | + if not relation_name and not service_or_unit: |
2458 | + return os.environ.get('JUJU_RELATION_ID', None) |
2459 | + elif relation_name and service_or_unit: |
2460 | + service_name = service_or_unit.split('/')[0] |
2461 | + for relid in relation_ids(relation_name): |
2462 | + remote_service = remote_service_name(relid) |
2463 | + if remote_service == service_name: |
2464 | + return relid |
2465 | + else: |
2466 | + raise ValueError('Must specify neither or both of relation_name and service_or_unit') |
2467 | |
2468 | |
2469 | def local_unit(): |
2470 | @@ -178,7 +196,7 @@ |
2471 | |
2472 | def remote_unit(): |
2473 | """The remote unit for the current relation hook""" |
2474 | - return os.environ['JUJU_REMOTE_UNIT'] |
2475 | + return os.environ.get('JUJU_REMOTE_UNIT', None) |
2476 | |
2477 | |
2478 | def service_name(): |
2479 | @@ -186,9 +204,20 @@ |
2480 | return local_unit().split('/')[0] |
2481 | |
2482 | |
2483 | +@cached |
2484 | +def remote_service_name(relid=None): |
2485 | + """The remote service name for a given relation-id (or the current relation)""" |
2486 | + if relid is None: |
2487 | + unit = remote_unit() |
2488 | + else: |
2489 | + units = related_units(relid) |
2490 | + unit = units[0] if units else None |
2491 | + return unit.split('/')[0] if unit else None |
2492 | + |
2493 | + |
2494 | def hook_name(): |
2495 | """The name of the currently executing hook""" |
2496 | - return os.path.basename(sys.argv[0]) |
2497 | + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) |
2498 | |
2499 | |
2500 | class Config(dict): |
2501 | @@ -238,23 +267,7 @@ |
2502 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
2503 | if os.path.exists(self.path): |
2504 | self.load_previous() |
2505 | - |
2506 | - def __getitem__(self, key): |
2507 | - """For regular dict lookups, check the current juju config first, |
2508 | - then the previous (saved) copy. This ensures that user-saved values |
2509 | - will be returned by a dict lookup. |
2510 | - |
2511 | - """ |
2512 | - try: |
2513 | - return dict.__getitem__(self, key) |
2514 | - except KeyError: |
2515 | - return (self._prev_dict or {})[key] |
2516 | - |
2517 | - def keys(self): |
2518 | - prev_keys = [] |
2519 | - if self._prev_dict is not None: |
2520 | - prev_keys = self._prev_dict.keys() |
2521 | - return list(set(prev_keys + list(dict.keys(self)))) |
2522 | + atexit(self._implicit_save) |
2523 | |
2524 | def load_previous(self, path=None): |
2525 | """Load previous copy of config from disk. |
2526 | @@ -273,6 +286,9 @@ |
2527 | self.path = path or self.path |
2528 | with open(self.path) as f: |
2529 | self._prev_dict = json.load(f) |
2530 | + for k, v in copy.deepcopy(self._prev_dict).items(): |
2531 | + if k not in self: |
2532 | + self[k] = v |
2533 | |
2534 | def changed(self, key): |
2535 | """Return True if the current value for this key is different from |
2536 | @@ -304,13 +320,13 @@ |
2537 | instance. |
2538 | |
2539 | """ |
2540 | - if self._prev_dict: |
2541 | - for k, v in six.iteritems(self._prev_dict): |
2542 | - if k not in self: |
2543 | - self[k] = v |
2544 | with open(self.path, 'w') as f: |
2545 | json.dump(self, f) |
2546 | |
2547 | + def _implicit_save(self): |
2548 | + if self.implicit_save: |
2549 | + self.save() |
2550 | + |
2551 | |
2552 | @cached |
2553 | def config(scope=None): |
2554 | @@ -353,18 +369,49 @@ |
2555 | """Set relation information for the current unit""" |
2556 | relation_settings = relation_settings if relation_settings else {} |
2557 | relation_cmd_line = ['relation-set'] |
2558 | + accepts_file = "--file" in subprocess.check_output( |
2559 | + relation_cmd_line + ["--help"], universal_newlines=True) |
2560 | if relation_id is not None: |
2561 | relation_cmd_line.extend(('-r', relation_id)) |
2562 | - for k, v in (list(relation_settings.items()) + list(kwargs.items())): |
2563 | - if v is None: |
2564 | - relation_cmd_line.append('{}='.format(k)) |
2565 | - else: |
2566 | - relation_cmd_line.append('{}={}'.format(k, v)) |
2567 | - subprocess.check_call(relation_cmd_line) |
2568 | + settings = relation_settings.copy() |
2569 | + settings.update(kwargs) |
2570 | + for key, value in settings.items(): |
2571 | + # Force value to be a string: it always should, but some call |
2572 | + # sites pass in things like dicts or numbers. |
2573 | + if value is not None: |
2574 | + settings[key] = "{}".format(value) |
2575 | + if accepts_file: |
2576 | + # --file was introduced in Juju 1.23.2. Use it by default if |
2577 | + # available, since otherwise we'll break if the relation data is |
2578 | + # too big. Ideally we should tell relation-set to read the data from |
2579 | + # stdin, but that feature is broken in 1.23.2: Bug #1454678. |
2580 | + with tempfile.NamedTemporaryFile(delete=False) as settings_file: |
2581 | + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) |
2582 | + subprocess.check_call( |
2583 | + relation_cmd_line + ["--file", settings_file.name]) |
2584 | + os.remove(settings_file.name) |
2585 | + else: |
2586 | + for key, value in settings.items(): |
2587 | + if value is None: |
2588 | + relation_cmd_line.append('{}='.format(key)) |
2589 | + else: |
2590 | + relation_cmd_line.append('{}={}'.format(key, value)) |
2591 | + subprocess.check_call(relation_cmd_line) |
2592 | # Flush cache of any relation-gets for local unit |
2593 | flush(local_unit()) |
2594 | |
2595 | |
2596 | +def relation_clear(r_id=None): |
2597 | + ''' Clears any relation data already set on relation r_id ''' |
2598 | + settings = relation_get(rid=r_id, |
2599 | + unit=local_unit()) |
2600 | + for setting in settings: |
2601 | + if setting not in ['public-address', 'private-address']: |
2602 | + settings[setting] = None |
2603 | + relation_set(relation_id=r_id, |
2604 | + **settings) |
2605 | + |
2606 | + |
2607 | @cached |
2608 | def relation_ids(reltype=None): |
2609 | """A list of relation_ids""" |
2610 | @@ -444,6 +491,63 @@ |
2611 | |
2612 | |
2613 | @cached |
2614 | +def relation_to_interface(relation_name): |
2615 | + """ |
2616 | + Given the name of a relation, return the interface that relation uses. |
2617 | + |
2618 | + :returns: The interface name, or ``None``. |
2619 | + """ |
2620 | + return relation_to_role_and_interface(relation_name)[1] |
2621 | + |
2622 | + |
2623 | +@cached |
2624 | +def relation_to_role_and_interface(relation_name): |
2625 | + """ |
2626 | + Given the name of a relation, return the role and the name of the interface |
2627 | + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). |
2628 | + |
2629 | + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. |
2630 | + """ |
2631 | + _metadata = metadata() |
2632 | + for role in ('provides', 'requires', 'peer'): |
2633 | + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') |
2634 | + if interface: |
2635 | + return role, interface |
2636 | + return None, None |
2637 | + |
2638 | + |
2639 | +@cached |
2640 | +def role_and_interface_to_relations(role, interface_name): |
2641 | + """ |
2642 | + Given a role and interface name, return a list of relation names for the |
2643 | + current charm that use that interface under that role (where role is one |
2644 | + of ``provides``, ``requires``, or ``peer``). |
2645 | + |
2646 | + :returns: A list of relation names. |
2647 | + """ |
2648 | + _metadata = metadata() |
2649 | + results = [] |
2650 | + for relation_name, relation in _metadata.get(role, {}).items(): |
2651 | + if relation['interface'] == interface_name: |
2652 | + results.append(relation_name) |
2653 | + return results |
2654 | + |
2655 | + |
2656 | +@cached |
2657 | +def interface_to_relations(interface_name): |
2658 | + """ |
2659 | + Given an interface, return a list of relation names for the current |
2660 | + charm that use that interface. |
2661 | + |
2662 | + :returns: A list of relation names. |
2663 | + """ |
2664 | + results = [] |
2665 | + for role in ('provides', 'requires', 'peer'): |
2666 | + results.extend(role_and_interface_to_relations(role, interface_name)) |
2667 | + return results |
2668 | + |
2669 | + |
2670 | +@cached |
2671 | def charm_name(): |
2672 | """Get the name of the current charm as is specified on metadata.yaml""" |
2673 | return metadata().get('name') |
2674 | @@ -509,6 +613,11 @@ |
2675 | return None |
2676 | |
2677 | |
2678 | +def unit_public_ip(): |
2679 | + """Get this unit's public IP address""" |
2680 | + return unit_get('public-address') |
2681 | + |
2682 | + |
2683 | def unit_private_ip(): |
2684 | """Get this unit's private IP address""" |
2685 | return unit_get('private-address') |
2686 | @@ -541,10 +650,14 @@ |
2687 | hooks.execute(sys.argv) |
2688 | """ |
2689 | |
2690 | - def __init__(self, config_save=True): |
2691 | + def __init__(self, config_save=None): |
2692 | super(Hooks, self).__init__() |
2693 | self._hooks = {} |
2694 | - self._config_save = config_save |
2695 | + |
2696 | + # For unknown reasons, we allow the Hooks constructor to override |
2697 | + # config().implicit_save. |
2698 | + if config_save is not None: |
2699 | + config().implicit_save = config_save |
2700 | |
2701 | def register(self, name, function): |
2702 | """Register a hook""" |
2703 | @@ -552,13 +665,16 @@ |
2704 | |
2705 | def execute(self, args): |
2706 | """Execute a registered hook based on args[0]""" |
2707 | + _run_atstart() |
2708 | hook_name = os.path.basename(args[0]) |
2709 | if hook_name in self._hooks: |
2710 | - self._hooks[hook_name]() |
2711 | - if self._config_save: |
2712 | - cfg = config() |
2713 | - if cfg.implicit_save: |
2714 | - cfg.save() |
2715 | + try: |
2716 | + self._hooks[hook_name]() |
2717 | + except SystemExit as x: |
2718 | + if x.code is None or x.code == 0: |
2719 | + _run_atexit() |
2720 | + raise |
2721 | + _run_atexit() |
2722 | else: |
2723 | raise UnregisteredHookError(hook_name) |
2724 | |
2725 | @@ -605,3 +721,176 @@ |
2726 | |
2727 | The results set by action_set are preserved.""" |
2728 | subprocess.check_call(['action-fail', message]) |
2729 | + |
2730 | + |
2731 | +def action_name(): |
2732 | + """Get the name of the currently executing action.""" |
2733 | + return os.environ.get('JUJU_ACTION_NAME') |
2734 | + |
2735 | + |
2736 | +def action_uuid(): |
2737 | + """Get the UUID of the currently executing action.""" |
2738 | + return os.environ.get('JUJU_ACTION_UUID') |
2739 | + |
2740 | + |
2741 | +def action_tag(): |
2742 | + """Get the tag for the currently executing action.""" |
2743 | + return os.environ.get('JUJU_ACTION_TAG') |
2744 | + |
2745 | + |
2746 | +def status_set(workload_state, message): |
2747 | + """Set the workload state with a message |
2748 | + |
2749 | + Use status-set to set the workload state with a message which is visible |
2750 | + to the user via juju status. If the status-set command is not found then |
2751 | + assume this is juju < 1.23 and juju-log the message instead. |
2752 | + |
2753 | + workload_state -- valid juju workload state. |
2754 | + message -- status update message |
2755 | + """ |
2756 | + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] |
2757 | + if workload_state not in valid_states: |
2758 | + raise ValueError( |
2759 | + '{!r} is not a valid workload state'.format(workload_state) |
2760 | + ) |
2761 | + cmd = ['status-set', workload_state, message] |
2762 | + try: |
2763 | + ret = subprocess.call(cmd) |
2764 | + if ret == 0: |
2765 | + return |
2766 | + except OSError as e: |
2767 | + if e.errno != errno.ENOENT: |
2768 | + raise |
2769 | + log_message = 'status-set failed: {} {}'.format(workload_state, |
2770 | + message) |
2771 | + log(log_message, level='INFO') |
2772 | + |
2773 | + |
2774 | +def status_get(): |
2775 | + """Retrieve the previously set juju workload state |
2776 | + |
2777 | + If the status-set command is not found then assume this is juju < 1.23 and |
2778 | + return 'unknown' |
2779 | + """ |
2780 | + cmd = ['status-get'] |
2781 | + try: |
2782 | + raw_status = subprocess.check_output(cmd, universal_newlines=True) |
2783 | + status = raw_status.rstrip() |
2784 | + return status |
2785 | + except OSError as e: |
2786 | + if e.errno == errno.ENOENT: |
2787 | + return 'unknown' |
2788 | + else: |
2789 | + raise |
2790 | + |
2791 | + |
2792 | +def translate_exc(from_exc, to_exc): |
2793 | + def inner_translate_exc1(f): |
2794 | + def inner_translate_exc2(*args, **kwargs): |
2795 | + try: |
2796 | + return f(*args, **kwargs) |
2797 | + except from_exc: |
2798 | + raise to_exc |
2799 | + |
2800 | + return inner_translate_exc2 |
2801 | + |
2802 | + return inner_translate_exc1 |
2803 | + |
2804 | + |
2805 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
2806 | +def is_leader(): |
2807 | + """Does the current unit hold the juju leadership |
2808 | + |
2809 | + Uses juju to determine whether the current unit is the leader of its peers |
2810 | + """ |
2811 | + cmd = ['is-leader', '--format=json'] |
2812 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
2813 | + |
2814 | + |
2815 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
2816 | +def leader_get(attribute=None): |
2817 | + """Juju leader get value(s)""" |
2818 | + cmd = ['leader-get', '--format=json'] + [attribute or '-'] |
2819 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
2820 | + |
2821 | + |
2822 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
2823 | +def leader_set(settings=None, **kwargs): |
2824 | + """Juju leader set value(s)""" |
2825 | + # Don't log secrets. |
2826 | + # log("Juju leader-set '%s'" % (settings), level=DEBUG) |
2827 | + cmd = ['leader-set'] |
2828 | + settings = settings or {} |
2829 | + settings.update(kwargs) |
2830 | + for k, v in settings.items(): |
2831 | + if v is None: |
2832 | + cmd.append('{}='.format(k)) |
2833 | + else: |
2834 | + cmd.append('{}={}'.format(k, v)) |
2835 | + subprocess.check_call(cmd) |
2836 | + |
2837 | + |
2838 | +@cached |
2839 | +def juju_version(): |
2840 | + """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
2841 | + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 |
2842 | + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] |
2843 | + return subprocess.check_output([jujud, 'version'], |
2844 | + universal_newlines=True).strip() |
2845 | + |
2846 | + |
2847 | +@cached |
2848 | +def has_juju_version(minimum_version): |
2849 | + """Return True if the Juju version is at least the provided version""" |
2850 | + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) |
2851 | + |
2852 | + |
2853 | +_atexit = [] |
2854 | +_atstart = [] |
2855 | + |
2856 | + |
2857 | +def atstart(callback, *args, **kwargs): |
2858 | + '''Schedule a callback to run before the main hook. |
2859 | + |
2860 | + Callbacks are run in the order they were added. |
2861 | + |
2862 | + This is useful for modules and classes to perform initialization |
2863 | + and inject behavior. In particular: |
2864 | + |
2865 | + - Run common code before all of your hooks, such as logging |
2866 | + the hook name or interesting relation data. |
2867 | + - Defer object or module initialization that requires a hook |
2868 | + context until we know there actually is a hook context, |
2869 | + making testing easier. |
2870 | + - Rather than requiring charm authors to include boilerplate to |
2871 | + invoke your helper's behavior, have it run automatically if |
2872 | + your object is instantiated or module imported. |
2873 | + |
2874 | + This is not at all useful after your hook framework has been launched. |
2875 | + ''' |
2876 | + global _atstart |
2877 | + _atstart.append((callback, args, kwargs)) |
2878 | + |
2879 | + |
2880 | +def atexit(callback, *args, **kwargs): |
2881 | + '''Schedule a callback to run on successful hook completion. |
2882 | + |
2883 | + Callbacks are run in the reverse order that they were added.''' |
2884 | + _atexit.append((callback, args, kwargs)) |
2885 | + |
2886 | + |
2887 | +def _run_atstart(): |
2888 | + '''Hook frameworks must invoke this before running the main hook body.''' |
2889 | + global _atstart |
2890 | + for callback, args, kwargs in _atstart: |
2891 | + callback(*args, **kwargs) |
2892 | + del _atstart[:] |
2893 | + |
2894 | + |
2895 | +def _run_atexit(): |
2896 | + '''Hook frameworks must invoke this after the main hook body has |
2897 | + successfully completed. Do not invoke it if the hook fails.''' |
2898 | + global _atexit |
2899 | + for callback, args, kwargs in reversed(_atexit): |
2900 | + callback(*args, **kwargs) |
2901 | + del _atexit[:] |
2902 | |
2903 | === modified file 'hooks/charmhelpers/core/host.py' |
2904 | --- hooks/charmhelpers/core/host.py 2015-06-02 12:12:49 +0000 |
2905 | +++ hooks/charmhelpers/core/host.py 2015-08-27 15:02:41 +0000 |
2906 | @@ -63,6 +63,36 @@ |
2907 | return service_result |
2908 | |
2909 | |
2910 | +def service_pause(service_name, init_dir=None): |
2911 | + """Pause a system service. |
2912 | + |
2913 | + Stop it, and prevent it from starting again at boot.""" |
2914 | + if init_dir is None: |
2915 | + init_dir = "/etc/init" |
2916 | + stopped = service_stop(service_name) |
2917 | + # XXX: Support systemd too |
2918 | + override_path = os.path.join( |
2919 | + init_dir, '{}.override'.format(service_name)) |
2920 | + with open(override_path, 'w') as fh: |
2921 | + fh.write("manual\n") |
2922 | + return stopped |
2923 | + |
2924 | + |
2925 | +def service_resume(service_name, init_dir=None): |
2926 | + """Resume a system service. |
2927 | + |
2928 | + Reenable starting again at boot. Start the service""" |
2929 | + # XXX: Support systemd too |
2930 | + if init_dir is None: |
2931 | + init_dir = "/etc/init" |
2932 | + override_path = os.path.join( |
2933 | + init_dir, '{}.override'.format(service_name)) |
2934 | + if os.path.exists(override_path): |
2935 | + os.unlink(override_path) |
2936 | + started = service_start(service_name) |
2937 | + return started |
2938 | + |
2939 | + |
2940 | def service(action, service_name): |
2941 | """Control a system service""" |
2942 | cmd = ['service', service_name, action] |
2943 | @@ -91,7 +121,7 @@ |
2944 | ['service', service_name, 'status'], |
2945 | stderr=subprocess.STDOUT).decode('UTF-8') |
2946 | except subprocess.CalledProcessError as e: |
2947 | - return 'unrecognized service' not in e.output |
2948 | + return b'unrecognized service' not in e.output |
2949 | else: |
2950 | return True |
2951 | |
2952 | @@ -118,6 +148,16 @@ |
2953 | return user_info |
2954 | |
2955 | |
2956 | +def user_exists(username): |
2957 | + """Check if a user exists""" |
2958 | + try: |
2959 | + pwd.getpwnam(username) |
2960 | + user_exists = True |
2961 | + except KeyError: |
2962 | + user_exists = False |
2963 | + return user_exists |
2964 | + |
2965 | + |
2966 | def add_group(group_name, system_group=False): |
2967 | """Add a group to the system""" |
2968 | try: |
2969 | @@ -140,11 +180,7 @@ |
2970 | |
2971 | def add_user_to_group(username, group): |
2972 | """Add a user to a group""" |
2973 | - cmd = [ |
2974 | - 'gpasswd', '-a', |
2975 | - username, |
2976 | - group |
2977 | - ] |
2978 | + cmd = ['gpasswd', '-a', username, group] |
2979 | log("Adding user {} to group {}".format(username, group)) |
2980 | subprocess.check_call(cmd) |
2981 | |
2982 | @@ -254,6 +290,17 @@ |
2983 | return system_mounts |
2984 | |
2985 | |
2986 | +def fstab_mount(mountpoint): |
2987 | + """Mount filesystem using fstab""" |
2988 | + cmd_args = ['mount', mountpoint] |
2989 | + try: |
2990 | + subprocess.check_output(cmd_args) |
2991 | + except subprocess.CalledProcessError as e: |
2992 | + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) |
2993 | + return False |
2994 | + return True |
2995 | + |
2996 | + |
2997 | def file_hash(path, hash_type='md5'): |
2998 | """ |
2999 | Generate a hash checksum of the contents of 'path' or None if not found. |
3000 | @@ -370,25 +417,80 @@ |
3001 | return(''.join(random_chars)) |
3002 | |
3003 | |
3004 | -def list_nics(nic_type): |
3005 | +def is_phy_iface(interface): |
3006 | + """Returns True if interface is not virtual, otherwise False.""" |
3007 | + if interface: |
3008 | + sys_net = '/sys/class/net' |
3009 | + if os.path.isdir(sys_net): |
3010 | + for iface in glob.glob(os.path.join(sys_net, '*')): |
3011 | + if '/virtual/' in os.path.realpath(iface): |
3012 | + continue |
3013 | + |
3014 | + if interface == os.path.basename(iface): |
3015 | + return True |
3016 | + |
3017 | + return False |
3018 | + |
3019 | + |
3020 | +def get_bond_master(interface): |
3021 | + """Returns bond master if interface is bond slave otherwise None. |
3022 | + |
3023 | + NOTE: the provided interface is expected to be physical |
3024 | + """ |
3025 | + if interface: |
3026 | + iface_path = '/sys/class/net/%s' % (interface) |
3027 | + if os.path.exists(iface_path): |
3028 | + if '/virtual/' in os.path.realpath(iface_path): |
3029 | + return None |
3030 | + |
3031 | + master = os.path.join(iface_path, 'master') |
3032 | + if os.path.exists(master): |
3033 | + master = os.path.realpath(master) |
3034 | + # make sure it is a bond master |
3035 | + if os.path.exists(os.path.join(master, 'bonding')): |
3036 | + return os.path.basename(master) |
3037 | + |
3038 | + return None |
3039 | + |
3040 | + |
3041 | +def list_nics(nic_type=None): |
3042 | '''Return a list of nics of given type(s)''' |
3043 | if isinstance(nic_type, six.string_types): |
3044 | int_types = [nic_type] |
3045 | else: |
3046 | int_types = nic_type |
3047 | + |
3048 | interfaces = [] |
3049 | - for int_type in int_types: |
3050 | - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] |
3051 | + if nic_type: |
3052 | + for int_type in int_types: |
3053 | + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] |
3054 | + ip_output = subprocess.check_output(cmd).decode('UTF-8') |
3055 | + ip_output = ip_output.split('\n') |
3056 | + ip_output = (line for line in ip_output if line) |
3057 | + for line in ip_output: |
3058 | + if line.split()[1].startswith(int_type): |
3059 | + matched = re.search('.*: (' + int_type + |
3060 | + r'[0-9]+\.[0-9]+)@.*', line) |
3061 | + if matched: |
3062 | + iface = matched.groups()[0] |
3063 | + else: |
3064 | + iface = line.split()[1].replace(":", "") |
3065 | + |
3066 | + if iface not in interfaces: |
3067 | + interfaces.append(iface) |
3068 | + else: |
3069 | + cmd = ['ip', 'a'] |
3070 | ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') |
3071 | - ip_output = (line for line in ip_output if line) |
3072 | + ip_output = (line.strip() for line in ip_output if line) |
3073 | + |
3074 | + key = re.compile('^[0-9]+:\s+(.+):') |
3075 | for line in ip_output: |
3076 | - if line.split()[1].startswith(int_type): |
3077 | - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) |
3078 | - if matched: |
3079 | - interface = matched.groups()[0] |
3080 | - else: |
3081 | - interface = line.split()[1].replace(":", "") |
3082 | - interfaces.append(interface) |
3083 | + matched = re.search(key, line) |
3084 | + if matched: |
3085 | + iface = matched.group(1) |
3086 | + iface = iface.partition("@")[0] |
3087 | + if iface not in interfaces: |
3088 | + interfaces.append(iface) |
3089 | |
3090 | return interfaces |
3091 | |
3092 | |
3093 | === added file 'hooks/charmhelpers/core/hugepage.py' |
3094 | --- hooks/charmhelpers/core/hugepage.py 1970-01-01 00:00:00 +0000 |
3095 | +++ hooks/charmhelpers/core/hugepage.py 2015-08-27 15:02:41 +0000 |
3096 | @@ -0,0 +1,62 @@ |
3097 | +# -*- coding: utf-8 -*- |
3098 | + |
3099 | +# Copyright 2014-2015 Canonical Limited. |
3100 | +# |
3101 | +# This file is part of charm-helpers. |
3102 | +# |
3103 | +# charm-helpers is free software: you can redistribute it and/or modify |
3104 | +# it under the terms of the GNU Lesser General Public License version 3 as |
3105 | +# published by the Free Software Foundation. |
3106 | +# |
3107 | +# charm-helpers is distributed in the hope that it will be useful, |
3108 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
3109 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
3110 | +# GNU Lesser General Public License for more details. |
3111 | +# |
3112 | +# You should have received a copy of the GNU Lesser General Public License |
3113 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3114 | + |
3115 | +import yaml |
3116 | +from charmhelpers.core import fstab |
3117 | +from charmhelpers.core import sysctl |
3118 | +from charmhelpers.core.host import ( |
3119 | + add_group, |
3120 | + add_user_to_group, |
3121 | + fstab_mount, |
3122 | + mkdir, |
3123 | +) |
3124 | + |
3125 | + |
3126 | +def hugepage_support(user, group='hugetlb', nr_hugepages=256, |
3127 | + max_map_count=65536, mnt_point='/run/hugepages/kvm', |
3128 | + pagesize='2MB', mount=True): |
3129 | + """Enable hugepages on system. |
3130 | + |
3131 | + Args: |
3132 | + user (str) -- Username to allow access to hugepages to |
3133 | + group (str) -- Group name to own hugepages |
3134 | + nr_hugepages (int) -- Number of pages to reserve |
3135 | + max_map_count (int) -- Number of Virtual Memory Areas a process can own |
3136 | + mnt_point (str) -- Directory to mount hugepages on |
3137 | + pagesize (str) -- Size of hugepages |
3138 | + mount (bool) -- Whether to Mount hugepages |
3139 | + """ |
3140 | + group_info = add_group(group) |
3141 | + gid = group_info.gr_gid |
3142 | + add_user_to_group(user, group) |
3143 | + sysctl_settings = { |
3144 | + 'vm.nr_hugepages': nr_hugepages, |
3145 | + 'vm.max_map_count': max_map_count, |
3146 | + 'vm.hugetlb_shm_group': gid, |
3147 | + } |
3148 | + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') |
3149 | + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) |
3150 | + lfstab = fstab.Fstab() |
3151 | + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) |
3152 | + if fstab_entry: |
3153 | + lfstab.remove_entry(fstab_entry) |
3154 | + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', |
3155 | + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) |
3156 | + lfstab.add_entry(entry) |
3157 | + if mount: |
3158 | + fstab_mount(mnt_point) |
3159 | |
3160 | === modified file 'hooks/charmhelpers/core/services/base.py' |
3161 | --- hooks/charmhelpers/core/services/base.py 2015-01-26 09:46:38 +0000 |
3162 | +++ hooks/charmhelpers/core/services/base.py 2015-08-27 15:02:41 +0000 |
3163 | @@ -15,9 +15,9 @@ |
3164 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3165 | |
3166 | import os |
3167 | -import re |
3168 | import json |
3169 | -from collections import Iterable |
3170 | +from inspect import getargspec |
3171 | +from collections import Iterable, OrderedDict |
3172 | |
3173 | from charmhelpers.core import host |
3174 | from charmhelpers.core import hookenv |
3175 | @@ -119,7 +119,7 @@ |
3176 | """ |
3177 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') |
3178 | self._ready = None |
3179 | - self.services = {} |
3180 | + self.services = OrderedDict() |
3181 | for service in services or []: |
3182 | service_name = service['service'] |
3183 | self.services[service_name] = service |
3184 | @@ -128,15 +128,18 @@ |
3185 | """ |
3186 | Handle the current hook by doing The Right Thing with the registered services. |
3187 | """ |
3188 | - hook_name = hookenv.hook_name() |
3189 | - if hook_name == 'stop': |
3190 | - self.stop_services() |
3191 | - else: |
3192 | - self.provide_data() |
3193 | - self.reconfigure_services() |
3194 | - cfg = hookenv.config() |
3195 | - if cfg.implicit_save: |
3196 | - cfg.save() |
3197 | + hookenv._run_atstart() |
3198 | + try: |
3199 | + hook_name = hookenv.hook_name() |
3200 | + if hook_name == 'stop': |
3201 | + self.stop_services() |
3202 | + else: |
3203 | + self.reconfigure_services() |
3204 | + self.provide_data() |
3205 | + except SystemExit as x: |
3206 | + if x.code is None or x.code == 0: |
3207 | + hookenv._run_atexit() |
3208 | + hookenv._run_atexit() |
3209 | |
3210 | def provide_data(self): |
3211 | """ |
3212 | @@ -145,15 +148,36 @@ |
3213 | A provider must have a `name` attribute, which indicates which relation |
3214 | to set data on, and a `provide_data()` method, which returns a dict of |
3215 | data to set. |
3216 | + |
3217 | + The `provide_data()` method can optionally accept two parameters: |
3218 | + |
3219 | + * ``remote_service`` The name of the remote service that the data will |
3220 | + be provided to. The `provide_data()` method will be called once |
3221 | + for each connected service (not unit). This allows the method to |
3222 | + tailor its data to the given service. |
3223 | + * ``service_ready`` Whether or not the service definition had all of |
3224 | + its requirements met, and thus the ``data_ready`` callbacks run. |
3225 | + |
3226 | + Note that the ``provided_data`` methods are now called **after** the |
3227 | + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks |
3228 | + a chance to generate any data necessary for providing to the remote |
3229 | + services. |
3230 | """ |
3231 | - hook_name = hookenv.hook_name() |
3232 | - for service in self.services.values(): |
3233 | + for service_name, service in self.services.items(): |
3234 | + service_ready = self.is_ready(service_name) |
3235 | for provider in service.get('provided_data', []): |
3236 | - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): |
3237 | - data = provider.provide_data() |
3238 | - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data |
3239 | - if _ready: |
3240 | - hookenv.relation_set(None, data) |
3241 | + for relid in hookenv.relation_ids(provider.name): |
3242 | + units = hookenv.related_units(relid) |
3243 | + if not units: |
3244 | + continue |
3245 | + remote_service = units[0].split('/')[0] |
3246 | + argspec = getargspec(provider.provide_data) |
3247 | + if len(argspec.args) > 1: |
3248 | + data = provider.provide_data(remote_service, service_ready) |
3249 | + else: |
3250 | + data = provider.provide_data() |
3251 | + if data: |
3252 | + hookenv.relation_set(relid, data) |
3253 | |
3254 | def reconfigure_services(self, *service_names): |
3255 | """ |
3256 | |
3257 | === modified file 'hooks/charmhelpers/core/services/helpers.py' |
3258 | --- hooks/charmhelpers/core/services/helpers.py 2015-04-16 20:24:28 +0000 |
3259 | +++ hooks/charmhelpers/core/services/helpers.py 2015-08-27 15:02:41 +0000 |
3260 | @@ -16,7 +16,9 @@ |
3261 | |
3262 | import os |
3263 | import yaml |
3264 | + |
3265 | from charmhelpers.core import hookenv |
3266 | +from charmhelpers.core import host |
3267 | from charmhelpers.core import templating |
3268 | |
3269 | from charmhelpers.core.services.base import ManagerCallback |
3270 | @@ -239,28 +241,42 @@ |
3271 | action. |
3272 | |
3273 | :param str source: The template source file, relative to |
3274 | - `$CHARM_DIR/templates` |
3275 | + `$CHARM_DIR/templates` |
3276 | |
3277 | :param str target: The target to write the rendered template to |
3278 | :param str owner: The owner of the rendered file |
3279 | :param str group: The group of the rendered file |
3280 | :param int perms: The permissions of the rendered file |
3281 | + :param partial on_change_action: functools partial to be executed when |
3282 | + rendered file changes |
3283 | """ |
3284 | def __init__(self, source, target, |
3285 | - owner='root', group='root', perms=0o444): |
3286 | + owner='root', group='root', perms=0o444, |
3287 | + on_change_action=None): |
3288 | self.source = source |
3289 | self.target = target |
3290 | self.owner = owner |
3291 | self.group = group |
3292 | self.perms = perms |
3293 | + self.on_change_action = on_change_action |
3294 | |
3295 | def __call__(self, manager, service_name, event_name): |
3296 | + pre_checksum = '' |
3297 | + if self.on_change_action and os.path.isfile(self.target): |
3298 | + pre_checksum = host.file_hash(self.target) |
3299 | service = manager.get_service(service_name) |
3300 | context = {} |
3301 | for ctx in service.get('required_data', []): |
3302 | context.update(ctx) |
3303 | templating.render(self.source, self.target, context, |
3304 | self.owner, self.group, self.perms) |
3305 | + if self.on_change_action: |
3306 | + if pre_checksum == host.file_hash(self.target): |
3307 | + hookenv.log( |
3308 | + 'No change detected: {}'.format(self.target), |
3309 | + hookenv.DEBUG) |
3310 | + else: |
3311 | + self.on_change_action() |
3312 | |
3313 | |
3314 | # Convenience aliases for templates |
3315 | |
3316 | === modified file 'hooks/charmhelpers/core/unitdata.py' |
3317 | --- hooks/charmhelpers/core/unitdata.py 2015-04-16 20:24:28 +0000 |
3318 | +++ hooks/charmhelpers/core/unitdata.py 2015-08-27 15:02:41 +0000 |
3319 | @@ -152,6 +152,7 @@ |
3320 | import collections |
3321 | import contextlib |
3322 | import datetime |
3323 | +import itertools |
3324 | import json |
3325 | import os |
3326 | import pprint |
3327 | @@ -164,8 +165,7 @@ |
3328 | class Storage(object): |
3329 | """Simple key value database for local unit state within charms. |
3330 | |
3331 | - Modifications are automatically committed at hook exit. That's |
3332 | - currently regardless of exit code. |
3333 | + Modifications are not persisted unless :meth:`flush` is called. |
3334 | |
3335 | To support dicts, lists, integer, floats, and booleans values |
3336 | are automatically json encoded/decoded. |
3337 | @@ -173,8 +173,11 @@ |
3338 | def __init__(self, path=None): |
3339 | self.db_path = path |
3340 | if path is None: |
3341 | - self.db_path = os.path.join( |
3342 | - os.environ.get('CHARM_DIR', ''), '.unit-state.db') |
3343 | + if 'UNIT_STATE_DB' in os.environ: |
3344 | + self.db_path = os.environ['UNIT_STATE_DB'] |
3345 | + else: |
3346 | + self.db_path = os.path.join( |
3347 | + os.environ.get('CHARM_DIR', ''), '.unit-state.db') |
3348 | self.conn = sqlite3.connect('%s' % self.db_path) |
3349 | self.cursor = self.conn.cursor() |
3350 | self.revision = None |
3351 | @@ -189,15 +192,8 @@ |
3352 | self.conn.close() |
3353 | self._closed = True |
3354 | |
3355 | - def _scoped_query(self, stmt, params=None): |
3356 | - if params is None: |
3357 | - params = [] |
3358 | - return stmt, params |
3359 | - |
3360 | def get(self, key, default=None, record=False): |
3361 | - self.cursor.execute( |
3362 | - *self._scoped_query( |
3363 | - 'select data from kv where key=?', [key])) |
3364 | + self.cursor.execute('select data from kv where key=?', [key]) |
3365 | result = self.cursor.fetchone() |
3366 | if not result: |
3367 | return default |
3368 | @@ -206,33 +202,81 @@ |
3369 | return json.loads(result[0]) |
3370 | |
3371 | def getrange(self, key_prefix, strip=False): |
3372 | - stmt = "select key, data from kv where key like '%s%%'" % key_prefix |
3373 | - self.cursor.execute(*self._scoped_query(stmt)) |
3374 | + """ |
3375 | + Get a range of keys starting with a common prefix as a mapping of |
3376 | + keys to values. |
3377 | + |
3378 | + :param str key_prefix: Common prefix among all keys |
3379 | + :param bool strip: Optionally strip the common prefix from the key |
3380 | + names in the returned dict |
3381 | + :return dict: A (possibly empty) dict of key-value mappings |
3382 | + """ |
3383 | + self.cursor.execute("select key, data from kv where key like ?", |
3384 | + ['%s%%' % key_prefix]) |
3385 | result = self.cursor.fetchall() |
3386 | |
3387 | if not result: |
3388 | - return None |
3389 | + return {} |
3390 | if not strip: |
3391 | key_prefix = '' |
3392 | return dict([ |
3393 | (k[len(key_prefix):], json.loads(v)) for k, v in result]) |
3394 | |
3395 | def update(self, mapping, prefix=""): |
3396 | + """ |
3397 | + Set the values of multiple keys at once. |
3398 | + |
3399 | + :param dict mapping: Mapping of keys to values |
3400 | + :param str prefix: Optional prefix to apply to all keys in `mapping` |
3401 | + before setting |
3402 | + """ |
3403 | for k, v in mapping.items(): |
3404 | self.set("%s%s" % (prefix, k), v) |
3405 | |
3406 | def unset(self, key): |
3407 | + """ |
3408 | + Remove a key from the database entirely. |
3409 | + """ |
3410 | self.cursor.execute('delete from kv where key=?', [key]) |
3411 | if self.revision and self.cursor.rowcount: |
3412 | self.cursor.execute( |
3413 | 'insert into kv_revisions values (?, ?, ?)', |
3414 | [key, self.revision, json.dumps('DELETED')]) |
3415 | |
3416 | + def unsetrange(self, keys=None, prefix=""): |
3417 | + """ |
3418 | + Remove a range of keys starting with a common prefix, from the database |
3419 | + entirely. |
3420 | + |
3421 | + :param list keys: List of keys to remove. |
3422 | + :param str prefix: Optional prefix to apply to all keys in ``keys`` |
3423 | + before removing. |
3424 | + """ |
3425 | + if keys is not None: |
3426 | + keys = ['%s%s' % (prefix, key) for key in keys] |
3427 | + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) |
3428 | + if self.revision and self.cursor.rowcount: |
3429 | + self.cursor.execute( |
3430 | + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), |
3431 | + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) |
3432 | + else: |
3433 | + self.cursor.execute('delete from kv where key like ?', |
3434 | + ['%s%%' % prefix]) |
3435 | + if self.revision and self.cursor.rowcount: |
3436 | + self.cursor.execute( |
3437 | + 'insert into kv_revisions values (?, ?, ?)', |
3438 | + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) |
3439 | + |
3440 | def set(self, key, value): |
3441 | + """ |
3442 | + Set a value in the database. |
3443 | + |
3444 | + :param str key: Key to set the value for |
3445 | + :param value: Any JSON-serializable value to be set |
3446 | + """ |
3447 | serialized = json.dumps(value) |
3448 | |
3449 | - self.cursor.execute( |
3450 | - 'select data from kv where key=?', [key]) |
3451 | + self.cursor.execute('select data from kv where key=?', [key]) |
3452 | exists = self.cursor.fetchone() |
3453 | |
3454 | # Skip mutations to the same value |
3455 | |
3456 | === modified file 'hooks/charmhelpers/fetch/__init__.py' |
3457 | --- hooks/charmhelpers/fetch/__init__.py 2015-01-26 09:46:38 +0000 |
3458 | +++ hooks/charmhelpers/fetch/__init__.py 2015-08-27 15:02:41 +0000 |
3459 | @@ -90,6 +90,14 @@ |
3460 | 'kilo/proposed': 'trusty-proposed/kilo', |
3461 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', |
3462 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
3463 | + # Liberty |
3464 | + 'liberty': 'trusty-updates/liberty', |
3465 | + 'trusty-liberty': 'trusty-updates/liberty', |
3466 | + 'trusty-liberty/updates': 'trusty-updates/liberty', |
3467 | + 'trusty-updates/liberty': 'trusty-updates/liberty', |
3468 | + 'liberty/proposed': 'trusty-proposed/liberty', |
3469 | + 'trusty-liberty/proposed': 'trusty-proposed/liberty', |
3470 | + 'trusty-proposed/liberty': 'trusty-proposed/liberty', |
3471 | } |
3472 | |
3473 | # The order of this list is very important. Handlers should be listed in from |
3474 | @@ -158,7 +166,7 @@ |
3475 | |
3476 | def apt_cache(in_memory=True): |
3477 | """Build and return an apt cache""" |
3478 | - import apt_pkg |
3479 | + from apt import apt_pkg |
3480 | apt_pkg.init() |
3481 | if in_memory: |
3482 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
3483 | @@ -215,19 +223,27 @@ |
3484 | _run_apt_command(cmd, fatal) |
3485 | |
3486 | |
3487 | +def apt_mark(packages, mark, fatal=False): |
3488 | + """Flag one or more packages using apt-mark""" |
3489 | + cmd = ['apt-mark', mark] |
3490 | + if isinstance(packages, six.string_types): |
3491 | + cmd.append(packages) |
3492 | + else: |
3493 | + cmd.extend(packages) |
3494 | + log("Marking {} as {}".format(packages, mark)) |
3495 | + |
3496 | + if fatal: |
3497 | + subprocess.check_call(cmd, universal_newlines=True) |
3498 | + else: |
3499 | + subprocess.call(cmd, universal_newlines=True) |
3500 | + |
3501 | + |
3502 | def apt_hold(packages, fatal=False): |
3503 | - """Hold one or more packages""" |
3504 | - cmd = ['apt-mark', 'hold'] |
3505 | - if isinstance(packages, six.string_types): |
3506 | - cmd.append(packages) |
3507 | - else: |
3508 | - cmd.extend(packages) |
3509 | - log("Holding {}".format(packages)) |
3510 | - |
3511 | - if fatal: |
3512 | - subprocess.check_call(cmd) |
3513 | - else: |
3514 | - subprocess.call(cmd) |
3515 | + return apt_mark(packages, 'hold', fatal=fatal) |
3516 | + |
3517 | + |
3518 | +def apt_unhold(packages, fatal=False): |
3519 | + return apt_mark(packages, 'unhold', fatal=fatal) |
3520 | |
3521 | |
3522 | def add_source(source, key=None): |
3523 | @@ -370,8 +386,9 @@ |
3524 | for handler in handlers: |
3525 | try: |
3526 | installed_to = handler.install(source, *args, **kwargs) |
3527 | - except UnhandledSource: |
3528 | - pass |
3529 | + except UnhandledSource as e: |
3530 | + log('Install source attempt unsuccessful: {}'.format(e), |
3531 | + level='WARNING') |
3532 | if not installed_to: |
3533 | raise UnhandledSource("No handler found for source {}".format(source)) |
3534 | return installed_to |
3535 | |
3536 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' |
3537 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-02-26 10:11:26 +0000 |
3538 | +++ hooks/charmhelpers/fetch/archiveurl.py 2015-08-27 15:02:41 +0000 |
3539 | @@ -77,6 +77,8 @@ |
3540 | def can_handle(self, source): |
3541 | url_parts = self.parse_url(source) |
3542 | if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): |
3543 | + # XXX: Why is this returning a boolean and a string? It's |
3544 | + # doomed to fail since "bool(can_handle('foo://'))" will be True. |
3545 | return "Wrong source type" |
3546 | if get_archive_handler(self.base_url(source)): |
3547 | return True |
3548 | @@ -155,7 +157,11 @@ |
3549 | else: |
3550 | algorithms = hashlib.algorithms_available |
3551 | if key in algorithms: |
3552 | - check_hash(dld_file, value, key) |
3553 | + if len(value) != 1: |
3554 | + raise TypeError( |
3555 | + "Expected 1 hash value, not %d" % len(value)) |
3556 | + expected = value[0] |
3557 | + check_hash(dld_file, expected, key) |
3558 | if checksum: |
3559 | check_hash(dld_file, checksum, hash_type) |
3560 | return extract(dld_file, dest) |
3561 | |
3562 | === modified file 'hooks/charmhelpers/fetch/giturl.py' |
3563 | --- hooks/charmhelpers/fetch/giturl.py 2015-02-26 10:11:26 +0000 |
3564 | +++ hooks/charmhelpers/fetch/giturl.py 2015-08-27 15:02:41 +0000 |
3565 | @@ -45,14 +45,16 @@ |
3566 | else: |
3567 | return True |
3568 | |
3569 | - def clone(self, source, dest, branch): |
3570 | + def clone(self, source, dest, branch, depth=None): |
3571 | if not self.can_handle(source): |
3572 | raise UnhandledSource("Cannot handle {}".format(source)) |
3573 | |
3574 | - repo = Repo.clone_from(source, dest) |
3575 | - repo.git.checkout(branch) |
3576 | + if depth: |
3577 | + Repo.clone_from(source, dest, branch=branch, depth=depth) |
3578 | + else: |
3579 | + Repo.clone_from(source, dest, branch=branch) |
3580 | |
3581 | - def install(self, source, branch="master", dest=None): |
3582 | + def install(self, source, branch="master", dest=None, depth=None): |
3583 | url_parts = self.parse_url(source) |
3584 | branch_name = url_parts.path.strip("/").split("/")[-1] |
3585 | if dest: |
3586 | @@ -63,9 +65,9 @@ |
3587 | if not os.path.exists(dest_dir): |
3588 | mkdir(dest_dir, perms=0o755) |
3589 | try: |
3590 | - self.clone(source, dest_dir, branch) |
3591 | + self.clone(source, dest_dir, branch, depth) |
3592 | except GitCommandError as e: |
3593 | - raise UnhandledSource(e.message) |
3594 | + raise UnhandledSource(e) |
3595 | except OSError as e: |
3596 | raise UnhandledSource(e.strerror) |
3597 | return dest_dir |
3598 | |
3599 | === modified file 'hooks/horizon_contexts.py' |
3600 | --- hooks/horizon_contexts.py 2015-06-03 13:47:12 +0000 |
3601 | +++ hooks/horizon_contexts.py 2015-08-27 15:02:41 +0000 |
3602 | @@ -10,7 +10,6 @@ |
3603 | ) |
3604 | from charmhelpers.contrib.openstack.context import ( |
3605 | OSContextGenerator, |
3606 | - OSPatternContextGenerator, |
3607 | HAProxyContext, |
3608 | context_complete |
3609 | ) |
3610 | @@ -28,7 +27,6 @@ |
3611 | |
3612 | from base64 import b64decode |
3613 | import os |
3614 | -import re |
3615 | |
3616 | |
3617 | class HorizonHAProxyContext(HAProxyContext): |
3618 | @@ -199,27 +197,3 @@ |
3619 | key=lambda r: r[1]['priority'])] |
3620 | } |
3621 | return ctxt |
3622 | - |
3623 | - |
3624 | -class PluginsContext(OSPatternContextGenerator): |
3625 | - def __call__(self): |
3626 | - |
3627 | - plugins = {} |
3628 | - |
3629 | - for rid in relation_ids("plugin"): |
3630 | - try: |
3631 | - unit = related_units(rid)[0] |
3632 | - except IndexError: |
3633 | - pass |
3634 | - else: |
3635 | - rdata = relation_get(unit=unit, rid=rid) |
3636 | - try: |
3637 | - if rdata['priority'] is not None and rdata['plugin-file']: |
3638 | - service = re.sub('[^a-z0-9_]', '_', unit.split('/')[0]) |
3639 | - plugins[(rdata['priority'], service)] = { |
3640 | - 'unit': unit, |
3641 | - 'plugin_file': rdata['plugin-file']} |
3642 | - except KeyError: |
3643 | - pass |
3644 | - |
3645 | - return plugins |
3646 | |
3647 | === modified file 'hooks/horizon_hooks.py' |
3648 | --- hooks/horizon_hooks.py 2015-06-03 13:07:42 +0000 |
3649 | +++ hooks/horizon_hooks.py 2015-08-27 15:02:41 +0000 |
3650 | @@ -33,7 +33,7 @@ |
3651 | register_configs, |
3652 | restart_map, |
3653 | services, |
3654 | - LOCAL_SETTINGS, HAPROXY_CONF, PLUGIN_SETTINGS, |
3655 | + LOCAL_SETTINGS, HAPROXY_CONF, |
3656 | enable_ssl, |
3657 | do_openstack_upgrade, |
3658 | git_install, |
3659 | @@ -125,7 +125,7 @@ |
3660 | open_port(443) |
3661 | |
3662 | if git_install_requested(): |
3663 | - git_post_install_late() |
3664 | + git_post_install_late(config('openstack-origin-git')) |
3665 | |
3666 | |
3667 | @hooks.hook('identity-service-relation-joined') |
3668 | @@ -253,7 +253,6 @@ |
3669 | @restart_on_change(restart_map()) |
3670 | def update_plugin_config(): |
3671 | CONFIGS.write(LOCAL_SETTINGS) |
3672 | - CONFIGS.write(PLUGIN_SETTINGS) |
3673 | |
3674 | |
3675 | def main(): |
3676 | |
3677 | === modified file 'hooks/horizon_utils.py' |
3678 | --- hooks/horizon_utils.py 2015-06-03 13:07:42 +0000 |
3679 | +++ hooks/horizon_utils.py 2015-08-27 15:02:41 +0000 |
3680 | @@ -5,7 +5,6 @@ |
3681 | import pwd |
3682 | import subprocess |
3683 | import shutil |
3684 | -import string |
3685 | from collections import OrderedDict |
3686 | |
3687 | import charmhelpers.contrib.openstack.context as context |
3688 | @@ -18,9 +17,13 @@ |
3689 | git_clone_and_install, |
3690 | os_release, |
3691 | git_src_dir, |
3692 | + git_pip_venv_dir, |
3693 | + git_yaml_value, |
3694 | +) |
3695 | +from charmhelpers.contrib.python.packages import ( |
3696 | + pip_install, |
3697 | ) |
3698 | from charmhelpers.core.hookenv import ( |
3699 | - charm_dir, |
3700 | config, |
3701 | log |
3702 | ) |
3703 | @@ -33,7 +36,9 @@ |
3704 | mkdir, |
3705 | service_restart, |
3706 | ) |
3707 | - |
3708 | +from charmhelpers.core.templating import ( |
3709 | + render, |
3710 | +) |
3711 | from charmhelpers.fetch import ( |
3712 | apt_upgrade, |
3713 | apt_update, |
3714 | @@ -54,9 +59,14 @@ |
3715 | BASE_GIT_PACKAGES = [ |
3716 | 'apache2', |
3717 | 'libapache2-mod-wsgi', |
3718 | + 'libffi-dev', |
3719 | + 'libpcre3-dev', |
3720 | + 'libssl-dev', |
3721 | 'libxml2-dev', |
3722 | 'libxslt1-dev', |
3723 | + 'libyaml-dev', |
3724 | 'python-dev', |
3725 | + 'python-lesscpy', |
3726 | 'python-pip', |
3727 | 'python-setuptools', |
3728 | 'zlib1g-dev', |
3729 | @@ -83,9 +93,6 @@ |
3730 | APACHE_DEFAULT = "%s/sites-available/default" % (APACHE_CONF_DIR) |
3731 | ROUTER_SETTING = \ |
3732 | "/usr/share/openstack-dashboard/openstack_dashboard/enabled/_40_router.py" |
3733 | -PLUGIN_SETTINGS = \ |
3734 | - "/usr/share/openstack-dashboard/openstack_dashboard/local/enabled" \ |
3735 | - "/_{}_juju_{}.py" |
3736 | |
3737 | TEMPLATES = 'templates' |
3738 | |
3739 | @@ -137,10 +144,6 @@ |
3740 | 'hook_contexts': [horizon_contexts.RouterSettingContext()], |
3741 | 'services': ['apache2'], |
3742 | }), |
3743 | - (PLUGIN_SETTINGS, { |
3744 | - 'hook_contexts': [horizon_contexts.PluginsContext()], |
3745 | - 'services': ['apache2'], |
3746 | - }), |
3747 | ]) |
3748 | |
3749 | |
3750 | @@ -179,11 +182,6 @@ |
3751 | if os.path.exists(os.path.dirname(ROUTER_SETTING)): |
3752 | configs.register(ROUTER_SETTING, |
3753 | CONFIG_FILES[ROUTER_SETTING]['hook_contexts']) |
3754 | - |
3755 | - if os_release('openstack_dashboard') >= 'icehouse': |
3756 | - configs.register_pattern(PLUGIN_SETTINGS, |
3757 | - CONFIG_FILES[PLUGIN_SETTINGS] |
3758 | - ['hook_contexts']) |
3759 | return configs |
3760 | |
3761 | |
3762 | @@ -201,10 +199,7 @@ |
3763 | for svc in ctxt['services']: |
3764 | svcs.append(svc) |
3765 | if svcs: |
3766 | - # replace all formatting in path with asterisks for glob |
3767 | - path = ''.join([t[0] + ('' if t[1] is None else '*') |
3768 | - for t in string.Formatter().parse(f)]) |
3769 | - _map.append((path, svcs)) |
3770 | + _map.append((f, svcs)) |
3771 | return OrderedDict(_map) |
3772 | |
3773 | |
3774 | @@ -313,7 +308,6 @@ |
3775 | def git_post_install(projects_yaml): |
3776 | """Perform horizon post-install setup.""" |
3777 | src_dir = git_src_dir(projects_yaml, 'horizon') |
3778 | - templates_dir = os.path.join(charm_dir(), 'templates/git') |
3779 | copy_files = { |
3780 | 'manage': { |
3781 | 'src': os.path.join(src_dir, 'manage.py'), |
3782 | @@ -328,10 +322,6 @@ |
3783 | 'local_settings.py.example'), |
3784 | 'dest': '/etc/openstack-dashboard/local_settings.py', |
3785 | }, |
3786 | - 'openstack-dashboard': { |
3787 | - 'src': os.path.join(templates_dir, 'dashboard.conf'), |
3788 | - 'dest': '/etc/apache2/conf-available/openstack-dashboard.conf', |
3789 | - }, |
3790 | } |
3791 | |
3792 | for name, files in copy_files.iteritems(): |
3793 | @@ -359,8 +349,8 @@ |
3794 | 'link': '/usr/share/openstack-dashboard/bin/less/lessc'}, |
3795 | {'src': '/etc/openstack-dashboard/local_settings.py', |
3796 | 'link': os.path.join(share_dir, 'local/local_settings.py')}, |
3797 | - {'src': |
3798 | - '/usr/local/lib/python2.7/dist-packages/horizon/static/horizon/', |
3799 | + {'src': os.path.join(git_pip_venv_dir(projects_yaml), |
3800 | + 'local/lib/python2.7/site-packages/horizon/static/horizon/'), |
3801 | 'link': os.path.join(share_dir, 'static/horizon')}, |
3802 | ] |
3803 | |
3804 | @@ -369,12 +359,25 @@ |
3805 | os.remove(s['link']) |
3806 | os.symlink(s['src'], s['link']) |
3807 | |
3808 | + render('git/dashboard.conf', |
3809 | + '/etc/apache2/conf-available/openstack-dashboard.conf', |
3810 | + {'virtualenv': git_pip_venv_dir(projects_yaml)}, |
3811 | + owner='root', group='root', perms=0o644) |
3812 | + |
3813 | os.chmod('/var/lib/openstack-dashboard', 0o750) |
3814 | os.chmod('/usr/share/openstack-dashboard/manage.py', 0o755), |
3815 | |
3816 | - subprocess.check_call(['/usr/share/openstack-dashboard/manage.py', |
3817 | + http_proxy = git_yaml_value(projects_yaml, 'http_proxy') |
3818 | + if http_proxy: |
3819 | + pip_install('python-memcached', proxy=http_proxy, |
3820 | + venv=git_pip_venv_dir(projects_yaml)) |
3821 | + else: |
3822 | + pip_install('python-memcached', |
3823 | + venv=git_pip_venv_dir(projects_yaml)) |
3824 | + python = os.path.join(git_pip_venv_dir(projects_yaml), 'bin/python') |
3825 | + subprocess.check_call([python, '/usr/share/openstack-dashboard/manage.py', |
3826 | 'collectstatic', '--noinput']) |
3827 | - subprocess.check_call(['/usr/share/openstack-dashboard/manage.py', |
3828 | + subprocess.check_call([python, '/usr/share/openstack-dashboard/manage.py', |
3829 | 'compress', '--force']) |
3830 | |
3831 | uid = pwd.getpwnam('horizon').pw_uid |
3832 | @@ -396,9 +399,15 @@ |
3833 | service_restart('apache2') |
3834 | |
3835 | |
3836 | -def git_post_install_late(): |
3837 | +def git_post_install_late(projects_yaml): |
3838 | """Perform horizon post-install setup.""" |
3839 | - subprocess.check_call(['/usr/share/openstack-dashboard/manage.py', |
3840 | + render('git/dashboard.conf', |
3841 | + '/etc/apache2/conf-available/openstack-dashboard.conf', |
3842 | + {'virtualenv': git_pip_venv_dir(projects_yaml)}, |
3843 | + owner='root', group='root', perms=0o644) |
3844 | + |
3845 | + python = os.path.join(git_pip_venv_dir(projects_yaml), 'bin/python') |
3846 | + subprocess.check_call([python, '/usr/share/openstack-dashboard/manage.py', |
3847 | 'collectstatic', '--noinput']) |
3848 | - subprocess.check_call(['/usr/share/openstack-dashboard/manage.py', |
3849 | + subprocess.check_call([python, '/usr/share/openstack-dashboard/manage.py', |
3850 | 'compress', '--force']) |
3851 | |
3852 | === modified file 'templates/git/dashboard.conf' |
3853 | --- templates/git/dashboard.conf 2015-04-13 14:18:45 +0000 |
3854 | +++ templates/git/dashboard.conf 2015-08-27 15:02:41 +0000 |
3855 | @@ -1,6 +1,7 @@ |
3856 | WSGIScriptAlias /horizon /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi |
3857 | WSGIDaemonProcess horizon user=horizon group=horizon processes=3 threads=10 |
3858 | WSGIProcessGroup horizon |
3859 | +WSGIPythonHome {{ virtualenv }} |
3860 | Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/ |
3861 | <Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi> |
3862 | Order allow,deny |
3863 | |
3864 | === removed file 'templates/icehouse/_{}_juju_{}.py' |
3865 | --- templates/icehouse/_{}_juju_{}.py 2015-06-03 13:07:42 +0000 |
3866 | +++ templates/icehouse/_{}_juju_{}.py 1970-01-01 00:00:00 +0000 |
3867 | @@ -1,2 +0,0 @@ |
3868 | -# {{ unit }} |
3869 | -{{ plugin_file }} |
3870 | \ No newline at end of file |
3871 | |
3872 | === modified file 'tests/00-setup' |
3873 | --- tests/00-setup 2015-02-11 18:24:07 +0000 |
3874 | +++ tests/00-setup 2015-08-27 15:02:41 +0000 |
3875 | @@ -5,6 +5,7 @@ |
3876 | sudo add-apt-repository --yes ppa:juju/stable |
3877 | sudo apt-get update --yes |
3878 | sudo apt-get install --yes python-amulet \ |
3879 | + python-distro-info \ |
3880 | python-neutronclient \ |
3881 | python-keystoneclient \ |
3882 | python-novaclient \ |
3883 | |
3884 | === modified file 'tests/017-basic-trusty-kilo' (properties changed: -x to +x) |
3885 | === removed file 'tests/018-basic-utopic-juno' |
3886 | --- tests/018-basic-utopic-juno 2015-04-16 21:36:17 +0000 |
3887 | +++ tests/018-basic-utopic-juno 1970-01-01 00:00:00 +0000 |
3888 | @@ -1,9 +0,0 @@ |
3889 | -#!/usr/bin/python |
3890 | - |
3891 | -"""Amulet tests on a basic openstack-dashboard deployment on utopic-juno.""" |
3892 | - |
3893 | -from basic_deployment import OpenstackDashboardBasicDeployment |
3894 | - |
3895 | -if __name__ == '__main__': |
3896 | - deployment = OpenstackDashboardBasicDeployment(series='utopic') |
3897 | - deployment.run_tests() |
3898 | |
3899 | === added file 'tests/052-basic-trusty-kilo-git' |
3900 | --- tests/052-basic-trusty-kilo-git 1970-01-01 00:00:00 +0000 |
3901 | +++ tests/052-basic-trusty-kilo-git 2015-08-27 15:02:41 +0000 |
3902 | @@ -0,0 +1,12 @@ |
3903 | +#!/usr/bin/python |
3904 | + |
3905 | +"""Amulet tests on a basic openstack-dashboard git deployment on trusty-kilo.""" |
3906 | + |
3907 | +from basic_deployment import OpenstackDashboardBasicDeployment |
3908 | + |
3909 | +if __name__ == '__main__': |
3910 | + deployment = OpenstackDashboardBasicDeployment(series='trusty', |
3911 | + openstack='cloud:trusty-kilo', |
3912 | + source='cloud:trusty-updates/kilo', |
3913 | + git=True) |
3914 | + deployment.run_tests() |
3915 | |
3916 | === modified file 'tests/basic_deployment.py' |
3917 | --- tests/basic_deployment.py 2015-04-15 19:31:27 +0000 |
3918 | +++ tests/basic_deployment.py 2015-08-27 15:02:41 +0000 |
3919 | @@ -59,21 +59,58 @@ |
3920 | """Configure all of the services.""" |
3921 | horizon_config = {} |
3922 | if self.git: |
3923 | + amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') |
3924 | + |
3925 | + reqs_repo = 'git://github.com/openstack/requirements' |
3926 | + horizon_repo = 'git://github.com/openstack/horizon' |
3927 | + if self._get_openstack_release() == self.trusty_icehouse: |
3928 | + reqs_repo = 'git://github.com/coreycb/requirements' |
3929 | + horizon_repo = 'git://github.com/coreycb/horizon' |
3930 | + |
3931 | branch = 'stable/' + self._get_openstack_release_string() |
3932 | - amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') |
3933 | - openstack_origin_git = { |
3934 | - 'repositories': [ |
3935 | - {'name': 'requirements', |
3936 | - 'repository': 'git://git.openstack.org/openstack/requirements', |
3937 | - 'branch': branch}, |
3938 | - {'name': 'horizon', |
3939 | - 'repository': 'git://git.openstack.org/openstack/horizon', |
3940 | - 'branch': branch}, |
3941 | - ], |
3942 | - 'directory': '/mnt/openstack-git', |
3943 | - 'http_proxy': amulet_http_proxy, |
3944 | - 'https_proxy': amulet_http_proxy, |
3945 | - } |
3946 | + |
3947 | + if self._get_openstack_release() == self.trusty_juno: |
3948 | + openstack_origin_git = { |
3949 | + 'repositories': [ |
3950 | + {'name': 'requirements', |
3951 | + 'repository': reqs_repo, |
3952 | + 'branch': branch}, |
3953 | + # NOTE(coreycb): Pin oslo libraries here because they're not |
3954 | + # capped and recently released versions causing issues for juno. |
3955 | + {'name': 'oslo-config', |
3956 | + 'repository': 'git://github.com/openstack/oslo.config', |
3957 | + 'branch': '1.6.0'}, |
3958 | + {'name': 'oslo-i18n', |
3959 | + 'repository': 'git://github.com/openstack/oslo.i18n', |
3960 | + 'branch': '1.3.1'}, |
3961 | + {'name': 'oslo-serialization', |
3962 | + 'repository': 'git://github.com/openstack/oslo.serialization', |
3963 | + 'branch': '1.2.0'}, |
3964 | + {'name': 'oslo-utils', |
3965 | + 'repository': 'git://github.com/openstack/oslo.utils', |
3966 | + 'branch': '1.4.0'}, |
3967 | + {'name': 'horizon', |
3968 | + 'repository': horizon_repo, |
3969 | + 'branch': branch}, |
3970 | + ], |
3971 | + 'directory': '/mnt/openstack-git', |
3972 | + 'http_proxy': amulet_http_proxy, |
3973 | + 'https_proxy': amulet_http_proxy, |
3974 | + } |
3975 | + else: |
3976 | + openstack_origin_git = { |
3977 | + 'repositories': [ |
3978 | + {'name': 'requirements', |
3979 | + 'repository': reqs_repo, |
3980 | + 'branch': branch}, |
3981 | + {'name': 'horizon', |
3982 | + 'repository': horizon_repo, |
3983 | + 'branch': branch}, |
3984 | + ], |
3985 | + 'directory': '/mnt/openstack-git', |
3986 | + 'http_proxy': amulet_http_proxy, |
3987 | + 'https_proxy': amulet_http_proxy, |
3988 | + } |
3989 | horizon_config['openstack-origin-git'] = yaml.dump(openstack_origin_git) |
3990 | |
3991 | keystone_config = {'admin-password': 'openstack', |
3992 | @@ -164,7 +201,7 @@ |
3993 | conf = '/etc/openstack-dashboard/local_settings.py' |
3994 | services = ['apache2'] |
3995 | self.d.configure('openstack-dashboard', {'use-syslog': 'True'}) |
3996 | - time = 40 |
3997 | + time = 120 |
3998 | for s in services: |
3999 | if not u.service_restarted(self.openstack_dashboard_sentry, s, conf, |
4000 | pgrep_full=True, sleep_time=time): |
4001 | |
4002 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' |
4003 | --- tests/charmhelpers/contrib/amulet/utils.py 2015-04-23 14:53:21 +0000 |
4004 | +++ tests/charmhelpers/contrib/amulet/utils.py 2015-08-27 15:02:41 +0000 |
4005 | @@ -14,14 +14,23 @@ |
4006 | # You should have received a copy of the GNU Lesser General Public License |
4007 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
4008 | |
4009 | -import ConfigParser |
4010 | import io |
4011 | +import json |
4012 | import logging |
4013 | +import os |
4014 | import re |
4015 | +import subprocess |
4016 | import sys |
4017 | import time |
4018 | |
4019 | +import amulet |
4020 | +import distro_info |
4021 | import six |
4022 | +from six.moves import configparser |
4023 | +if six.PY3: |
4024 | + from urllib import parse as urlparse |
4025 | +else: |
4026 | + import urlparse |
4027 | |
4028 | |
4029 | class AmuletUtils(object): |
4030 | @@ -33,6 +42,7 @@ |
4031 | |
4032 | def __init__(self, log_level=logging.ERROR): |
4033 | self.log = self.get_logger(level=log_level) |
4034 | + self.ubuntu_releases = self.get_ubuntu_releases() |
4035 | |
4036 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): |
4037 | """Get a logger object that will log to stdout.""" |
4038 | @@ -70,12 +80,44 @@ |
4039 | else: |
4040 | return False |
4041 | |
4042 | + def get_ubuntu_release_from_sentry(self, sentry_unit): |
4043 | + """Get Ubuntu release codename from sentry unit. |
4044 | + |
4045 | + :param sentry_unit: amulet sentry/service unit pointer |
4046 | + :returns: list of strings - release codename, failure message |
4047 | + """ |
4048 | + msg = None |
4049 | + cmd = 'lsb_release -cs' |
4050 | + release, code = sentry_unit.run(cmd) |
4051 | + if code == 0: |
4052 | + self.log.debug('{} lsb_release: {}'.format( |
4053 | + sentry_unit.info['unit_name'], release)) |
4054 | + else: |
4055 | + msg = ('{} `{}` returned {} ' |
4056 | + '{}'.format(sentry_unit.info['unit_name'], |
4057 | + cmd, release, code)) |
4058 | + if release not in self.ubuntu_releases: |
4059 | + msg = ("Release ({}) not found in Ubuntu releases " |
4060 | + "({})".format(release, self.ubuntu_releases)) |
4061 | + return release, msg |
4062 | + |
4063 | def validate_services(self, commands): |
4064 | - """Validate services. |
4065 | - |
4066 | - Verify the specified services are running on the corresponding |
4067 | + """Validate that lists of commands succeed on service units. Can be |
4068 | + used to verify system services are running on the corresponding |
4069 | service units. |
4070 | - """ |
4071 | + |
4072 | + :param commands: dict with sentry keys and arbitrary command list vals |
4073 | + :returns: None if successful, Failure string message otherwise |
4074 | + """ |
4075 | + self.log.debug('Checking status of system services...') |
4076 | + |
4077 | + # /!\ DEPRECATION WARNING (beisner): |
4078 | + # New and existing tests should be rewritten to use |
4079 | + # validate_services_by_name() as it is aware of init systems. |
4080 | + self.log.warn('/!\\ DEPRECATION WARNING: use ' |
4081 | + 'validate_services_by_name instead of validate_services ' |
4082 | + 'due to init system differences.') |
4083 | + |
4084 | for k, v in six.iteritems(commands): |
4085 | for cmd in v: |
4086 | output, code = k.run(cmd) |
4087 | @@ -86,6 +128,45 @@ |
4088 | return "command `{}` returned {}".format(cmd, str(code)) |
4089 | return None |
4090 | |
4091 | + def validate_services_by_name(self, sentry_services): |
4092 | + """Validate system service status by service name, automatically |
4093 | + detecting init system based on Ubuntu release codename. |
4094 | + |
4095 | + :param sentry_services: dict with sentry keys and svc list values |
4096 | + :returns: None if successful, Failure string message otherwise |
4097 | + """ |
4098 | + self.log.debug('Checking status of system services...') |
4099 | + |
4100 | + # Point at which systemd became a thing |
4101 | + systemd_switch = self.ubuntu_releases.index('vivid') |
4102 | + |
4103 | + for sentry_unit, services_list in six.iteritems(sentry_services): |
4104 | + # Get lsb_release codename from unit |
4105 | + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) |
4106 | + if ret: |
4107 | + return ret |
4108 | + |
4109 | + for service_name in services_list: |
4110 | + if (self.ubuntu_releases.index(release) >= systemd_switch or |
4111 | + service_name in ['rabbitmq-server', 'apache2']): |
4112 | + # init is systemd (or regular sysv) |
4113 | + cmd = 'sudo service {} status'.format(service_name) |
4114 | + output, code = sentry_unit.run(cmd) |
4115 | + service_running = code == 0 |
4116 | + elif self.ubuntu_releases.index(release) < systemd_switch: |
4117 | + # init is upstart |
4118 | + cmd = 'sudo status {}'.format(service_name) |
4119 | + output, code = sentry_unit.run(cmd) |
4120 | + service_running = code == 0 and "start/running" in output |
4121 | + |
4122 | + self.log.debug('{} `{}` returned ' |
4123 | + '{}'.format(sentry_unit.info['unit_name'], |
4124 | + cmd, code)) |
4125 | + if not service_running: |
4126 | + return u"command `{}` returned {} {}".format( |
4127 | + cmd, output, str(code)) |
4128 | + return None |
4129 | + |
4130 | def _get_config(self, unit, filename): |
4131 | """Get a ConfigParser object for parsing a unit's config file.""" |
4132 | file_contents = unit.file_contents(filename) |
4133 | @@ -93,7 +174,7 @@ |
4134 | # NOTE(beisner): by default, ConfigParser does not handle options |
4135 | # with no value, such as the flags used in the mysql my.cnf file. |
4136 | # https://bugs.python.org/issue7005 |
4137 | - config = ConfigParser.ConfigParser(allow_no_value=True) |
4138 | + config = configparser.ConfigParser(allow_no_value=True) |
4139 | config.readfp(io.StringIO(file_contents)) |
4140 | return config |
4141 | |
4142 | @@ -103,7 +184,15 @@ |
4143 | |
4144 | Verify that the specified section of the config file contains |
4145 | the expected option key:value pairs. |
4146 | + |
4147 | + Compare expected dictionary data vs actual dictionary data. |
4148 | + The values in the 'expected' dictionary can be strings, bools, ints, |
4149 | + longs, or can be a function that evaluates a variable and returns a |
4150 | + bool. |
4151 | """ |
4152 | + self.log.debug('Validating config file data ({} in {} on {})' |
4153 | + '...'.format(section, config_file, |
4154 | + sentry_unit.info['unit_name'])) |
4155 | config = self._get_config(sentry_unit, config_file) |
4156 | |
4157 | if section != 'DEFAULT' and not config.has_section(section): |
4158 | @@ -112,9 +201,20 @@ |
4159 | for k in expected.keys(): |
4160 | if not config.has_option(section, k): |
4161 | return "section [{}] is missing option {}".format(section, k) |
4162 | - if config.get(section, k) != expected[k]: |
4163 | + |
4164 | + actual = config.get(section, k) |
4165 | + v = expected[k] |
4166 | + if (isinstance(v, six.string_types) or |
4167 | + isinstance(v, bool) or |
4168 | + isinstance(v, six.integer_types)): |
4169 | + # handle explicit values |
4170 | + if actual != v: |
4171 | + return "section [{}] {}:{} != expected {}:{}".format( |
4172 | + section, k, actual, k, expected[k]) |
4173 | + # handle function pointers, such as not_null or valid_ip |
4174 | + elif not v(actual): |
4175 | return "section [{}] {}:{} != expected {}:{}".format( |
4176 | - section, k, config.get(section, k), k, expected[k]) |
4177 | + section, k, actual, k, expected[k]) |
4178 | return None |
4179 | |
4180 | def _validate_dict_data(self, expected, actual): |
4181 | @@ -122,7 +222,7 @@ |
4182 | |
4183 | Compare expected dictionary data vs actual dictionary data. |
4184 | The values in the 'expected' dictionary can be strings, bools, ints, |
4185 | - longs, or can be a function that evaluate a variable and returns a |
4186 | + longs, or can be a function that evaluates a variable and returns a |
4187 | bool. |
4188 | """ |
4189 | self.log.debug('actual: {}'.format(repr(actual))) |
4190 | @@ -133,8 +233,10 @@ |
4191 | if (isinstance(v, six.string_types) or |
4192 | isinstance(v, bool) or |
4193 | isinstance(v, six.integer_types)): |
4194 | + # handle explicit values |
4195 | if v != actual[k]: |
4196 | return "{}:{}".format(k, actual[k]) |
4197 | + # handle function pointers, such as not_null or valid_ip |
4198 | elif not v(actual[k]): |
4199 | return "{}:{}".format(k, actual[k]) |
4200 | else: |
4201 | @@ -321,3 +423,174 @@ |
4202 | |
4203 | def endpoint_error(self, name, data): |
4204 | return 'unexpected endpoint data in {} - {}'.format(name, data) |
4205 | + |
4206 | + def get_ubuntu_releases(self): |
4207 | + """Return a list of all Ubuntu releases in order of release.""" |
4208 | + _d = distro_info.UbuntuDistroInfo() |
4209 | + _release_list = _d.all |
4210 | + self.log.debug('Ubuntu release list: {}'.format(_release_list)) |
4211 | + return _release_list |
4212 | + |
4213 | + def file_to_url(self, file_rel_path): |
4214 | + """Convert a relative file path to a file URL.""" |
4215 | + _abs_path = os.path.abspath(file_rel_path) |
4216 | + return urlparse.urlparse(_abs_path, scheme='file').geturl() |
4217 | + |
4218 | + def check_commands_on_units(self, commands, sentry_units): |
4219 | + """Check that all commands in a list exit zero on all |
4220 | + sentry units in a list. |
4221 | + |
4222 | + :param commands: list of bash commands |
4223 | + :param sentry_units: list of sentry unit pointers |
4224 | + :returns: None if successful; Failure message otherwise |
4225 | + """ |
4226 | + self.log.debug('Checking exit codes for {} commands on {} ' |
4227 | + 'sentry units...'.format(len(commands), |
4228 | + len(sentry_units))) |
4229 | + for sentry_unit in sentry_units: |
4230 | + for cmd in commands: |
4231 | + output, code = sentry_unit.run(cmd) |
4232 | + if code == 0: |
4233 | + self.log.debug('{} `{}` returned {} ' |
4234 | + '(OK)'.format(sentry_unit.info['unit_name'], |
4235 | + cmd, code)) |
4236 | + else: |
4237 | + return ('{} `{}` returned {} ' |
4238 | + '{}'.format(sentry_unit.info['unit_name'], |
4239 | + cmd, code, output)) |
4240 | + return None |
4241 | + |
4242 | + def get_process_id_list(self, sentry_unit, process_name, |
4243 | + expect_success=True): |
4244 | + """Get a list of process ID(s) from a single sentry juju unit |
4245 | + for a single process name. |
4246 | + |
4247 | + :param sentry_unit: Amulet sentry instance (juju unit) |
4248 | + :param process_name: Process name |
4249 | + :param expect_success: If False, expect the PID to be missing, |
4250 | + raise if it is present. |
4251 | + :returns: List of process IDs |
4252 | + """ |
4253 | + cmd = 'pidof -x {}'.format(process_name) |
4254 | + if not expect_success: |
4255 | + cmd += " || exit 0 && exit 1" |
4256 | + output, code = sentry_unit.run(cmd) |
4257 | + if code != 0: |
4258 | + msg = ('{} `{}` returned {} ' |
4259 | + '{}'.format(sentry_unit.info['unit_name'], |
4260 | + cmd, code, output)) |
4261 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4262 | + return str(output).split() |
4263 | + |
4264 | + def get_unit_process_ids(self, unit_processes, expect_success=True): |
4265 | + """Construct a dict containing unit sentries, process names, and |
4266 | + process IDs. |
4267 | + |
4268 | + :param unit_processes: A dictionary of Amulet sentry instance |
4269 | + to list of process names. |
4270 | + :param expect_success: if False expect the processes to not be |
4271 | + running, raise if they are. |
4272 | + :returns: Dictionary of Amulet sentry instance to dictionary |
4273 | + of process names to PIDs. |
4274 | + """ |
4275 | + pid_dict = {} |
4276 | + for sentry_unit, process_list in six.iteritems(unit_processes): |
4277 | + pid_dict[sentry_unit] = {} |
4278 | + for process in process_list: |
4279 | + pids = self.get_process_id_list( |
4280 | + sentry_unit, process, expect_success=expect_success) |
4281 | + pid_dict[sentry_unit].update({process: pids}) |
4282 | + return pid_dict |
4283 | + |
4284 | + def validate_unit_process_ids(self, expected, actual): |
4285 | + """Validate process id quantities for services on units.""" |
4286 | + self.log.debug('Checking units for running processes...') |
4287 | + self.log.debug('Expected PIDs: {}'.format(expected)) |
4288 | + self.log.debug('Actual PIDs: {}'.format(actual)) |
4289 | + |
4290 | + if len(actual) != len(expected): |
4291 | + return ('Unit count mismatch. expected, actual: {}, ' |
4292 | + '{} '.format(len(expected), len(actual))) |
4293 | + |
4294 | + for (e_sentry, e_proc_names) in six.iteritems(expected): |
4295 | + e_sentry_name = e_sentry.info['unit_name'] |
4296 | + if e_sentry in actual.keys(): |
4297 | + a_proc_names = actual[e_sentry] |
4298 | + else: |
4299 | + return ('Expected sentry ({}) not found in actual dict data.' |
4300 | + '{}'.format(e_sentry_name, e_sentry)) |
4301 | + |
4302 | + if len(e_proc_names.keys()) != len(a_proc_names.keys()): |
4303 | + return ('Process name count mismatch. expected, actual: {}, ' |
4304 | + '{}'.format(len(expected), len(actual))) |
4305 | + |
4306 | + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ |
4307 | + zip(e_proc_names.items(), a_proc_names.items()): |
4308 | + if e_proc_name != a_proc_name: |
4309 | + return ('Process name mismatch. expected, actual: {}, ' |
4310 | + '{}'.format(e_proc_name, a_proc_name)) |
4311 | + |
4312 | + a_pids_length = len(a_pids) |
4313 | + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' |
4314 | + '{}, {} ({})'.format(e_sentry_name, e_proc_name, |
4315 | + e_pids_length, a_pids_length, |
4316 | + a_pids)) |
4317 | + |
4318 | + # If expected is not bool, ensure PID quantities match |
4319 | + if not isinstance(e_pids_length, bool) and \ |
4320 | + a_pids_length != e_pids_length: |
4321 | + return fail_msg |
4322 | + # If expected is bool True, ensure 1 or more PIDs exist |
4323 | + elif isinstance(e_pids_length, bool) and \ |
4324 | + e_pids_length is True and a_pids_length < 1: |
4325 | + return fail_msg |
4326 | + # If expected is bool False, ensure 0 PIDs exist |
4327 | + elif isinstance(e_pids_length, bool) and \ |
4328 | + e_pids_length is False and a_pids_length != 0: |
4329 | + return fail_msg |
4330 | + else: |
4331 | + self.log.debug('PID check OK: {} {} {}: ' |
4332 | + '{}'.format(e_sentry_name, e_proc_name, |
4333 | + e_pids_length, a_pids)) |
4334 | + return None |
4335 | + |
4336 | + def validate_list_of_identical_dicts(self, list_of_dicts): |
4337 | + """Check that all dicts within a list are identical.""" |
4338 | + hashes = [] |
4339 | + for _dict in list_of_dicts: |
4340 | + hashes.append(hash(frozenset(_dict.items()))) |
4341 | + |
4342 | + self.log.debug('Hashes: {}'.format(hashes)) |
4343 | + if len(set(hashes)) == 1: |
4344 | + self.log.debug('Dicts within list are identical') |
4345 | + else: |
4346 | + return 'Dicts within list are not identical' |
4347 | + |
4348 | + return None |
4349 | + |
4350 | + def run_action(self, unit_sentry, action, |
4351 | + _check_output=subprocess.check_output): |
4352 | + """Run the named action on a given unit sentry. |
4353 | + |
4354 | + _check_output parameter is used for dependency injection. |
4355 | + |
4356 | + @return action_id. |
4357 | + """ |
4358 | + unit_id = unit_sentry.info["unit_name"] |
4359 | + command = ["juju", "action", "do", "--format=json", unit_id, action] |
4360 | + self.log.info("Running command: %s\n" % " ".join(command)) |
4361 | + output = _check_output(command, universal_newlines=True) |
4362 | + data = json.loads(output) |
4363 | + action_id = data[u'Action queued with id'] |
4364 | + return action_id |
4365 | + |
4366 | + def wait_on_action(self, action_id, _check_output=subprocess.check_output): |
4367 | + """Wait for a given action, returning if it completed or not. |
4368 | + |
4369 | + _check_output parameter is used for dependency injection. |
4370 | + """ |
4371 | + command = ["juju", "action", "fetch", "--format=json", "--wait=0", |
4372 | + action_id] |
4373 | + output = _check_output(command, universal_newlines=True) |
4374 | + data = json.loads(output) |
4375 | + return data.get(u"status") == "completed" |
4376 | |
4377 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' |
4378 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-04-23 14:53:21 +0000 |
4379 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-08-27 15:02:41 +0000 |
4380 | @@ -44,7 +44,7 @@ |
4381 | Determine if the local branch being tested is derived from its |
4382 | stable or next (dev) branch, and based on this, use the corresponding |
4383 | stable or next branches for the other_services.""" |
4384 | - base_charms = ['mysql', 'mongodb'] |
4385 | + base_charms = ['mysql', 'mongodb', 'nrpe'] |
4386 | |
4387 | if self.series in ['precise', 'trusty']: |
4388 | base_series = self.series |
4389 | @@ -79,9 +79,9 @@ |
4390 | services.append(this_service) |
4391 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
4392 | 'ceph-osd', 'ceph-radosgw'] |
4393 | - # Openstack subordinate charms do not expose an origin option as that |
4394 | - # is controlled by the principle |
4395 | - ignore = ['neutron-openvswitch'] |
4396 | + # Most OpenStack subordinate charms do not expose an origin option |
4397 | + # as that is controlled by the principal. |
4398 | + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
4399 | |
4400 | if self.openstack: |
4401 | for svc in services: |
4402 | @@ -110,7 +110,8 @@ |
4403 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, |
4404 | self.precise_havana, self.precise_icehouse, |
4405 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
4406 | - self.trusty_kilo, self.vivid_kilo) = range(10) |
4407 | + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
4408 | + self.wily_liberty) = range(12) |
4409 | |
4410 | releases = { |
4411 | ('precise', None): self.precise_essex, |
4412 | @@ -121,8 +122,10 @@ |
4413 | ('trusty', None): self.trusty_icehouse, |
4414 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
4415 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
4416 | + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, |
4417 | ('utopic', None): self.utopic_juno, |
4418 | - ('vivid', None): self.vivid_kilo} |
4419 | + ('vivid', None): self.vivid_kilo, |
4420 | + ('wily', None): self.wily_liberty} |
4421 | return releases[(self.series, self.openstack)] |
4422 | |
4423 | def _get_openstack_release_string(self): |
4424 | @@ -138,9 +141,43 @@ |
4425 | ('trusty', 'icehouse'), |
4426 | ('utopic', 'juno'), |
4427 | ('vivid', 'kilo'), |
4428 | + ('wily', 'liberty'), |
4429 | ]) |
4430 | if self.openstack: |
4431 | os_origin = self.openstack.split(':')[1] |
4432 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
4433 | else: |
4434 | return releases[self.series] |
4435 | + |
4436 | + def get_ceph_expected_pools(self, radosgw=False): |
4437 | + """Return a list of expected ceph pools in a ceph + cinder + glance |
4438 | + test scenario, based on OpenStack release and whether ceph radosgw |
4439 | + is flagged as present or not.""" |
4440 | + |
4441 | + if self._get_openstack_release() >= self.trusty_kilo: |
4442 | + # Kilo or later |
4443 | + pools = [ |
4444 | + 'rbd', |
4445 | + 'cinder', |
4446 | + 'glance' |
4447 | + ] |
4448 | + else: |
4449 | + # Juno or earlier |
4450 | + pools = [ |
4451 | + 'data', |
4452 | + 'metadata', |
4453 | + 'rbd', |
4454 | + 'cinder', |
4455 | + 'glance' |
4456 | + ] |
4457 | + |
4458 | + if radosgw: |
4459 | + pools.extend([ |
4460 | + '.rgw.root', |
4461 | + '.rgw.control', |
4462 | + '.rgw', |
4463 | + '.rgw.gc', |
4464 | + '.users.uid' |
4465 | + ]) |
4466 | + |
4467 | + return pools |
4468 | |
4469 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' |
4470 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-02-10 18:50:39 +0000 |
4471 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-08-27 15:02:41 +0000 |
4472 | @@ -14,16 +14,20 @@ |
4473 | # You should have received a copy of the GNU Lesser General Public License |
4474 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
4475 | |
4476 | +import amulet |
4477 | +import json |
4478 | import logging |
4479 | import os |
4480 | +import six |
4481 | import time |
4482 | import urllib |
4483 | |
4484 | +import cinderclient.v1.client as cinder_client |
4485 | import glanceclient.v1.client as glance_client |
4486 | +import heatclient.v1.client as heat_client |
4487 | import keystoneclient.v2_0 as keystone_client |
4488 | import novaclient.v1_1.client as nova_client |
4489 | - |
4490 | -import six |
4491 | +import swiftclient |
4492 | |
4493 | from charmhelpers.contrib.amulet.utils import ( |
4494 | AmuletUtils |
4495 | @@ -37,7 +41,7 @@ |
4496 | """OpenStack amulet utilities. |
4497 | |
4498 | This class inherits from AmuletUtils and has additional support |
4499 | - that is specifically for use by OpenStack charms. |
4500 | + that is specifically for use by OpenStack charm tests. |
4501 | """ |
4502 | |
4503 | def __init__(self, log_level=ERROR): |
4504 | @@ -51,6 +55,8 @@ |
4505 | Validate actual endpoint data vs expected endpoint data. The ports |
4506 | are used to find the matching endpoint. |
4507 | """ |
4508 | + self.log.debug('Validating endpoint data...') |
4509 | + self.log.debug('actual: {}'.format(repr(endpoints))) |
4510 | found = False |
4511 | for ep in endpoints: |
4512 | self.log.debug('endpoint: {}'.format(repr(ep))) |
4513 | @@ -77,6 +83,7 @@ |
4514 | Validate a list of actual service catalog endpoints vs a list of |
4515 | expected service catalog endpoints. |
4516 | """ |
4517 | + self.log.debug('Validating service catalog endpoint data...') |
4518 | self.log.debug('actual: {}'.format(repr(actual))) |
4519 | for k, v in six.iteritems(expected): |
4520 | if k in actual: |
4521 | @@ -93,6 +100,7 @@ |
4522 | Validate a list of actual tenant data vs list of expected tenant |
4523 | data. |
4524 | """ |
4525 | + self.log.debug('Validating tenant data...') |
4526 | self.log.debug('actual: {}'.format(repr(actual))) |
4527 | for e in expected: |
4528 | found = False |
4529 | @@ -114,6 +122,7 @@ |
4530 | Validate a list of actual role data vs a list of expected role |
4531 | data. |
4532 | """ |
4533 | + self.log.debug('Validating role data...') |
4534 | self.log.debug('actual: {}'.format(repr(actual))) |
4535 | for e in expected: |
4536 | found = False |
4537 | @@ -134,6 +143,7 @@ |
4538 | Validate a list of actual user data vs a list of expected user |
4539 | data. |
4540 | """ |
4541 | + self.log.debug('Validating user data...') |
4542 | self.log.debug('actual: {}'.format(repr(actual))) |
4543 | for e in expected: |
4544 | found = False |
4545 | @@ -155,17 +165,30 @@ |
4546 | |
4547 | Validate a list of actual flavors vs a list of expected flavors. |
4548 | """ |
4549 | + self.log.debug('Validating flavor data...') |
4550 | self.log.debug('actual: {}'.format(repr(actual))) |
4551 | act = [a.name for a in actual] |
4552 | return self._validate_list_data(expected, act) |
4553 | |
4554 | def tenant_exists(self, keystone, tenant): |
4555 | """Return True if tenant exists.""" |
4556 | + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) |
4557 | return tenant in [t.name for t in keystone.tenants.list()] |
4558 | |
4559 | + def authenticate_cinder_admin(self, keystone_sentry, username, |
4560 | + password, tenant): |
4561 | + """Authenticates admin user with cinder.""" |
4562 | + # NOTE(beisner): cinder python client doesn't accept tokens. |
4563 | + service_ip = \ |
4564 | + keystone_sentry.relation('shared-db', |
4565 | + 'mysql:shared-db')['private-address'] |
4566 | + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) |
4567 | + return cinder_client.Client(username, password, tenant, ept) |
4568 | + |
4569 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
4570 | tenant): |
4571 | """Authenticates admin user with the keystone admin endpoint.""" |
4572 | + self.log.debug('Authenticating keystone admin...') |
4573 | unit = keystone_sentry |
4574 | service_ip = unit.relation('shared-db', |
4575 | 'mysql:shared-db')['private-address'] |
4576 | @@ -175,6 +198,7 @@ |
4577 | |
4578 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
4579 | """Authenticates a regular user with the keystone public endpoint.""" |
4580 | + self.log.debug('Authenticating keystone user ({})...'.format(user)) |
4581 | ep = keystone.service_catalog.url_for(service_type='identity', |
4582 | endpoint_type='publicURL') |
4583 | return keystone_client.Client(username=user, password=password, |
4584 | @@ -182,19 +206,49 @@ |
4585 | |
4586 | def authenticate_glance_admin(self, keystone): |
4587 | """Authenticates admin user with glance.""" |
4588 | + self.log.debug('Authenticating glance admin...') |
4589 | ep = keystone.service_catalog.url_for(service_type='image', |
4590 | endpoint_type='adminURL') |
4591 | return glance_client.Client(ep, token=keystone.auth_token) |
4592 | |
4593 | + def authenticate_heat_admin(self, keystone): |
4594 | + """Authenticates the admin user with heat.""" |
4595 | + self.log.debug('Authenticating heat admin...') |
4596 | + ep = keystone.service_catalog.url_for(service_type='orchestration', |
4597 | + endpoint_type='publicURL') |
4598 | + return heat_client.Client(endpoint=ep, token=keystone.auth_token) |
4599 | + |
4600 | def authenticate_nova_user(self, keystone, user, password, tenant): |
4601 | """Authenticates a regular user with nova-api.""" |
4602 | + self.log.debug('Authenticating nova user ({})...'.format(user)) |
4603 | ep = keystone.service_catalog.url_for(service_type='identity', |
4604 | endpoint_type='publicURL') |
4605 | return nova_client.Client(username=user, api_key=password, |
4606 | project_id=tenant, auth_url=ep) |
4607 | |
4608 | + def authenticate_swift_user(self, keystone, user, password, tenant): |
4609 | + """Authenticates a regular user with swift api.""" |
4610 | + self.log.debug('Authenticating swift user ({})...'.format(user)) |
4611 | + ep = keystone.service_catalog.url_for(service_type='identity', |
4612 | + endpoint_type='publicURL') |
4613 | + return swiftclient.Connection(authurl=ep, |
4614 | + user=user, |
4615 | + key=password, |
4616 | + tenant_name=tenant, |
4617 | + auth_version='2.0') |
4618 | + |
4619 | def create_cirros_image(self, glance, image_name): |
4620 | - """Download the latest cirros image and upload it to glance.""" |
4621 | + """Download the latest cirros image and upload it to glance, |
4622 | + validate and return a resource pointer. |
4623 | + |
4624 | + :param glance: pointer to authenticated glance connection |
4625 | + :param image_name: display name for new image |
4626 | + :returns: glance image pointer |
4627 | + """ |
4628 | + self.log.debug('Creating glance cirros image ' |
4629 | + '({})...'.format(image_name)) |
4630 | + |
4631 | + # Download cirros image |
4632 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
4633 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
4634 | if http_proxy: |
4635 | @@ -203,57 +257,67 @@ |
4636 | else: |
4637 | opener = urllib.FancyURLopener() |
4638 | |
4639 | - f = opener.open("http://download.cirros-cloud.net/version/released") |
4640 | + f = opener.open('http://download.cirros-cloud.net/version/released') |
4641 | version = f.read().strip() |
4642 | - cirros_img = "cirros-{}-x86_64-disk.img".format(version) |
4643 | + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
4644 | local_path = os.path.join('tests', cirros_img) |
4645 | |
4646 | if not os.path.exists(local_path): |
4647 | - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", |
4648 | + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
4649 | version, cirros_img) |
4650 | opener.retrieve(cirros_url, local_path) |
4651 | f.close() |
4652 | |
4653 | + # Create glance image |
4654 | with open(local_path) as f: |
4655 | image = glance.images.create(name=image_name, is_public=True, |
4656 | disk_format='qcow2', |
4657 | container_format='bare', data=f) |
4658 | - count = 1 |
4659 | - status = image.status |
4660 | - while status != 'active' and count < 10: |
4661 | - time.sleep(3) |
4662 | - image = glance.images.get(image.id) |
4663 | - status = image.status |
4664 | - self.log.debug('image status: {}'.format(status)) |
4665 | - count += 1 |
4666 | - |
4667 | - if status != 'active': |
4668 | - self.log.error('image creation timed out') |
4669 | - return None |
4670 | + |
4671 | + # Wait for image to reach active status |
4672 | + img_id = image.id |
4673 | + ret = self.resource_reaches_status(glance.images, img_id, |
4674 | + expected_stat='active', |
4675 | + msg='Image status wait') |
4676 | + if not ret: |
4677 | + msg = 'Glance image failed to reach expected state.' |
4678 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4679 | + |
4680 | + # Re-validate new image |
4681 | + self.log.debug('Validating image attributes...') |
4682 | + val_img_name = glance.images.get(img_id).name |
4683 | + val_img_stat = glance.images.get(img_id).status |
4684 | + val_img_pub = glance.images.get(img_id).is_public |
4685 | + val_img_cfmt = glance.images.get(img_id).container_format |
4686 | + val_img_dfmt = glance.images.get(img_id).disk_format |
4687 | + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' |
4688 | + 'container fmt:{} disk fmt:{}'.format( |
4689 | + val_img_name, val_img_pub, img_id, |
4690 | + val_img_stat, val_img_cfmt, val_img_dfmt)) |
4691 | + |
4692 | + if val_img_name == image_name and val_img_stat == 'active' \ |
4693 | + and val_img_pub is True and val_img_cfmt == 'bare' \ |
4694 | + and val_img_dfmt == 'qcow2': |
4695 | + self.log.debug(msg_attr) |
4696 | + else: |
4697 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
4698 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4699 | |
4700 | return image |
4701 | |
4702 | def delete_image(self, glance, image): |
4703 | """Delete the specified image.""" |
4704 | - num_before = len(list(glance.images.list())) |
4705 | - glance.images.delete(image) |
4706 | - |
4707 | - count = 1 |
4708 | - num_after = len(list(glance.images.list())) |
4709 | - while num_after != (num_before - 1) and count < 10: |
4710 | - time.sleep(3) |
4711 | - num_after = len(list(glance.images.list())) |
4712 | - self.log.debug('number of images: {}'.format(num_after)) |
4713 | - count += 1 |
4714 | - |
4715 | - if num_after != (num_before - 1): |
4716 | - self.log.error('image deletion timed out') |
4717 | - return False |
4718 | - |
4719 | - return True |
4720 | + |
4721 | + # /!\ DEPRECATION WARNING |
4722 | + self.log.warn('/!\\ DEPRECATION WARNING: use ' |
4723 | + 'delete_resource instead of delete_image.') |
4724 | + self.log.debug('Deleting glance image ({})...'.format(image)) |
4725 | + return self.delete_resource(glance.images, image, msg='glance image') |
4726 | |
4727 | def create_instance(self, nova, image_name, instance_name, flavor): |
4728 | """Create the specified instance.""" |
4729 | + self.log.debug('Creating instance ' |
4730 | + '({}|{}|{})'.format(instance_name, image_name, flavor)) |
4731 | image = nova.images.find(name=image_name) |
4732 | flavor = nova.flavors.find(name=flavor) |
4733 | instance = nova.servers.create(name=instance_name, image=image, |
4734 | @@ -276,19 +340,265 @@ |
4735 | |
4736 | def delete_instance(self, nova, instance): |
4737 | """Delete the specified instance.""" |
4738 | - num_before = len(list(nova.servers.list())) |
4739 | - nova.servers.delete(instance) |
4740 | - |
4741 | - count = 1 |
4742 | - num_after = len(list(nova.servers.list())) |
4743 | - while num_after != (num_before - 1) and count < 10: |
4744 | - time.sleep(3) |
4745 | - num_after = len(list(nova.servers.list())) |
4746 | - self.log.debug('number of instances: {}'.format(num_after)) |
4747 | - count += 1 |
4748 | - |
4749 | - if num_after != (num_before - 1): |
4750 | - self.log.error('instance deletion timed out') |
4751 | - return False |
4752 | - |
4753 | - return True |
4754 | + |
4755 | + # /!\ DEPRECATION WARNING |
4756 | + self.log.warn('/!\\ DEPRECATION WARNING: use ' |
4757 | + 'delete_resource instead of delete_instance.') |
4758 | + self.log.debug('Deleting instance ({})...'.format(instance)) |
4759 | + return self.delete_resource(nova.servers, instance, |
4760 | + msg='nova instance') |
4761 | + |
4762 | + def create_or_get_keypair(self, nova, keypair_name="testkey"): |
4763 | + """Create a new keypair, or return pointer if it already exists.""" |
4764 | + try: |
4765 | + _keypair = nova.keypairs.get(keypair_name) |
4766 | + self.log.debug('Keypair ({}) already exists, ' |
4767 | + 'using it.'.format(keypair_name)) |
4768 | + return _keypair |
4769 | + except: |
4770 | + self.log.debug('Keypair ({}) does not exist, ' |
4771 | + 'creating it.'.format(keypair_name)) |
4772 | + |
4773 | + _keypair = nova.keypairs.create(name=keypair_name) |
4774 | + return _keypair |
4775 | + |
4776 | + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, |
4777 | + img_id=None, src_vol_id=None, snap_id=None): |
4778 | + """Create cinder volume, optionally from a glance image, OR |
4779 | + optionally as a clone of an existing volume, OR optionally |
4780 | + from a snapshot. Wait for the new volume status to reach |
4781 | + the expected status, validate and return a resource pointer. |
4782 | + |
4783 | + :param vol_name: cinder volume display name |
4784 | + :param vol_size: size in gigabytes |
4785 | + :param img_id: optional glance image id |
4786 | + :param src_vol_id: optional source volume id to clone |
4787 | + :param snap_id: optional snapshot id to use |
4788 | + :returns: cinder volume pointer |
4789 | + """ |
4790 | + # Handle parameter input and avoid impossible combinations |
4791 | + if img_id and not src_vol_id and not snap_id: |
4792 | + # Create volume from image |
4793 | + self.log.debug('Creating cinder volume from glance image...') |
4794 | + bootable = 'true' |
4795 | + elif src_vol_id and not img_id and not snap_id: |
4796 | + # Clone an existing volume |
4797 | + self.log.debug('Cloning cinder volume...') |
4798 | + bootable = cinder.volumes.get(src_vol_id).bootable |
4799 | + elif snap_id and not src_vol_id and not img_id: |
4800 | + # Create volume from snapshot |
4801 | + self.log.debug('Creating cinder volume from snapshot...') |
4802 | + snap = cinder.volume_snapshots.find(id=snap_id) |
4803 | + vol_size = snap.size |
4804 | + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id |
4805 | + bootable = cinder.volumes.get(snap_vol_id).bootable |
4806 | + elif not img_id and not src_vol_id and not snap_id: |
4807 | + # Create volume |
4808 | + self.log.debug('Creating cinder volume...') |
4809 | + bootable = 'false' |
4810 | + else: |
4811 | + # Impossible combination of parameters |
4812 | + msg = ('Invalid method use - name:{} size:{} img_id:{} ' |
4813 | + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, |
4814 | + img_id, src_vol_id, |
4815 | + snap_id)) |
4816 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4817 | + |
4818 | + # Create new volume |
4819 | + try: |
4820 | + vol_new = cinder.volumes.create(display_name=vol_name, |
4821 | + imageRef=img_id, |
4822 | + size=vol_size, |
4823 | + source_volid=src_vol_id, |
4824 | + snapshot_id=snap_id) |
4825 | + vol_id = vol_new.id |
4826 | + except Exception as e: |
4827 | + msg = 'Failed to create volume: {}'.format(e) |
4828 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4829 | + |
4830 | + # Wait for volume to reach available status |
4831 | + ret = self.resource_reaches_status(cinder.volumes, vol_id, |
4832 | + expected_stat="available", |
4833 | + msg="Volume status wait") |
4834 | + if not ret: |
4835 | + msg = 'Cinder volume failed to reach expected state.' |
4836 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4837 | + |
4838 | + # Re-validate new volume |
4839 | + self.log.debug('Validating volume attributes...') |
4840 | + val_vol_name = cinder.volumes.get(vol_id).display_name |
4841 | + val_vol_boot = cinder.volumes.get(vol_id).bootable |
4842 | + val_vol_stat = cinder.volumes.get(vol_id).status |
4843 | + val_vol_size = cinder.volumes.get(vol_id).size |
4844 | + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' |
4845 | + '{} size:{}'.format(val_vol_name, vol_id, |
4846 | + val_vol_stat, val_vol_boot, |
4847 | + val_vol_size)) |
4848 | + |
4849 | + if val_vol_boot == bootable and val_vol_stat == 'available' \ |
4850 | + and val_vol_name == vol_name and val_vol_size == vol_size: |
4851 | + self.log.debug(msg_attr) |
4852 | + else: |
4853 | + msg = ('Volume validation failed, {}'.format(msg_attr)) |
4854 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4855 | + |
4856 | + return vol_new |
4857 | + |
4858 | + def delete_resource(self, resource, resource_id, |
4859 | + msg="resource", max_wait=120): |
4860 | + """Delete one openstack resource, such as one instance, keypair, |
4861 | + image, volume, stack, etc., and confirm deletion within max wait time. |
4862 | + |
4863 | + :param resource: pointer to os resource type, ex:glance_client.images |
4864 | + :param resource_id: unique name or id for the openstack resource |
4865 | + :param msg: text to identify purpose in logging |
4866 | + :param max_wait: maximum wait time in seconds |
4867 | + :returns: True if successful, otherwise False |
4868 | + """ |
4869 | + self.log.debug('Deleting OpenStack resource ' |
4870 | + '{} ({})'.format(resource_id, msg)) |
4871 | + num_before = len(list(resource.list())) |
4872 | + resource.delete(resource_id) |
4873 | + |
4874 | + tries = 0 |
4875 | + num_after = len(list(resource.list())) |
4876 | + while num_after != (num_before - 1) and tries < (max_wait / 4): |
4877 | + self.log.debug('{} delete check: ' |
4878 | + '{} [{}:{}] {}'.format(msg, tries, |
4879 | + num_before, |
4880 | + num_after, |
4881 | + resource_id)) |
4882 | + time.sleep(4) |
4883 | + num_after = len(list(resource.list())) |
4884 | + tries += 1 |
4885 | + |
4886 | + self.log.debug('{}: expected, actual count = {}, ' |
4887 | + '{}'.format(msg, num_before - 1, num_after)) |
4888 | + |
4889 | + if num_after == (num_before - 1): |
4890 | + return True |
4891 | + else: |
4892 | + self.log.error('{} delete timed out'.format(msg)) |
4893 | + return False |
4894 | + |
4895 | + def resource_reaches_status(self, resource, resource_id, |
4896 | + expected_stat='available', |
4897 | + msg='resource', max_wait=120): |
4898 | + """Wait for an openstack resources status to reach an |
4899 | + expected status within a specified time. Useful to confirm that |
4900 | + nova instances, cinder vols, snapshots, glance images, heat stacks |
4901 | + and other resources eventually reach the expected status. |
4902 | + |
4903 | + :param resource: pointer to os resource type, ex: heat_client.stacks |
4904 | + :param resource_id: unique id for the openstack resource |
4905 | + :param expected_stat: status to expect resource to reach |
4906 | + :param msg: text to identify purpose in logging |
4907 | + :param max_wait: maximum wait time in seconds |
4908 | + :returns: True if successful, False if status is not reached |
4909 | + """ |
4910 | + |
4911 | + tries = 0 |
4912 | + resource_stat = resource.get(resource_id).status |
4913 | + while resource_stat != expected_stat and tries < (max_wait / 4): |
4914 | + self.log.debug('{} status check: ' |
4915 | + '{} [{}:{}] {}'.format(msg, tries, |
4916 | + resource_stat, |
4917 | + expected_stat, |
4918 | + resource_id)) |
4919 | + time.sleep(4) |
4920 | + resource_stat = resource.get(resource_id).status |
4921 | + tries += 1 |
4922 | + |
4923 | + self.log.debug('{}: expected, actual status = {}, ' |
4924 | + '{}'.format(msg, resource_stat, expected_stat)) |
4925 | + |
4926 | + if resource_stat == expected_stat: |
4927 | + return True |
4928 | + else: |
4929 | + self.log.debug('{} never reached expected status: ' |
4930 | + '{}'.format(resource_id, expected_stat)) |
4931 | + return False |
4932 | + |
4933 | + def get_ceph_osd_id_cmd(self, index): |
4934 | + """Produce a shell command that will return a ceph-osd id.""" |
4935 | + return ("`initctl list | grep 'ceph-osd ' | " |
4936 | + "awk 'NR=={} {{ print $2 }}' | " |
4937 | + "grep -o '[0-9]*'`".format(index + 1)) |
4938 | + |
4939 | + def get_ceph_pools(self, sentry_unit): |
4940 | + """Return a dict of ceph pools from a single ceph unit, with |
4941 | + pool name as keys, pool id as vals.""" |
4942 | + pools = {} |
4943 | + cmd = 'sudo ceph osd lspools' |
4944 | + output, code = sentry_unit.run(cmd) |
4945 | + if code != 0: |
4946 | + msg = ('{} `{}` returned {} ' |
4947 | + '{}'.format(sentry_unit.info['unit_name'], |
4948 | + cmd, code, output)) |
4949 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4950 | + |
4951 | + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, |
4952 | + for pool in str(output).split(','): |
4953 | + pool_id_name = pool.split(' ') |
4954 | + if len(pool_id_name) == 2: |
4955 | + pool_id = pool_id_name[0] |
4956 | + pool_name = pool_id_name[1] |
4957 | + pools[pool_name] = int(pool_id) |
4958 | + |
4959 | + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], |
4960 | + pools)) |
4961 | + return pools |
4962 | + |
4963 | + def get_ceph_df(self, sentry_unit): |
4964 | + """Return dict of ceph df json output, including ceph pool state. |
4965 | + |
4966 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
4967 | + :returns: Dict of ceph df output |
4968 | + """ |
4969 | + cmd = 'sudo ceph df --format=json' |
4970 | + output, code = sentry_unit.run(cmd) |
4971 | + if code != 0: |
4972 | + msg = ('{} `{}` returned {} ' |
4973 | + '{}'.format(sentry_unit.info['unit_name'], |
4974 | + cmd, code, output)) |
4975 | + amulet.raise_status(amulet.FAIL, msg=msg) |
4976 | + return json.loads(output) |
4977 | + |
4978 | + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): |
4979 | + """Take a sample of attributes of a ceph pool, returning ceph |
4980 | + pool name, object count and disk space used for the specified |
4981 | + pool ID number. |
4982 | + |
4983 | + :param sentry_unit: Pointer to amulet sentry instance (juju unit) |
4984 | + :param pool_id: Ceph pool ID |
4985 | + :returns: List of pool name, object count, kb disk space used |
4986 | + """ |
4987 | + df = self.get_ceph_df(sentry_unit) |
4988 | + pool_name = df['pools'][pool_id]['name'] |
4989 | + obj_count = df['pools'][pool_id]['stats']['objects'] |
4990 | + kb_used = df['pools'][pool_id]['stats']['kb_used'] |
4991 | + self.log.debug('Ceph {} pool (ID {}): {} objects, ' |
4992 | + '{} kb used'.format(pool_name, pool_id, |
4993 | + obj_count, kb_used)) |
4994 | + return pool_name, obj_count, kb_used |
4995 | + |
4996 | + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): |
4997 | + """Validate ceph pool samples taken over time, such as pool |
4998 | + object counts or pool kb used, before adding, after adding, and |
4999 | + after deleting items which affect those pool attributes. The |
5000 | + 2nd element is expected to be greater than the 1st; 3rd is expected |
The diff has been truncated for viewing.