Merge lp:~james-page/charms/trusty/cinder/lp1521604 into lp:~openstack-charmers-archive/charms/trusty/cinder/trunk
- Trusty Tahr (14.04)
- lp1521604
- Merge into trunk
Status: | Superseded |
---|---|
Proposed branch: | lp:~james-page/charms/trusty/cinder/lp1521604 |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/cinder/trunk |
Diff against target: |
6465 lines (+4706/-553) (has conflicts) 50 files modified
.bzrignore (+2/-0) .testr.conf (+8/-0) actions/openstack_upgrade.py (+44/-0) config.yaml (+47/-10) hooks/charmhelpers/cli/__init__.py (+191/-0) hooks/charmhelpers/cli/benchmark.py (+36/-0) hooks/charmhelpers/cli/commands.py (+32/-0) hooks/charmhelpers/cli/hookenv.py (+23/-0) hooks/charmhelpers/cli/host.py (+31/-0) hooks/charmhelpers/cli/unitdata.py (+39/-0) hooks/charmhelpers/contrib/charmsupport/nrpe.py (+52/-14) hooks/charmhelpers/contrib/network/ip.py (+21/-19) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+150/-11) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+650/-1) hooks/charmhelpers/contrib/openstack/context.py (+122/-18) hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh (+7/-5) hooks/charmhelpers/contrib/openstack/neutron.py (+40/-0) hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+6/-0) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+16/-9) hooks/charmhelpers/contrib/openstack/utils.py (+359/-8) hooks/charmhelpers/contrib/python/packages.py (+13/-4) hooks/charmhelpers/contrib/storage/linux/ceph.py (+652/-49) hooks/charmhelpers/contrib/storage/linux/loopback.py (+10/-0) hooks/charmhelpers/core/files.py (+45/-0) hooks/charmhelpers/core/hookenv.py (+496/-175) hooks/charmhelpers/core/host.py (+107/-3) hooks/charmhelpers/core/hugepage.py (+71/-0) hooks/charmhelpers/core/kernel.py (+68/-0) hooks/charmhelpers/core/services/helpers.py (+40/-5) hooks/charmhelpers/core/templating.py (+21/-8) hooks/charmhelpers/fetch/__init__.py (+46/-9) hooks/charmhelpers/fetch/archiveurl.py (+1/-1) hooks/charmhelpers/fetch/bzrurl.py (+22/-32) hooks/charmhelpers/fetch/giturl.py (+29/-14) hooks/cinder_hooks.py (+13/-0) hooks/cinder_utils.py (+30/-9) metadata.yaml (+10/-2) requirements.txt (+11/-0) test-requirements.txt (+8/-0) tests/052-basic-trusty-kilo-git (+12/-0) tests/basic_deployment.py (+5/-0) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+150/-11) tests/charmhelpers/contrib/openstack/amulet/utils.py 
(+650/-1) tests/tests.yaml (+20/-0) tox.ini (+29/-0) unit_tests/test_actions_git_reinstall.py (+6/-17) unit_tests/test_actions_openstack_upgrade.py (+68/-0) unit_tests/test_cinder_hooks.py (+17/-25) unit_tests/test_cinder_utils.py (+170/-75) unit_tests/test_cluster_hooks.py (+10/-18) Conflict adding file actions/openstack-upgrade. Moved existing file to actions/openstack-upgrade.moved. Conflict adding file actions/openstack_upgrade.py. Moved existing file to actions/openstack_upgrade.py.moved. Text conflict in config.yaml Conflict adding file hooks/backup-backend-relation-broken. Moved existing file to hooks/backup-backend-relation-broken.moved. Conflict adding file hooks/backup-backend-relation-changed. Moved existing file to hooks/backup-backend-relation-changed.moved. Conflict adding file hooks/backup-backend-relation-departed. Moved existing file to hooks/backup-backend-relation-departed.moved. Conflict adding file hooks/backup-backend-relation-joined. Moved existing file to hooks/backup-backend-relation-joined.moved. Conflict adding file hooks/charmhelpers/cli. Moved existing file to hooks/charmhelpers/cli.moved. Text conflict in hooks/charmhelpers/contrib/openstack/amulet/deployment.py Text conflict in hooks/charmhelpers/contrib/openstack/amulet/utils.py Text conflict in hooks/charmhelpers/contrib/openstack/context.py Text conflict in hooks/charmhelpers/contrib/openstack/neutron.py Text conflict in hooks/charmhelpers/contrib/openstack/utils.py Text conflict in hooks/charmhelpers/contrib/storage/linux/ceph.py Conflict adding file hooks/charmhelpers/core/files.py. Moved existing file to hooks/charmhelpers/core/files.py.moved. Text conflict in hooks/charmhelpers/core/hookenv.py Text conflict in hooks/charmhelpers/core/host.py Conflict adding file hooks/charmhelpers/core/hugepage.py. Moved existing file to hooks/charmhelpers/core/hugepage.py.moved. Conflict adding file hooks/charmhelpers/core/kernel.py. 
Moved existing file to hooks/charmhelpers/core/kernel.py.moved. Text conflict in hooks/charmhelpers/core/services/helpers.py Text conflict in hooks/charmhelpers/fetch/__init__.py Text conflict in hooks/charmhelpers/fetch/giturl.py Text conflict in hooks/cinder_hooks.py Text conflict in hooks/cinder_utils.py Conflict adding file hooks/install.real. Moved existing file to hooks/install.real.moved. Conflict adding file hooks/update-status. Moved existing file to hooks/update-status.moved. Text conflict in metadata.yaml Conflict adding file tests/052-basic-trusty-kilo-git. Moved existing file to tests/052-basic-trusty-kilo-git.moved. Text conflict in tests/basic_deployment.py Text conflict in tests/charmhelpers/contrib/openstack/amulet/deployment.py Text conflict in tests/charmhelpers/contrib/openstack/amulet/utils.py Conflict adding file tests/tests.yaml. Moved existing file to tests/tests.yaml.moved. Conflict adding file unit_tests/test_actions_openstack_upgrade.py. Moved existing file to unit_tests/test_actions_openstack_upgrade.py.moved. Text conflict in unit_tests/test_cinder_utils.py |
To merge this branch: | bzr merge lp:~james-page/charms/trusty/cinder/lp1521604 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+281798@code.launchpad.net |
This proposal has been superseded by a proposal from 2016-01-06.
Commit message
Description of the change
Drop requirement for identity service unless api service is enabled.
- 141. By James Page
-
Also avoid overwrite of actual endpoint information for service instances where api service is not enabled
- 142. By James Page
-
Tidy lint
Unmerged revisions
- 142. By James Page
-
Tidy lint
- 141. By James Page
-
Also avoid overwrite of actual endpoint information for service instances where api service is not enabled
- 140. By James Page
-
Ensure that identity-service interface is only required when the api service is enabled.
- 139. By Liam Young
-
[james-page, r=gnuoy] Charmhelper sync
- 138. By Corey Bryant
-
[corey.bryant, r=trivial] Sync charm-helpers.
- 137. By James Page
-
Workaround upstream bug in quota authentication
- 136. By James Page
-
Add sane haproxy timeout defaults and make them configurable.
- 135. By James Page
-
Update maintainer
- 134. By Corey Bryant
-
[james-page, r=corey.bryant] Add tox support for lint and unit tests.
- 133. By Liam Young
-
[hopem, r=gnuoy]
Add support for cinder-backup subordinate
Preview Diff
1 | === modified file '.bzrignore' | |||
2 | --- .bzrignore 2014-07-02 08:13:36 +0000 | |||
3 | +++ .bzrignore 2016-01-06 21:19:13 +0000 | |||
4 | @@ -1,2 +1,4 @@ | |||
5 | 1 | bin | 1 | bin |
6 | 2 | .coverage | 2 | .coverage |
7 | 3 | .testrepository | ||
8 | 4 | .tox | ||
9 | 3 | 5 | ||
10 | === added file '.testr.conf' | |||
11 | --- .testr.conf 1970-01-01 00:00:00 +0000 | |||
12 | +++ .testr.conf 2016-01-06 21:19:13 +0000 | |||
13 | @@ -0,0 +1,8 @@ | |||
14 | 1 | [DEFAULT] | ||
15 | 2 | test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ | ||
16 | 3 | OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ | ||
17 | 4 | OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ | ||
18 | 5 | ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION | ||
19 | 6 | |||
20 | 7 | test_id_option=--load-list $IDFILE | ||
21 | 8 | test_list_option=--list | ||
22 | 0 | 9 | ||
23 | === added symlink 'actions/openstack-upgrade' | |||
24 | === target is u'openstack_upgrade.py' | |||
25 | === renamed symlink 'actions/openstack-upgrade' => 'actions/openstack-upgrade.moved' | |||
26 | === added file 'actions/openstack_upgrade.py' | |||
27 | --- actions/openstack_upgrade.py 1970-01-01 00:00:00 +0000 | |||
28 | +++ actions/openstack_upgrade.py 2016-01-06 21:19:13 +0000 | |||
29 | @@ -0,0 +1,44 @@ | |||
30 | 1 | #!/usr/bin/python | ||
31 | 2 | import sys | ||
32 | 3 | import uuid | ||
33 | 4 | |||
34 | 5 | sys.path.append('hooks/') | ||
35 | 6 | |||
36 | 7 | from charmhelpers.contrib.openstack.utils import ( | ||
37 | 8 | do_action_openstack_upgrade, | ||
38 | 9 | ) | ||
39 | 10 | |||
40 | 11 | from charmhelpers.core.hookenv import ( | ||
41 | 12 | relation_ids, | ||
42 | 13 | relation_set, | ||
43 | 14 | ) | ||
44 | 15 | |||
45 | 16 | from cinder_hooks import ( | ||
46 | 17 | config_changed, | ||
47 | 18 | CONFIGS, | ||
48 | 19 | ) | ||
49 | 20 | |||
50 | 21 | from cinder_utils import ( | ||
51 | 22 | do_openstack_upgrade, | ||
52 | 23 | ) | ||
53 | 24 | |||
54 | 25 | |||
55 | 26 | def openstack_upgrade(): | ||
56 | 27 | """Upgrade packages to config-set Openstack version. | ||
57 | 28 | |||
58 | 29 | If the charm was installed from source we cannot upgrade it. | ||
59 | 30 | For backwards compatibility a config flag must be set for this | ||
60 | 31 | code to run, otherwise a full service level upgrade will fire | ||
61 | 32 | on config-changed.""" | ||
62 | 33 | |||
63 | 34 | if (do_action_openstack_upgrade('cinder-common', | ||
64 | 35 | do_openstack_upgrade, | ||
65 | 36 | CONFIGS)): | ||
66 | 37 | # tell any storage-backends we just upgraded | ||
67 | 38 | for rid in relation_ids('storage-backend'): | ||
68 | 39 | relation_set(relation_id=rid, | ||
69 | 40 | upgrade_nonce=uuid.uuid4()) | ||
70 | 41 | config_changed() | ||
71 | 42 | |||
72 | 43 | if __name__ == '__main__': | ||
73 | 44 | openstack_upgrade() | ||
74 | 0 | 45 | ||
75 | === renamed file 'actions/openstack_upgrade.py' => 'actions/openstack_upgrade.py.moved' | |||
76 | === modified file 'charm-helpers-hooks.yaml' | |||
77 | === modified file 'config.yaml' | |||
78 | --- config.yaml 2015-10-22 13:19:13 +0000 | |||
79 | +++ config.yaml 2016-01-06 21:19:13 +0000 | |||
80 | @@ -282,13 +282,50 @@ | |||
81 | 282 | description: | | 282 | description: | |
82 | 283 | A comma-separated list of nagios servicegroups. | 283 | A comma-separated list of nagios servicegroups. |
83 | 284 | If left empty, the nagios_context will be used as the servicegroup | 284 | If left empty, the nagios_context will be used as the servicegroup |
94 | 285 | action-managed-upgrade: | 285 | <<<<<<< TREE |
95 | 286 | type: boolean | 286 | action-managed-upgrade: |
96 | 287 | default: False | 287 | type: boolean |
97 | 288 | description: | | 288 | default: False |
98 | 289 | If True enables openstack upgrades for this charm via juju actions. | 289 | description: | |
99 | 290 | You will still need to set openstack-origin to the new repository but | 290 | If True enables openstack upgrades for this charm via juju actions. |
100 | 291 | instead of an upgrade running automatically across all units, it will | 291 | You will still need to set openstack-origin to the new repository but |
101 | 292 | wait for you to execute the openstack-upgrade action for this charm on | 292 | instead of an upgrade running automatically across all units, it will |
102 | 293 | each unit. If False it will revert to existing behavior of upgrading | 293 | wait for you to execute the openstack-upgrade action for this charm on |
103 | 294 | all units on config change. | 294 | each unit. If False it will revert to existing behavior of upgrading |
104 | 295 | all units on config change. | ||
105 | 296 | ======= | ||
106 | 297 | action-managed-upgrade: | ||
107 | 298 | type: boolean | ||
108 | 299 | default: False | ||
109 | 300 | description: | | ||
110 | 301 | If True enables openstack upgrades for this charm via juju actions. | ||
111 | 302 | You will still need to set openstack-origin to the new repository but | ||
112 | 303 | instead of an upgrade running automatically across all units, it will | ||
113 | 304 | wait for you to execute the openstack-upgrade action for this charm on | ||
114 | 305 | each unit. If False it will revert to existing behavior of upgrading | ||
115 | 306 | all units on config change. | ||
116 | 307 | haproxy-server-timeout: | ||
117 | 308 | type: int | ||
118 | 309 | default: | ||
119 | 310 | description: | | ||
120 | 311 | Server timeout configuration in ms for haproxy, used in HA | ||
121 | 312 | configurations. If not provided, default value of 30000ms is used. | ||
122 | 313 | haproxy-client-timeout: | ||
123 | 314 | type: int | ||
124 | 315 | default: | ||
125 | 316 | description: | | ||
126 | 317 | Client timeout configuration in ms for haproxy, used in HA | ||
127 | 318 | configurations. If not provided, default value of 30000ms is used. | ||
128 | 319 | haproxy-queue-timeout: | ||
129 | 320 | type: int | ||
130 | 321 | default: | ||
131 | 322 | description: | | ||
132 | 323 | Queue timeout configuration in ms for haproxy, used in HA | ||
133 | 324 | configurations. If not provided, default value of 5000ms is used. | ||
134 | 325 | haproxy-connect-timeout: | ||
135 | 326 | type: int | ||
136 | 327 | default: | ||
137 | 328 | description: | | ||
138 | 329 | Connect timeout configuration in ms for haproxy, used in HA | ||
139 | 330 | configurations. If not provided, default value of 5000ms is used. | ||
140 | 331 | >>>>>>> MERGE-SOURCE | ||
141 | 295 | 332 | ||
142 | === added symlink 'hooks/backup-backend-relation-broken' | |||
143 | === target is u'cinder_hooks.py' | |||
144 | === renamed symlink 'hooks/backup-backend-relation-broken' => 'hooks/backup-backend-relation-broken.moved' | |||
145 | === added symlink 'hooks/backup-backend-relation-changed' | |||
146 | === target is u'cinder_hooks.py' | |||
147 | === renamed symlink 'hooks/backup-backend-relation-changed' => 'hooks/backup-backend-relation-changed.moved' | |||
148 | === added symlink 'hooks/backup-backend-relation-departed' | |||
149 | === target is u'cinder_hooks.py' | |||
150 | === renamed symlink 'hooks/backup-backend-relation-departed' => 'hooks/backup-backend-relation-departed.moved' | |||
151 | === added symlink 'hooks/backup-backend-relation-joined' | |||
152 | === target is u'cinder_hooks.py' | |||
153 | === renamed symlink 'hooks/backup-backend-relation-joined' => 'hooks/backup-backend-relation-joined.moved' | |||
154 | === added directory 'hooks/charmhelpers/cli' | |||
155 | === renamed directory 'hooks/charmhelpers/cli' => 'hooks/charmhelpers/cli.moved' | |||
156 | === added file 'hooks/charmhelpers/cli/__init__.py' | |||
157 | --- hooks/charmhelpers/cli/__init__.py 1970-01-01 00:00:00 +0000 | |||
158 | +++ hooks/charmhelpers/cli/__init__.py 2016-01-06 21:19:13 +0000 | |||
159 | @@ -0,0 +1,191 @@ | |||
160 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
161 | 2 | # | ||
162 | 3 | # This file is part of charm-helpers. | ||
163 | 4 | # | ||
164 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
165 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
166 | 7 | # published by the Free Software Foundation. | ||
167 | 8 | # | ||
168 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
169 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
170 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
171 | 12 | # GNU Lesser General Public License for more details. | ||
172 | 13 | # | ||
173 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
174 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
175 | 16 | |||
176 | 17 | import inspect | ||
177 | 18 | import argparse | ||
178 | 19 | import sys | ||
179 | 20 | |||
180 | 21 | from six.moves import zip | ||
181 | 22 | |||
182 | 23 | import charmhelpers.core.unitdata | ||
183 | 24 | |||
184 | 25 | |||
185 | 26 | class OutputFormatter(object): | ||
186 | 27 | def __init__(self, outfile=sys.stdout): | ||
187 | 28 | self.formats = ( | ||
188 | 29 | "raw", | ||
189 | 30 | "json", | ||
190 | 31 | "py", | ||
191 | 32 | "yaml", | ||
192 | 33 | "csv", | ||
193 | 34 | "tab", | ||
194 | 35 | ) | ||
195 | 36 | self.outfile = outfile | ||
196 | 37 | |||
197 | 38 | def add_arguments(self, argument_parser): | ||
198 | 39 | formatgroup = argument_parser.add_mutually_exclusive_group() | ||
199 | 40 | choices = self.supported_formats | ||
200 | 41 | formatgroup.add_argument("--format", metavar='FMT', | ||
201 | 42 | help="Select output format for returned data, " | ||
202 | 43 | "where FMT is one of: {}".format(choices), | ||
203 | 44 | choices=choices, default='raw') | ||
204 | 45 | for fmt in self.formats: | ||
205 | 46 | fmtfunc = getattr(self, fmt) | ||
206 | 47 | formatgroup.add_argument("-{}".format(fmt[0]), | ||
207 | 48 | "--{}".format(fmt), action='store_const', | ||
208 | 49 | const=fmt, dest='format', | ||
209 | 50 | help=fmtfunc.__doc__) | ||
210 | 51 | |||
211 | 52 | @property | ||
212 | 53 | def supported_formats(self): | ||
213 | 54 | return self.formats | ||
214 | 55 | |||
215 | 56 | def raw(self, output): | ||
216 | 57 | """Output data as raw string (default)""" | ||
217 | 58 | if isinstance(output, (list, tuple)): | ||
218 | 59 | output = '\n'.join(map(str, output)) | ||
219 | 60 | self.outfile.write(str(output)) | ||
220 | 61 | |||
221 | 62 | def py(self, output): | ||
222 | 63 | """Output data as a nicely-formatted python data structure""" | ||
223 | 64 | import pprint | ||
224 | 65 | pprint.pprint(output, stream=self.outfile) | ||
225 | 66 | |||
226 | 67 | def json(self, output): | ||
227 | 68 | """Output data in JSON format""" | ||
228 | 69 | import json | ||
229 | 70 | json.dump(output, self.outfile) | ||
230 | 71 | |||
231 | 72 | def yaml(self, output): | ||
232 | 73 | """Output data in YAML format""" | ||
233 | 74 | import yaml | ||
234 | 75 | yaml.safe_dump(output, self.outfile) | ||
235 | 76 | |||
236 | 77 | def csv(self, output): | ||
237 | 78 | """Output data as excel-compatible CSV""" | ||
238 | 79 | import csv | ||
239 | 80 | csvwriter = csv.writer(self.outfile) | ||
240 | 81 | csvwriter.writerows(output) | ||
241 | 82 | |||
242 | 83 | def tab(self, output): | ||
243 | 84 | """Output data in excel-compatible tab-delimited format""" | ||
244 | 85 | import csv | ||
245 | 86 | csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) | ||
246 | 87 | csvwriter.writerows(output) | ||
247 | 88 | |||
248 | 89 | def format_output(self, output, fmt='raw'): | ||
249 | 90 | fmtfunc = getattr(self, fmt) | ||
250 | 91 | fmtfunc(output) | ||
251 | 92 | |||
252 | 93 | |||
253 | 94 | class CommandLine(object): | ||
254 | 95 | argument_parser = None | ||
255 | 96 | subparsers = None | ||
256 | 97 | formatter = None | ||
257 | 98 | exit_code = 0 | ||
258 | 99 | |||
259 | 100 | def __init__(self): | ||
260 | 101 | if not self.argument_parser: | ||
261 | 102 | self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') | ||
262 | 103 | if not self.formatter: | ||
263 | 104 | self.formatter = OutputFormatter() | ||
264 | 105 | self.formatter.add_arguments(self.argument_parser) | ||
265 | 106 | if not self.subparsers: | ||
266 | 107 | self.subparsers = self.argument_parser.add_subparsers(help='Commands') | ||
267 | 108 | |||
268 | 109 | def subcommand(self, command_name=None): | ||
269 | 110 | """ | ||
270 | 111 | Decorate a function as a subcommand. Use its arguments as the | ||
271 | 112 | command-line arguments""" | ||
272 | 113 | def wrapper(decorated): | ||
273 | 114 | cmd_name = command_name or decorated.__name__ | ||
274 | 115 | subparser = self.subparsers.add_parser(cmd_name, | ||
275 | 116 | description=decorated.__doc__) | ||
276 | 117 | for args, kwargs in describe_arguments(decorated): | ||
277 | 118 | subparser.add_argument(*args, **kwargs) | ||
278 | 119 | subparser.set_defaults(func=decorated) | ||
279 | 120 | return decorated | ||
280 | 121 | return wrapper | ||
281 | 122 | |||
282 | 123 | def test_command(self, decorated): | ||
283 | 124 | """ | ||
284 | 125 | Subcommand is a boolean test function, so bool return values should be | ||
285 | 126 | converted to a 0/1 exit code. | ||
286 | 127 | """ | ||
287 | 128 | decorated._cli_test_command = True | ||
288 | 129 | return decorated | ||
289 | 130 | |||
290 | 131 | def no_output(self, decorated): | ||
291 | 132 | """ | ||
292 | 133 | Subcommand is not expected to return a value, so don't print a spurious None. | ||
293 | 134 | """ | ||
294 | 135 | decorated._cli_no_output = True | ||
295 | 136 | return decorated | ||
296 | 137 | |||
297 | 138 | def subcommand_builder(self, command_name, description=None): | ||
298 | 139 | """ | ||
299 | 140 | Decorate a function that builds a subcommand. Builders should accept a | ||
300 | 141 | single argument (the subparser instance) and return the function to be | ||
301 | 142 | run as the command.""" | ||
302 | 143 | def wrapper(decorated): | ||
303 | 144 | subparser = self.subparsers.add_parser(command_name) | ||
304 | 145 | func = decorated(subparser) | ||
305 | 146 | subparser.set_defaults(func=func) | ||
306 | 147 | subparser.description = description or func.__doc__ | ||
307 | 148 | return wrapper | ||
308 | 149 | |||
309 | 150 | def run(self): | ||
310 | 151 | "Run cli, processing arguments and executing subcommands." | ||
311 | 152 | arguments = self.argument_parser.parse_args() | ||
312 | 153 | argspec = inspect.getargspec(arguments.func) | ||
313 | 154 | vargs = [] | ||
314 | 155 | for arg in argspec.args: | ||
315 | 156 | vargs.append(getattr(arguments, arg)) | ||
316 | 157 | if argspec.varargs: | ||
317 | 158 | vargs.extend(getattr(arguments, argspec.varargs)) | ||
318 | 159 | output = arguments.func(*vargs) | ||
319 | 160 | if getattr(arguments.func, '_cli_test_command', False): | ||
320 | 161 | self.exit_code = 0 if output else 1 | ||
321 | 162 | output = '' | ||
322 | 163 | if getattr(arguments.func, '_cli_no_output', False): | ||
323 | 164 | output = '' | ||
324 | 165 | self.formatter.format_output(output, arguments.format) | ||
325 | 166 | if charmhelpers.core.unitdata._KV: | ||
326 | 167 | charmhelpers.core.unitdata._KV.flush() | ||
327 | 168 | |||
328 | 169 | |||
329 | 170 | cmdline = CommandLine() | ||
330 | 171 | |||
331 | 172 | |||
332 | 173 | def describe_arguments(func): | ||
333 | 174 | """ | ||
334 | 175 | Analyze a function's signature and return a data structure suitable for | ||
335 | 176 | passing in as arguments to an argparse parser's add_argument() method.""" | ||
336 | 177 | |||
337 | 178 | argspec = inspect.getargspec(func) | ||
338 | 179 | # we should probably raise an exception somewhere if func includes **kwargs | ||
339 | 180 | if argspec.defaults: | ||
340 | 181 | positional_args = argspec.args[:-len(argspec.defaults)] | ||
341 | 182 | keyword_names = argspec.args[-len(argspec.defaults):] | ||
342 | 183 | for arg, default in zip(keyword_names, argspec.defaults): | ||
343 | 184 | yield ('--{}'.format(arg),), {'default': default} | ||
344 | 185 | else: | ||
345 | 186 | positional_args = argspec.args | ||
346 | 187 | |||
347 | 188 | for arg in positional_args: | ||
348 | 189 | yield (arg,), {} | ||
349 | 190 | if argspec.varargs: | ||
350 | 191 | yield (argspec.varargs,), {'nargs': '*'} | ||
351 | 0 | 192 | ||
352 | === added file 'hooks/charmhelpers/cli/benchmark.py' | |||
353 | --- hooks/charmhelpers/cli/benchmark.py 1970-01-01 00:00:00 +0000 | |||
354 | +++ hooks/charmhelpers/cli/benchmark.py 2016-01-06 21:19:13 +0000 | |||
355 | @@ -0,0 +1,36 @@ | |||
356 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
357 | 2 | # | ||
358 | 3 | # This file is part of charm-helpers. | ||
359 | 4 | # | ||
360 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
361 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
362 | 7 | # published by the Free Software Foundation. | ||
363 | 8 | # | ||
364 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
365 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
366 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
367 | 12 | # GNU Lesser General Public License for more details. | ||
368 | 13 | # | ||
369 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
370 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
371 | 16 | |||
372 | 17 | from . import cmdline | ||
373 | 18 | from charmhelpers.contrib.benchmark import Benchmark | ||
374 | 19 | |||
375 | 20 | |||
376 | 21 | @cmdline.subcommand(command_name='benchmark-start') | ||
377 | 22 | def start(): | ||
378 | 23 | Benchmark.start() | ||
379 | 24 | |||
380 | 25 | |||
381 | 26 | @cmdline.subcommand(command_name='benchmark-finish') | ||
382 | 27 | def finish(): | ||
383 | 28 | Benchmark.finish() | ||
384 | 29 | |||
385 | 30 | |||
386 | 31 | @cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") | ||
387 | 32 | def service(subparser): | ||
388 | 33 | subparser.add_argument("value", help="The composite score.") | ||
389 | 34 | subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") | ||
390 | 35 | subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") | ||
391 | 36 | return Benchmark.set_composite_score | ||
392 | 0 | 37 | ||
393 | === added file 'hooks/charmhelpers/cli/commands.py' | |||
394 | --- hooks/charmhelpers/cli/commands.py 1970-01-01 00:00:00 +0000 | |||
395 | +++ hooks/charmhelpers/cli/commands.py 2016-01-06 21:19:13 +0000 | |||
396 | @@ -0,0 +1,32 @@ | |||
397 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
398 | 2 | # | ||
399 | 3 | # This file is part of charm-helpers. | ||
400 | 4 | # | ||
401 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
402 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
403 | 7 | # published by the Free Software Foundation. | ||
404 | 8 | # | ||
405 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
406 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
407 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
408 | 12 | # GNU Lesser General Public License for more details. | ||
409 | 13 | # | ||
410 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
411 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
412 | 16 | |||
413 | 17 | """ | ||
414 | 18 | This module loads sub-modules into the python runtime so they can be | ||
415 | 19 | discovered via the inspect module. In order to prevent flake8 from (rightfully) | ||
416 | 20 | telling us these are unused modules, throw a ' # noqa' at the end of each import | ||
417 | 21 | so that the warning is suppressed. | ||
418 | 22 | """ | ||
419 | 23 | |||
420 | 24 | from . import CommandLine # noqa | ||
421 | 25 | |||
422 | 26 | """ | ||
423 | 27 | Import the sub-modules which have decorated subcommands to register with chlp. | ||
424 | 28 | """ | ||
425 | 29 | from . import host # noqa | ||
426 | 30 | from . import benchmark # noqa | ||
427 | 31 | from . import unitdata # noqa | ||
428 | 32 | from . import hookenv # noqa | ||
429 | 0 | 33 | ||
430 | === added file 'hooks/charmhelpers/cli/hookenv.py' | |||
431 | --- hooks/charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000 | |||
432 | +++ hooks/charmhelpers/cli/hookenv.py 2016-01-06 21:19:13 +0000 | |||
433 | @@ -0,0 +1,23 @@ | |||
434 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
435 | 2 | # | ||
436 | 3 | # This file is part of charm-helpers. | ||
437 | 4 | # | ||
438 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
439 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
440 | 7 | # published by the Free Software Foundation. | ||
441 | 8 | # | ||
442 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
443 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
444 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
445 | 12 | # GNU Lesser General Public License for more details. | ||
446 | 13 | # | ||
447 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
448 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
449 | 16 | |||
450 | 17 | from . import cmdline | ||
451 | 18 | from charmhelpers.core import hookenv | ||
452 | 19 | |||
453 | 20 | |||
454 | 21 | cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) | ||
455 | 22 | cmdline.subcommand('service-name')(hookenv.service_name) | ||
456 | 23 | cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) | ||
457 | 0 | 24 | ||
458 | === added file 'hooks/charmhelpers/cli/host.py' | |||
459 | --- hooks/charmhelpers/cli/host.py 1970-01-01 00:00:00 +0000 | |||
460 | +++ hooks/charmhelpers/cli/host.py 2016-01-06 21:19:13 +0000 | |||
461 | @@ -0,0 +1,31 @@ | |||
462 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
463 | 2 | # | ||
464 | 3 | # This file is part of charm-helpers. | ||
465 | 4 | # | ||
466 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
467 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
468 | 7 | # published by the Free Software Foundation. | ||
469 | 8 | # | ||
470 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
471 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
472 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
473 | 12 | # GNU Lesser General Public License for more details. | ||
474 | 13 | # | ||
475 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
476 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
477 | 16 | |||
478 | 17 | from . import cmdline | ||
479 | 18 | from charmhelpers.core import host | ||
480 | 19 | |||
481 | 20 | |||
482 | 21 | @cmdline.subcommand() | ||
483 | 22 | def mounts(): | ||
484 | 23 | "List mounts" | ||
485 | 24 | return host.mounts() | ||
486 | 25 | |||
487 | 26 | |||
488 | 27 | @cmdline.subcommand_builder('service', description="Control system services") | ||
489 | 28 | def service(subparser): | ||
490 | 29 | subparser.add_argument("action", help="The action to perform (start, stop, etc...)") | ||
491 | 30 | subparser.add_argument("service_name", help="Name of the service to control") | ||
492 | 31 | return host.service | ||
493 | 0 | 32 | ||
494 | === added file 'hooks/charmhelpers/cli/unitdata.py' | |||
495 | --- hooks/charmhelpers/cli/unitdata.py 1970-01-01 00:00:00 +0000 | |||
496 | +++ hooks/charmhelpers/cli/unitdata.py 2016-01-06 21:19:13 +0000 | |||
497 | @@ -0,0 +1,39 @@ | |||
498 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
499 | 2 | # | ||
500 | 3 | # This file is part of charm-helpers. | ||
501 | 4 | # | ||
502 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
503 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
504 | 7 | # published by the Free Software Foundation. | ||
505 | 8 | # | ||
506 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
507 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
508 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
509 | 12 | # GNU Lesser General Public License for more details. | ||
510 | 13 | # | ||
511 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
512 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
513 | 16 | |||
514 | 17 | from . import cmdline | ||
515 | 18 | from charmhelpers.core import unitdata | ||
516 | 19 | |||
517 | 20 | |||
518 | 21 | @cmdline.subcommand_builder('unitdata', description="Store and retrieve data") | ||
519 | 22 | def unitdata_cmd(subparser): | ||
520 | 23 | nested = subparser.add_subparsers() | ||
521 | 24 | get_cmd = nested.add_parser('get', help='Retrieve data') | ||
522 | 25 | get_cmd.add_argument('key', help='Key to retrieve the value of') | ||
523 | 26 | get_cmd.set_defaults(action='get', value=None) | ||
524 | 27 | set_cmd = nested.add_parser('set', help='Store data') | ||
525 | 28 | set_cmd.add_argument('key', help='Key to set') | ||
526 | 29 | set_cmd.add_argument('value', help='Value to store') | ||
527 | 30 | set_cmd.set_defaults(action='set') | ||
528 | 31 | |||
529 | 32 | def _unitdata_cmd(action, key, value): | ||
530 | 33 | if action == 'get': | ||
531 | 34 | return unitdata.kv().get(key) | ||
532 | 35 | elif action == 'set': | ||
533 | 36 | unitdata.kv().set(key, value) | ||
534 | 37 | unitdata.kv().flush() | ||
535 | 38 | return '' | ||
536 | 39 | return _unitdata_cmd | ||
537 | 0 | 40 | ||
538 | === modified file 'hooks/charmhelpers/contrib/charmsupport/nrpe.py' | |||
539 | --- hooks/charmhelpers/contrib/charmsupport/nrpe.py 2015-04-19 09:03:07 +0000 | |||
540 | +++ hooks/charmhelpers/contrib/charmsupport/nrpe.py 2016-01-06 21:19:13 +0000 | |||
541 | @@ -148,6 +148,13 @@ | |||
542 | 148 | self.description = description | 148 | self.description = description |
543 | 149 | self.check_cmd = self._locate_cmd(check_cmd) | 149 | self.check_cmd = self._locate_cmd(check_cmd) |
544 | 150 | 150 | ||
545 | 151 | def _get_check_filename(self): | ||
546 | 152 | return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) | ||
547 | 153 | |||
548 | 154 | def _get_service_filename(self, hostname): | ||
549 | 155 | return os.path.join(NRPE.nagios_exportdir, | ||
550 | 156 | 'service__{}_{}.cfg'.format(hostname, self.command)) | ||
551 | 157 | |||
552 | 151 | def _locate_cmd(self, check_cmd): | 158 | def _locate_cmd(self, check_cmd): |
553 | 152 | search_path = ( | 159 | search_path = ( |
554 | 153 | '/usr/lib/nagios/plugins', | 160 | '/usr/lib/nagios/plugins', |
555 | @@ -163,9 +170,21 @@ | |||
556 | 163 | log('Check command not found: {}'.format(parts[0])) | 170 | log('Check command not found: {}'.format(parts[0])) |
557 | 164 | return '' | 171 | return '' |
558 | 165 | 172 | ||
559 | 173 | def _remove_service_files(self): | ||
560 | 174 | if not os.path.exists(NRPE.nagios_exportdir): | ||
561 | 175 | return | ||
562 | 176 | for f in os.listdir(NRPE.nagios_exportdir): | ||
563 | 177 | if f.endswith('_{}.cfg'.format(self.command)): | ||
564 | 178 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
565 | 179 | |||
566 | 180 | def remove(self, hostname): | ||
567 | 181 | nrpe_check_file = self._get_check_filename() | ||
568 | 182 | if os.path.exists(nrpe_check_file): | ||
569 | 183 | os.remove(nrpe_check_file) | ||
570 | 184 | self._remove_service_files() | ||
571 | 185 | |||
572 | 166 | def write(self, nagios_context, hostname, nagios_servicegroups): | 186 | def write(self, nagios_context, hostname, nagios_servicegroups): |
575 | 167 | nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( | 187 | nrpe_check_file = self._get_check_filename() |
574 | 168 | self.command) | ||
576 | 169 | with open(nrpe_check_file, 'w') as nrpe_check_config: | 188 | with open(nrpe_check_file, 'w') as nrpe_check_config: |
577 | 170 | nrpe_check_config.write("# check {}\n".format(self.shortname)) | 189 | nrpe_check_config.write("# check {}\n".format(self.shortname)) |
578 | 171 | nrpe_check_config.write("command[{}]={}\n".format( | 190 | nrpe_check_config.write("command[{}]={}\n".format( |
579 | @@ -180,9 +199,7 @@ | |||
580 | 180 | 199 | ||
581 | 181 | def write_service_config(self, nagios_context, hostname, | 200 | def write_service_config(self, nagios_context, hostname, |
582 | 182 | nagios_servicegroups): | 201 | nagios_servicegroups): |
586 | 183 | for f in os.listdir(NRPE.nagios_exportdir): | 202 | self._remove_service_files() |
584 | 184 | if re.search('.*{}.cfg'.format(self.command), f): | ||
585 | 185 | os.remove(os.path.join(NRPE.nagios_exportdir, f)) | ||
587 | 186 | 203 | ||
588 | 187 | templ_vars = { | 204 | templ_vars = { |
589 | 188 | 'nagios_hostname': hostname, | 205 | 'nagios_hostname': hostname, |
590 | @@ -192,8 +209,7 @@ | |||
591 | 192 | 'command': self.command, | 209 | 'command': self.command, |
592 | 193 | } | 210 | } |
593 | 194 | nrpe_service_text = Check.service_template.format(**templ_vars) | 211 | nrpe_service_text = Check.service_template.format(**templ_vars) |
596 | 195 | nrpe_service_file = '{}/service__{}_{}.cfg'.format( | 212 | nrpe_service_file = self._get_service_filename(hostname) |
595 | 196 | NRPE.nagios_exportdir, hostname, self.command) | ||
597 | 197 | with open(nrpe_service_file, 'w') as nrpe_service_config: | 213 | with open(nrpe_service_file, 'w') as nrpe_service_config: |
598 | 198 | nrpe_service_config.write(str(nrpe_service_text)) | 214 | nrpe_service_config.write(str(nrpe_service_text)) |
599 | 199 | 215 | ||
600 | @@ -218,12 +234,32 @@ | |||
601 | 218 | if hostname: | 234 | if hostname: |
602 | 219 | self.hostname = hostname | 235 | self.hostname = hostname |
603 | 220 | else: | 236 | else: |
605 | 221 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | 237 | nagios_hostname = get_nagios_hostname() |
606 | 238 | if nagios_hostname: | ||
607 | 239 | self.hostname = nagios_hostname | ||
608 | 240 | else: | ||
609 | 241 | self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) | ||
610 | 222 | self.checks = [] | 242 | self.checks = [] |
611 | 223 | 243 | ||
612 | 224 | def add_check(self, *args, **kwargs): | 244 | def add_check(self, *args, **kwargs): |
613 | 225 | self.checks.append(Check(*args, **kwargs)) | 245 | self.checks.append(Check(*args, **kwargs)) |
614 | 226 | 246 | ||
615 | 247 | def remove_check(self, *args, **kwargs): | ||
616 | 248 | if kwargs.get('shortname') is None: | ||
617 | 249 | raise ValueError('shortname of check must be specified') | ||
618 | 250 | |||
619 | 251 | # Use sensible defaults if they're not specified - these are not | ||
620 | 252 | # actually used during removal, but they're required for constructing | ||
621 | 253 | # the Check object; check_disk is chosen because it's part of the | ||
622 | 254 | # nagios-plugins-basic package. | ||
623 | 255 | if kwargs.get('check_cmd') is None: | ||
624 | 256 | kwargs['check_cmd'] = 'check_disk' | ||
625 | 257 | if kwargs.get('description') is None: | ||
626 | 258 | kwargs['description'] = '' | ||
627 | 259 | |||
628 | 260 | check = Check(*args, **kwargs) | ||
629 | 261 | check.remove(self.hostname) | ||
630 | 262 | |||
631 | 227 | def write(self): | 263 | def write(self): |
632 | 228 | try: | 264 | try: |
633 | 229 | nagios_uid = pwd.getpwnam('nagios').pw_uid | 265 | nagios_uid = pwd.getpwnam('nagios').pw_uid |
634 | @@ -260,7 +296,7 @@ | |||
635 | 260 | :param str relation_name: Name of relation nrpe sub joined to | 296 | :param str relation_name: Name of relation nrpe sub joined to |
636 | 261 | """ | 297 | """ |
637 | 262 | for rel in relations_of_type(relation_name): | 298 | for rel in relations_of_type(relation_name): |
639 | 263 | if 'nagios_hostname' in rel: | 299 | if 'nagios_host_context' in rel: |
640 | 264 | return rel['nagios_host_context'] | 300 | return rel['nagios_host_context'] |
641 | 265 | 301 | ||
642 | 266 | 302 | ||
643 | @@ -301,11 +337,13 @@ | |||
644 | 301 | upstart_init = '/etc/init/%s.conf' % svc | 337 | upstart_init = '/etc/init/%s.conf' % svc |
645 | 302 | sysv_init = '/etc/init.d/%s' % svc | 338 | sysv_init = '/etc/init.d/%s' % svc |
646 | 303 | if os.path.exists(upstart_init): | 339 | if os.path.exists(upstart_init): |
652 | 304 | nrpe.add_check( | 340 | # Don't add a check for these services from neutron-gateway |
653 | 305 | shortname=svc, | 341 | if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: |
654 | 306 | description='process check {%s}' % unit_name, | 342 | nrpe.add_check( |
655 | 307 | check_cmd='check_upstart_job %s' % svc | 343 | shortname=svc, |
656 | 308 | ) | 344 | description='process check {%s}' % unit_name, |
657 | 345 | check_cmd='check_upstart_job %s' % svc | ||
658 | 346 | ) | ||
659 | 309 | elif os.path.exists(sysv_init): | 347 | elif os.path.exists(sysv_init): |
660 | 310 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc | 348 | cronpath = '/etc/cron.d/nagios-service-check-%s' % svc |
661 | 311 | cron_file = ('*/5 * * * * root ' | 349 | cron_file = ('*/5 * * * * root ' |
662 | 312 | 350 | ||
663 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
664 | --- hooks/charmhelpers/contrib/network/ip.py 2015-10-22 13:19:13 +0000 | |||
665 | +++ hooks/charmhelpers/contrib/network/ip.py 2016-01-06 21:19:13 +0000 | |||
666 | @@ -53,7 +53,7 @@ | |||
667 | 53 | 53 | ||
668 | 54 | 54 | ||
669 | 55 | def no_ip_found_error_out(network): | 55 | def no_ip_found_error_out(network): |
671 | 56 | errmsg = ("No IP address found in network: %s" % network) | 56 | errmsg = ("No IP address found in network(s): %s" % network) |
672 | 57 | raise ValueError(errmsg) | 57 | raise ValueError(errmsg) |
673 | 58 | 58 | ||
674 | 59 | 59 | ||
675 | @@ -61,7 +61,7 @@ | |||
676 | 61 | """Get an IPv4 or IPv6 address within the network from the host. | 61 | """Get an IPv4 or IPv6 address within the network from the host. |
677 | 62 | 62 | ||
678 | 63 | :param network (str): CIDR presentation format. For example, | 63 | :param network (str): CIDR presentation format. For example, |
680 | 64 | '192.168.1.0/24'. | 64 | '192.168.1.0/24'. Supports multiple networks as a space-delimited list. |
681 | 65 | :param fallback (str): If no address is found, return fallback. | 65 | :param fallback (str): If no address is found, return fallback. |
682 | 66 | :param fatal (boolean): If no address is found, fallback is not | 66 | :param fatal (boolean): If no address is found, fallback is not |
683 | 67 | set and fatal is True then exit(1). | 67 | set and fatal is True then exit(1). |
684 | @@ -75,24 +75,26 @@ | |||
685 | 75 | else: | 75 | else: |
686 | 76 | return None | 76 | return None |
687 | 77 | 77 | ||
698 | 78 | _validate_cidr(network) | 78 | networks = network.split() or [network] |
699 | 79 | network = netaddr.IPNetwork(network) | 79 | for network in networks: |
700 | 80 | for iface in netifaces.interfaces(): | 80 | _validate_cidr(network) |
701 | 81 | addresses = netifaces.ifaddresses(iface) | 81 | network = netaddr.IPNetwork(network) |
702 | 82 | if network.version == 4 and netifaces.AF_INET in addresses: | 82 | for iface in netifaces.interfaces(): |
703 | 83 | addr = addresses[netifaces.AF_INET][0]['addr'] | 83 | addresses = netifaces.ifaddresses(iface) |
704 | 84 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | 84 | if network.version == 4 and netifaces.AF_INET in addresses: |
705 | 85 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | 85 | addr = addresses[netifaces.AF_INET][0]['addr'] |
706 | 86 | if cidr in network: | 86 | netmask = addresses[netifaces.AF_INET][0]['netmask'] |
707 | 87 | return str(cidr.ip) | 87 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
708 | 88 | if cidr in network: | ||
709 | 89 | return str(cidr.ip) | ||
710 | 88 | 90 | ||
718 | 89 | if network.version == 6 and netifaces.AF_INET6 in addresses: | 91 | if network.version == 6 and netifaces.AF_INET6 in addresses: |
719 | 90 | for addr in addresses[netifaces.AF_INET6]: | 92 | for addr in addresses[netifaces.AF_INET6]: |
720 | 91 | if not addr['addr'].startswith('fe80'): | 93 | if not addr['addr'].startswith('fe80'): |
721 | 92 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | 94 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
722 | 93 | addr['netmask'])) | 95 | addr['netmask'])) |
723 | 94 | if cidr in network: | 96 | if cidr in network: |
724 | 95 | return str(cidr.ip) | 97 | return str(cidr.ip) |
725 | 96 | 98 | ||
726 | 97 | if fallback is not None: | 99 | if fallback is not None: |
727 | 98 | return fallback | 100 | return fallback |
728 | 99 | 101 | ||
729 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
730 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-22 13:19:13 +0000 | |||
731 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2016-01-06 21:19:13 +0000 | |||
732 | @@ -14,12 +14,18 @@ | |||
733 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
734 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
735 | 16 | 16 | ||
736 | 17 | import logging | ||
737 | 18 | import re | ||
738 | 19 | import sys | ||
739 | 17 | import six | 20 | import six |
740 | 18 | from collections import OrderedDict | 21 | from collections import OrderedDict |
741 | 19 | from charmhelpers.contrib.amulet.deployment import ( | 22 | from charmhelpers.contrib.amulet.deployment import ( |
742 | 20 | AmuletDeployment | 23 | AmuletDeployment |
743 | 21 | ) | 24 | ) |
744 | 22 | 25 | ||
745 | 26 | DEBUG = logging.DEBUG | ||
746 | 27 | ERROR = logging.ERROR | ||
747 | 28 | |||
748 | 23 | 29 | ||
749 | 24 | class OpenStackAmuletDeployment(AmuletDeployment): | 30 | class OpenStackAmuletDeployment(AmuletDeployment): |
750 | 25 | """OpenStack amulet deployment. | 31 | """OpenStack amulet deployment. |
751 | @@ -28,9 +34,12 @@ | |||
752 | 28 | that is specifically for use by OpenStack charms. | 34 | that is specifically for use by OpenStack charms. |
753 | 29 | """ | 35 | """ |
754 | 30 | 36 | ||
756 | 31 | def __init__(self, series=None, openstack=None, source=None, stable=True): | 37 | def __init__(self, series=None, openstack=None, source=None, |
757 | 38 | stable=True, log_level=DEBUG): | ||
758 | 32 | """Initialize the deployment environment.""" | 39 | """Initialize the deployment environment.""" |
759 | 33 | super(OpenStackAmuletDeployment, self).__init__(series) | 40 | super(OpenStackAmuletDeployment, self).__init__(series) |
760 | 41 | self.log = self.get_logger(level=log_level) | ||
761 | 42 | self.log.info('OpenStackAmuletDeployment: init') | ||
762 | 34 | self.openstack = openstack | 43 | self.openstack = openstack |
763 | 35 | self.source = source | 44 | self.source = source |
764 | 36 | self.stable = stable | 45 | self.stable = stable |
765 | @@ -38,20 +47,49 @@ | |||
766 | 38 | # out. | 47 | # out. |
767 | 39 | self.current_next = "trusty" | 48 | self.current_next = "trusty" |
768 | 40 | 49 | ||
769 | 50 | def get_logger(self, name="deployment-logger", level=logging.DEBUG): | ||
770 | 51 | """Get a logger object that will log to stdout.""" | ||
771 | 52 | log = logging | ||
772 | 53 | logger = log.getLogger(name) | ||
773 | 54 | fmt = log.Formatter("%(asctime)s %(funcName)s " | ||
774 | 55 | "%(levelname)s: %(message)s") | ||
775 | 56 | |||
776 | 57 | handler = log.StreamHandler(stream=sys.stdout) | ||
777 | 58 | handler.setLevel(level) | ||
778 | 59 | handler.setFormatter(fmt) | ||
779 | 60 | |||
780 | 61 | logger.addHandler(handler) | ||
781 | 62 | logger.setLevel(level) | ||
782 | 63 | |||
783 | 64 | return logger | ||
784 | 65 | |||
785 | 41 | def _determine_branch_locations(self, other_services): | 66 | def _determine_branch_locations(self, other_services): |
786 | 42 | """Determine the branch locations for the other services. | 67 | """Determine the branch locations for the other services. |
787 | 43 | 68 | ||
788 | 44 | Determine if the local branch being tested is derived from its | 69 | Determine if the local branch being tested is derived from its |
789 | 45 | stable or next (dev) branch, and based on this, use the corresonding | 70 | stable or next (dev) branch, and based on this, use the corresonding |
790 | 46 | stable or next branches for the other_services.""" | 71 | stable or next branches for the other_services.""" |
799 | 47 | 72 | <<<<<<< TREE | |
800 | 48 | # Charms outside the lp:~openstack-charmers namespace | 73 | |
801 | 49 | base_charms = ['mysql', 'mongodb', 'nrpe'] | 74 | # Charms outside the lp:~openstack-charmers namespace |
802 | 50 | 75 | base_charms = ['mysql', 'mongodb', 'nrpe'] | |
803 | 51 | # Force these charms to current series even when using an older series. | 76 | |
804 | 52 | # ie. Use trusty/nrpe even when series is precise, as the P charm | 77 | # Force these charms to current series even when using an older series. |
805 | 53 | # does not possess the necessary external master config and hooks. | 78 | # ie. Use trusty/nrpe even when series is precise, as the P charm |
806 | 54 | force_series_current = ['nrpe'] | 79 | # does not possess the necessary external master config and hooks. |
807 | 80 | force_series_current = ['nrpe'] | ||
808 | 81 | ======= | ||
809 | 82 | |||
810 | 83 | self.log.info('OpenStackAmuletDeployment: determine branch locations') | ||
811 | 84 | |||
812 | 85 | # Charms outside the lp:~openstack-charmers namespace | ||
813 | 86 | base_charms = ['mysql', 'mongodb', 'nrpe'] | ||
814 | 87 | |||
815 | 88 | # Force these charms to current series even when using an older series. | ||
816 | 89 | # ie. Use trusty/nrpe even when series is precise, as the P charm | ||
817 | 90 | # does not possess the necessary external master config and hooks. | ||
818 | 91 | force_series_current = ['nrpe'] | ||
819 | 92 | >>>>>>> MERGE-SOURCE | ||
820 | 55 | 93 | ||
821 | 56 | if self.series in ['precise', 'trusty']: | 94 | if self.series in ['precise', 'trusty']: |
822 | 57 | base_series = self.series | 95 | base_series = self.series |
823 | @@ -82,6 +120,8 @@ | |||
824 | 82 | 120 | ||
825 | 83 | def _add_services(self, this_service, other_services): | 121 | def _add_services(self, this_service, other_services): |
826 | 84 | """Add services to the deployment and set openstack-origin/source.""" | 122 | """Add services to the deployment and set openstack-origin/source.""" |
827 | 123 | self.log.info('OpenStackAmuletDeployment: adding services') | ||
828 | 124 | |||
829 | 85 | other_services = self._determine_branch_locations(other_services) | 125 | other_services = self._determine_branch_locations(other_services) |
830 | 86 | 126 | ||
831 | 87 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | 127 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
832 | @@ -93,9 +133,16 @@ | |||
833 | 93 | # Charms which should use the source config option | 133 | # Charms which should use the source config option |
834 | 94 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 134 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
835 | 95 | 'ceph-osd', 'ceph-radosgw'] | 135 | 'ceph-osd', 'ceph-radosgw'] |
836 | 136 | <<<<<<< TREE | ||
837 | 96 | 137 | ||
838 | 97 | # Charms which can not use openstack-origin, ie. many subordinates | 138 | # Charms which can not use openstack-origin, ie. many subordinates |
839 | 98 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] | 139 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] |
840 | 140 | ======= | ||
841 | 141 | |||
842 | 142 | # Charms which can not use openstack-origin, ie. many subordinates | ||
843 | 143 | no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe', | ||
844 | 144 | 'openvswitch-odl', 'neutron-api-odl', 'odl-controller'] | ||
845 | 145 | >>>>>>> MERGE-SOURCE | ||
846 | 99 | 146 | ||
847 | 100 | if self.openstack: | 147 | if self.openstack: |
848 | 101 | for svc in services: | 148 | for svc in services: |
849 | @@ -111,9 +158,79 @@ | |||
850 | 111 | 158 | ||
851 | 112 | def _configure_services(self, configs): | 159 | def _configure_services(self, configs): |
852 | 113 | """Configure all of the services.""" | 160 | """Configure all of the services.""" |
853 | 161 | self.log.info('OpenStackAmuletDeployment: configure services') | ||
854 | 114 | for service, config in six.iteritems(configs): | 162 | for service, config in six.iteritems(configs): |
855 | 115 | self.d.configure(service, config) | 163 | self.d.configure(service, config) |
856 | 116 | 164 | ||
857 | 165 | def _auto_wait_for_status(self, message=None, exclude_services=None, | ||
858 | 166 | include_only=None, timeout=1800): | ||
859 | 167 | """Wait for all units to have a specific extended status, except | ||
860 | 168 | for any defined as excluded. Unless specified via message, any | ||
861 | 169 | status containing any case of 'ready' will be considered a match. | ||
862 | 170 | |||
863 | 171 | Examples of message usage: | ||
864 | 172 | |||
865 | 173 | Wait for all unit status to CONTAIN any case of 'ready' or 'ok': | ||
866 | 174 | message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE) | ||
867 | 175 | |||
868 | 176 | Wait for all units to reach this status (exact match): | ||
869 | 177 | message = re.compile('^Unit is ready and clustered$') | ||
870 | 178 | |||
871 | 179 | Wait for all units to reach any one of these (exact match): | ||
872 | 180 | message = re.compile('Unit is ready|OK|Ready') | ||
873 | 181 | |||
874 | 182 | Wait for at least one unit to reach this status (exact match): | ||
875 | 183 | message = {'ready'} | ||
876 | 184 | |||
877 | 185 | See Amulet's sentry.wait_for_messages() for message usage detail. | ||
878 | 186 | https://github.com/juju/amulet/blob/master/amulet/sentry.py | ||
879 | 187 | |||
880 | 188 | :param message: Expected status match | ||
881 | 189 | :param exclude_services: List of juju service names to ignore, | ||
882 | 190 | not to be used in conjuction with include_only. | ||
883 | 191 | :param include_only: List of juju service names to exclusively check, | ||
884 | 192 | not to be used in conjuction with exclude_services. | ||
885 | 193 | :param timeout: Maximum time in seconds to wait for status match | ||
886 | 194 | :returns: None. Raises if timeout is hit. | ||
887 | 195 | """ | ||
888 | 196 | self.log.info('Waiting for extended status on units...') | ||
889 | 197 | |||
890 | 198 | all_services = self.d.services.keys() | ||
891 | 199 | |||
892 | 200 | if exclude_services and include_only: | ||
893 | 201 | raise ValueError('exclude_services can not be used ' | ||
894 | 202 | 'with include_only') | ||
895 | 203 | |||
896 | 204 | if message: | ||
897 | 205 | if isinstance(message, re._pattern_type): | ||
898 | 206 | match = message.pattern | ||
899 | 207 | else: | ||
900 | 208 | match = message | ||
901 | 209 | |||
902 | 210 | self.log.debug('Custom extended status wait match: ' | ||
903 | 211 | '{}'.format(match)) | ||
904 | 212 | else: | ||
905 | 213 | self.log.debug('Default extended status wait match: contains ' | ||
906 | 214 | 'READY (case-insensitive)') | ||
907 | 215 | message = re.compile('.*ready.*', re.IGNORECASE) | ||
908 | 216 | |||
909 | 217 | if exclude_services: | ||
910 | 218 | self.log.debug('Excluding services from extended status match: ' | ||
911 | 219 | '{}'.format(exclude_services)) | ||
912 | 220 | else: | ||
913 | 221 | exclude_services = [] | ||
914 | 222 | |||
915 | 223 | if include_only: | ||
916 | 224 | services = include_only | ||
917 | 225 | else: | ||
918 | 226 | services = list(set(all_services) - set(exclude_services)) | ||
919 | 227 | |||
920 | 228 | self.log.debug('Waiting up to {}s for extended status on services: ' | ||
921 | 229 | '{}'.format(timeout, services)) | ||
922 | 230 | service_messages = {service: message for service in services} | ||
923 | 231 | self.d.sentry.wait_for_messages(service_messages, timeout=timeout) | ||
924 | 232 | self.log.info('OK') | ||
925 | 233 | |||
926 | 117 | def _get_openstack_release(self): | 234 | def _get_openstack_release(self): |
927 | 118 | """Get openstack release. | 235 | """Get openstack release. |
928 | 119 | 236 | ||
929 | @@ -124,8 +241,14 @@ | |||
930 | 124 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | 241 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, |
931 | 125 | self.precise_havana, self.precise_icehouse, | 242 | self.precise_havana, self.precise_icehouse, |
932 | 126 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, | 243 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
933 | 244 | <<<<<<< TREE | ||
934 | 127 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, | 245 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
935 | 128 | self.wily_liberty) = range(12) | 246 | self.wily_liberty) = range(12) |
936 | 247 | ======= | ||
937 | 248 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, | ||
938 | 249 | self.wily_liberty, self.trusty_mitaka, | ||
939 | 250 | self.xenial_mitaka) = range(14) | ||
940 | 251 | >>>>>>> MERGE-SOURCE | ||
941 | 129 | 252 | ||
942 | 130 | releases = { | 253 | releases = { |
943 | 131 | ('precise', None): self.precise_essex, | 254 | ('precise', None): self.precise_essex, |
944 | @@ -136,10 +259,21 @@ | |||
945 | 136 | ('trusty', None): self.trusty_icehouse, | 259 | ('trusty', None): self.trusty_icehouse, |
946 | 137 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, | 260 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
947 | 138 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, | 261 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
949 | 139 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | 262 | <<<<<<< TREE |
950 | 263 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | ||
951 | 264 | ======= | ||
952 | 265 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | ||
953 | 266 | ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka, | ||
954 | 267 | >>>>>>> MERGE-SOURCE | ||
955 | 140 | ('utopic', None): self.utopic_juno, | 268 | ('utopic', None): self.utopic_juno, |
956 | 269 | <<<<<<< TREE | ||
957 | 141 | ('vivid', None): self.vivid_kilo, | 270 | ('vivid', None): self.vivid_kilo, |
958 | 142 | ('wily', None): self.wily_liberty} | 271 | ('wily', None): self.wily_liberty} |
959 | 272 | ======= | ||
960 | 273 | ('vivid', None): self.vivid_kilo, | ||
961 | 274 | ('wily', None): self.wily_liberty, | ||
962 | 275 | ('xenial', None): self.xenial_mitaka} | ||
963 | 276 | >>>>>>> MERGE-SOURCE | ||
964 | 143 | return releases[(self.series, self.openstack)] | 277 | return releases[(self.series, self.openstack)] |
965 | 144 | 278 | ||
966 | 145 | def _get_openstack_release_string(self): | 279 | def _get_openstack_release_string(self): |
967 | @@ -155,7 +289,12 @@ | |||
968 | 155 | ('trusty', 'icehouse'), | 289 | ('trusty', 'icehouse'), |
969 | 156 | ('utopic', 'juno'), | 290 | ('utopic', 'juno'), |
970 | 157 | ('vivid', 'kilo'), | 291 | ('vivid', 'kilo'), |
972 | 158 | ('wily', 'liberty'), | 292 | <<<<<<< TREE |
973 | 293 | ('wily', 'liberty'), | ||
974 | 294 | ======= | ||
975 | 295 | ('wily', 'liberty'), | ||
976 | 296 | ('xenial', 'mitaka'), | ||
977 | 297 | >>>>>>> MERGE-SOURCE | ||
978 | 159 | ]) | 298 | ]) |
979 | 160 | if self.openstack: | 299 | if self.openstack: |
980 | 161 | os_origin = self.openstack.split(':')[1] | 300 | os_origin = self.openstack.split(':')[1] |
981 | 162 | 301 | ||
982 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
983 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-10-22 13:19:13 +0000 | |||
984 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2016-01-06 21:19:13 +0000 | |||
985 | @@ -18,7 +18,12 @@ | |||
986 | 18 | import json | 18 | import json |
987 | 19 | import logging | 19 | import logging |
988 | 20 | import os | 20 | import os |
990 | 21 | import six | 21 | <<<<<<< TREE |
991 | 22 | import six | ||
992 | 23 | ======= | ||
993 | 24 | import re | ||
994 | 25 | import six | ||
995 | 26 | >>>>>>> MERGE-SOURCE | ||
996 | 22 | import time | 27 | import time |
997 | 23 | import urllib | 28 | import urllib |
998 | 24 | 29 | ||
999 | @@ -341,6 +346,7 @@ | |||
1000 | 341 | 346 | ||
1001 | 342 | def delete_instance(self, nova, instance): | 347 | def delete_instance(self, nova, instance): |
1002 | 343 | """Delete the specified instance.""" | 348 | """Delete the specified instance.""" |
1003 | 349 | <<<<<<< TREE | ||
1004 | 344 | 350 | ||
1005 | 345 | # /!\ DEPRECATION WARNING | 351 | # /!\ DEPRECATION WARNING |
1006 | 346 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | 352 | self.log.warn('/!\\ DEPRECATION WARNING: use ' |
1007 | @@ -961,3 +967,646 @@ | |||
1008 | 961 | else: | 967 | else: |
1009 | 962 | msg = 'No message retrieved.' | 968 | msg = 'No message retrieved.' |
1010 | 963 | amulet.raise_status(amulet.FAIL, msg) | 969 | amulet.raise_status(amulet.FAIL, msg) |
1011 | 970 | ======= | ||
1012 | 971 | |||
1013 | 972 | # /!\ DEPRECATION WARNING | ||
1014 | 973 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | ||
1015 | 974 | 'delete_resource instead of delete_instance.') | ||
1016 | 975 | self.log.debug('Deleting instance ({})...'.format(instance)) | ||
1017 | 976 | return self.delete_resource(nova.servers, instance, | ||
1018 | 977 | msg='nova instance') | ||
1019 | 978 | |||
1020 | 979 | def create_or_get_keypair(self, nova, keypair_name="testkey"): | ||
1021 | 980 | """Create a new keypair, or return pointer if it already exists.""" | ||
1022 | 981 | try: | ||
1023 | 982 | _keypair = nova.keypairs.get(keypair_name) | ||
1024 | 983 | self.log.debug('Keypair ({}) already exists, ' | ||
1025 | 984 | 'using it.'.format(keypair_name)) | ||
1026 | 985 | return _keypair | ||
1027 | 986 | except: | ||
1028 | 987 | self.log.debug('Keypair ({}) does not exist, ' | ||
1029 | 988 | 'creating it.'.format(keypair_name)) | ||
1030 | 989 | |||
1031 | 990 | _keypair = nova.keypairs.create(name=keypair_name) | ||
1032 | 991 | return _keypair | ||
1033 | 992 | |||
1034 | 993 | def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, | ||
1035 | 994 | img_id=None, src_vol_id=None, snap_id=None): | ||
1036 | 995 | """Create cinder volume, optionally from a glance image, OR | ||
1037 | 996 | optionally as a clone of an existing volume, OR optionally | ||
1038 | 997 | from a snapshot. Wait for the new volume status to reach | ||
1039 | 998 | the expected status, validate and return a resource pointer. | ||
1040 | 999 | |||
1041 | 1000 | :param vol_name: cinder volume display name | ||
1042 | 1001 | :param vol_size: size in gigabytes | ||
1043 | 1002 | :param img_id: optional glance image id | ||
1044 | 1003 | :param src_vol_id: optional source volume id to clone | ||
1045 | 1004 | :param snap_id: optional snapshot id to use | ||
1046 | 1005 | :returns: cinder volume pointer | ||
1047 | 1006 | """ | ||
1048 | 1007 | # Handle parameter input and avoid impossible combinations | ||
1049 | 1008 | if img_id and not src_vol_id and not snap_id: | ||
1050 | 1009 | # Create volume from image | ||
1051 | 1010 | self.log.debug('Creating cinder volume from glance image...') | ||
1052 | 1011 | bootable = 'true' | ||
1053 | 1012 | elif src_vol_id and not img_id and not snap_id: | ||
1054 | 1013 | # Clone an existing volume | ||
1055 | 1014 | self.log.debug('Cloning cinder volume...') | ||
1056 | 1015 | bootable = cinder.volumes.get(src_vol_id).bootable | ||
1057 | 1016 | elif snap_id and not src_vol_id and not img_id: | ||
1058 | 1017 | # Create volume from snapshot | ||
1059 | 1018 | self.log.debug('Creating cinder volume from snapshot...') | ||
1060 | 1019 | snap = cinder.volume_snapshots.find(id=snap_id) | ||
1061 | 1020 | vol_size = snap.size | ||
1062 | 1021 | snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id | ||
1063 | 1022 | bootable = cinder.volumes.get(snap_vol_id).bootable | ||
1064 | 1023 | elif not img_id and not src_vol_id and not snap_id: | ||
1065 | 1024 | # Create volume | ||
1066 | 1025 | self.log.debug('Creating cinder volume...') | ||
1067 | 1026 | bootable = 'false' | ||
1068 | 1027 | else: | ||
1069 | 1028 | # Impossible combination of parameters | ||
1070 | 1029 | msg = ('Invalid method use - name:{} size:{} img_id:{} ' | ||
1071 | 1030 | 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, | ||
1072 | 1031 | img_id, src_vol_id, | ||
1073 | 1032 | snap_id)) | ||
1074 | 1033 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1075 | 1034 | |||
1076 | 1035 | # Create new volume | ||
1077 | 1036 | try: | ||
1078 | 1037 | vol_new = cinder.volumes.create(display_name=vol_name, | ||
1079 | 1038 | imageRef=img_id, | ||
1080 | 1039 | size=vol_size, | ||
1081 | 1040 | source_volid=src_vol_id, | ||
1082 | 1041 | snapshot_id=snap_id) | ||
1083 | 1042 | vol_id = vol_new.id | ||
1084 | 1043 | except Exception as e: | ||
1085 | 1044 | msg = 'Failed to create volume: {}'.format(e) | ||
1086 | 1045 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1087 | 1046 | |||
1088 | 1047 | # Wait for volume to reach available status | ||
1089 | 1048 | ret = self.resource_reaches_status(cinder.volumes, vol_id, | ||
1090 | 1049 | expected_stat="available", | ||
1091 | 1050 | msg="Volume status wait") | ||
1092 | 1051 | if not ret: | ||
1093 | 1052 | msg = 'Cinder volume failed to reach expected state.' | ||
1094 | 1053 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1095 | 1054 | |||
1096 | 1055 | # Re-validate new volume | ||
1097 | 1056 | self.log.debug('Validating volume attributes...') | ||
1098 | 1057 | val_vol_name = cinder.volumes.get(vol_id).display_name | ||
1099 | 1058 | val_vol_boot = cinder.volumes.get(vol_id).bootable | ||
1100 | 1059 | val_vol_stat = cinder.volumes.get(vol_id).status | ||
1101 | 1060 | val_vol_size = cinder.volumes.get(vol_id).size | ||
1102 | 1061 | msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' | ||
1103 | 1062 | '{} size:{}'.format(val_vol_name, vol_id, | ||
1104 | 1063 | val_vol_stat, val_vol_boot, | ||
1105 | 1064 | val_vol_size)) | ||
1106 | 1065 | |||
1107 | 1066 | if val_vol_boot == bootable and val_vol_stat == 'available' \ | ||
1108 | 1067 | and val_vol_name == vol_name and val_vol_size == vol_size: | ||
1109 | 1068 | self.log.debug(msg_attr) | ||
1110 | 1069 | else: | ||
1111 | 1070 | msg = ('Volume validation failed, {}'.format(msg_attr)) | ||
1112 | 1071 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1113 | 1072 | |||
1114 | 1073 | return vol_new | ||
1115 | 1074 | |||
1116 | 1075 | def delete_resource(self, resource, resource_id, | ||
1117 | 1076 | msg="resource", max_wait=120): | ||
1118 | 1077 | """Delete one openstack resource, such as one instance, keypair, | ||
1119 | 1078 | image, volume, stack, etc., and confirm deletion within max wait time. | ||
1120 | 1079 | |||
1121 | 1080 | :param resource: pointer to os resource type, ex:glance_client.images | ||
1122 | 1081 | :param resource_id: unique name or id for the openstack resource | ||
1123 | 1082 | :param msg: text to identify purpose in logging | ||
1124 | 1083 | :param max_wait: maximum wait time in seconds | ||
1125 | 1084 | :returns: True if successful, otherwise False | ||
1126 | 1085 | """ | ||
1127 | 1086 | self.log.debug('Deleting OpenStack resource ' | ||
1128 | 1087 | '{} ({})'.format(resource_id, msg)) | ||
1129 | 1088 | num_before = len(list(resource.list())) | ||
1130 | 1089 | resource.delete(resource_id) | ||
1131 | 1090 | |||
1132 | 1091 | tries = 0 | ||
1133 | 1092 | num_after = len(list(resource.list())) | ||
1134 | 1093 | while num_after != (num_before - 1) and tries < (max_wait / 4): | ||
1135 | 1094 | self.log.debug('{} delete check: ' | ||
1136 | 1095 | '{} [{}:{}] {}'.format(msg, tries, | ||
1137 | 1096 | num_before, | ||
1138 | 1097 | num_after, | ||
1139 | 1098 | resource_id)) | ||
1140 | 1099 | time.sleep(4) | ||
1141 | 1100 | num_after = len(list(resource.list())) | ||
1142 | 1101 | tries += 1 | ||
1143 | 1102 | |||
1144 | 1103 | self.log.debug('{}: expected, actual count = {}, ' | ||
1145 | 1104 | '{}'.format(msg, num_before - 1, num_after)) | ||
1146 | 1105 | |||
1147 | 1106 | if num_after == (num_before - 1): | ||
1148 | 1107 | return True | ||
1149 | 1108 | else: | ||
1150 | 1109 | self.log.error('{} delete timed out'.format(msg)) | ||
1151 | 1110 | return False | ||
1152 | 1111 | |||
1153 | 1112 | def resource_reaches_status(self, resource, resource_id, | ||
1154 | 1113 | expected_stat='available', | ||
1155 | 1114 | msg='resource', max_wait=120): | ||
1156 | 1115 | """Wait for an openstack resources status to reach an | ||
1157 | 1116 | expected status within a specified time. Useful to confirm that | ||
1158 | 1117 | nova instances, cinder vols, snapshots, glance images, heat stacks | ||
1159 | 1118 | and other resources eventually reach the expected status. | ||
1160 | 1119 | |||
1161 | 1120 | :param resource: pointer to os resource type, ex: heat_client.stacks | ||
1162 | 1121 | :param resource_id: unique id for the openstack resource | ||
1163 | 1122 | :param expected_stat: status to expect resource to reach | ||
1164 | 1123 | :param msg: text to identify purpose in logging | ||
1165 | 1124 | :param max_wait: maximum wait time in seconds | ||
1166 | 1125 | :returns: True if successful, False if status is not reached | ||
1167 | 1126 | """ | ||
1168 | 1127 | |||
1169 | 1128 | tries = 0 | ||
1170 | 1129 | resource_stat = resource.get(resource_id).status | ||
1171 | 1130 | while resource_stat != expected_stat and tries < (max_wait / 4): | ||
1172 | 1131 | self.log.debug('{} status check: ' | ||
1173 | 1132 | '{} [{}:{}] {}'.format(msg, tries, | ||
1174 | 1133 | resource_stat, | ||
1175 | 1134 | expected_stat, | ||
1176 | 1135 | resource_id)) | ||
1177 | 1136 | time.sleep(4) | ||
1178 | 1137 | resource_stat = resource.get(resource_id).status | ||
1179 | 1138 | tries += 1 | ||
1180 | 1139 | |||
1181 | 1140 | self.log.debug('{}: expected, actual status = {}, ' | ||
1182 | 1141 | '{}'.format(msg, resource_stat, expected_stat)) | ||
1183 | 1142 | |||
1184 | 1143 | if resource_stat == expected_stat: | ||
1185 | 1144 | return True | ||
1186 | 1145 | else: | ||
1187 | 1146 | self.log.debug('{} never reached expected status: ' | ||
1188 | 1147 | '{}'.format(resource_id, expected_stat)) | ||
1189 | 1148 | return False | ||
1190 | 1149 | |||
1191 | 1150 | def get_ceph_osd_id_cmd(self, index): | ||
1192 | 1151 | """Produce a shell command that will return a ceph-osd id.""" | ||
1193 | 1152 | return ("`initctl list | grep 'ceph-osd ' | " | ||
1194 | 1153 | "awk 'NR=={} {{ print $2 }}' | " | ||
1195 | 1154 | "grep -o '[0-9]*'`".format(index + 1)) | ||
1196 | 1155 | |||
1197 | 1156 | def get_ceph_pools(self, sentry_unit): | ||
1198 | 1157 | """Return a dict of ceph pools from a single ceph unit, with | ||
1199 | 1158 | pool name as keys, pool id as vals.""" | ||
1200 | 1159 | pools = {} | ||
1201 | 1160 | cmd = 'sudo ceph osd lspools' | ||
1202 | 1161 | output, code = sentry_unit.run(cmd) | ||
1203 | 1162 | if code != 0: | ||
1204 | 1163 | msg = ('{} `{}` returned {} ' | ||
1205 | 1164 | '{}'.format(sentry_unit.info['unit_name'], | ||
1206 | 1165 | cmd, code, output)) | ||
1207 | 1166 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1208 | 1167 | |||
1209 | 1168 | # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, | ||
1210 | 1169 | for pool in str(output).split(','): | ||
1211 | 1170 | pool_id_name = pool.split(' ') | ||
1212 | 1171 | if len(pool_id_name) == 2: | ||
1213 | 1172 | pool_id = pool_id_name[0] | ||
1214 | 1173 | pool_name = pool_id_name[1] | ||
1215 | 1174 | pools[pool_name] = int(pool_id) | ||
1216 | 1175 | |||
1217 | 1176 | self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], | ||
1218 | 1177 | pools)) | ||
1219 | 1178 | return pools | ||
1220 | 1179 | |||
1221 | 1180 | def get_ceph_df(self, sentry_unit): | ||
1222 | 1181 | """Return dict of ceph df json output, including ceph pool state. | ||
1223 | 1182 | |||
1224 | 1183 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
1225 | 1184 | :returns: Dict of ceph df output | ||
1226 | 1185 | """ | ||
1227 | 1186 | cmd = 'sudo ceph df --format=json' | ||
1228 | 1187 | output, code = sentry_unit.run(cmd) | ||
1229 | 1188 | if code != 0: | ||
1230 | 1189 | msg = ('{} `{}` returned {} ' | ||
1231 | 1190 | '{}'.format(sentry_unit.info['unit_name'], | ||
1232 | 1191 | cmd, code, output)) | ||
1233 | 1192 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1234 | 1193 | return json.loads(output) | ||
1235 | 1194 | |||
1236 | 1195 | def get_ceph_pool_sample(self, sentry_unit, pool_id=0): | ||
1237 | 1196 | """Take a sample of attributes of a ceph pool, returning ceph | ||
1238 | 1197 | pool name, object count and disk space used for the specified | ||
1239 | 1198 | pool ID number. | ||
1240 | 1199 | |||
1241 | 1200 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
1242 | 1201 | :param pool_id: Ceph pool ID | ||
1243 | 1202 | :returns: List of pool name, object count, kb disk space used | ||
1244 | 1203 | """ | ||
1245 | 1204 | df = self.get_ceph_df(sentry_unit) | ||
1246 | 1205 | pool_name = df['pools'][pool_id]['name'] | ||
1247 | 1206 | obj_count = df['pools'][pool_id]['stats']['objects'] | ||
1248 | 1207 | kb_used = df['pools'][pool_id]['stats']['kb_used'] | ||
1249 | 1208 | self.log.debug('Ceph {} pool (ID {}): {} objects, ' | ||
1250 | 1209 | '{} kb used'.format(pool_name, pool_id, | ||
1251 | 1210 | obj_count, kb_used)) | ||
1252 | 1211 | return pool_name, obj_count, kb_used | ||
1253 | 1212 | |||
1254 | 1213 | def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): | ||
1255 | 1214 | """Validate ceph pool samples taken over time, such as pool | ||
1256 | 1215 | object counts or pool kb used, before adding, after adding, and | ||
1257 | 1216 | after deleting items which affect those pool attributes. The | ||
1258 | 1217 | 2nd element is expected to be greater than the 1st; 3rd is expected | ||
1259 | 1218 | to be less than the 2nd. | ||
1260 | 1219 | |||
1261 | 1220 | :param samples: List containing 3 data samples | ||
1262 | 1221 | :param sample_type: String for logging and usage context | ||
1263 | 1222 | :returns: None if successful, Failure message otherwise | ||
1264 | 1223 | """ | ||
1265 | 1224 | original, created, deleted = range(3) | ||
1266 | 1225 | if samples[created] <= samples[original] or \ | ||
1267 | 1226 | samples[deleted] >= samples[created]: | ||
1268 | 1227 | return ('Ceph {} samples ({}) ' | ||
1269 | 1228 | 'unexpected.'.format(sample_type, samples)) | ||
1270 | 1229 | else: | ||
1271 | 1230 | self.log.debug('Ceph {} samples (OK): ' | ||
1272 | 1231 | '{}'.format(sample_type, samples)) | ||
1273 | 1232 | return None | ||
1274 | 1233 | |||
1275 | 1234 | # rabbitmq/amqp specific helpers: | ||
1276 | 1235 | |||
1277 | 1236 | def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200): | ||
1278 | 1237 | """Wait for rmq units extended status to show cluster readiness, | ||
1279 | 1238 | after an optional initial sleep period. Initial sleep is likely | ||
1280 | 1239 | necessary to be effective following a config change, as status | ||
1281 | 1240 | message may not instantly update to non-ready.""" | ||
1282 | 1241 | |||
1283 | 1242 | if init_sleep: | ||
1284 | 1243 | time.sleep(init_sleep) | ||
1285 | 1244 | |||
1286 | 1245 | message = re.compile('^Unit is ready and clustered$') | ||
1287 | 1246 | deployment._auto_wait_for_status(message=message, | ||
1288 | 1247 | timeout=timeout, | ||
1289 | 1248 | include_only=['rabbitmq-server']) | ||
1290 | 1249 | |||
1291 | 1250 | def add_rmq_test_user(self, sentry_units, | ||
1292 | 1251 | username="testuser1", password="changeme"): | ||
1293 | 1252 | """Add a test user via the first rmq juju unit, check connection as | ||
1294 | 1253 | the new user against all sentry units. | ||
1295 | 1254 | |||
1296 | 1255 | :param sentry_units: list of sentry unit pointers | ||
1297 | 1256 | :param username: amqp user name, default to testuser1 | ||
1298 | 1257 | :param password: amqp user password | ||
1299 | 1258 | :returns: None if successful. Raise on error. | ||
1300 | 1259 | """ | ||
1301 | 1260 | self.log.debug('Adding rmq user ({})...'.format(username)) | ||
1302 | 1261 | |||
1303 | 1262 | # Check that user does not already exist | ||
1304 | 1263 | cmd_user_list = 'rabbitmqctl list_users' | ||
1305 | 1264 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
1306 | 1265 | if username in output: | ||
1307 | 1266 | self.log.warning('User ({}) already exists, returning ' | ||
1308 | 1267 | 'gracefully.'.format(username)) | ||
1309 | 1268 | return | ||
1310 | 1269 | |||
1311 | 1270 | perms = '".*" ".*" ".*"' | ||
1312 | 1271 | cmds = ['rabbitmqctl add_user {} {}'.format(username, password), | ||
1313 | 1272 | 'rabbitmqctl set_permissions {} {}'.format(username, perms)] | ||
1314 | 1273 | |||
1315 | 1274 | # Add user via first unit | ||
1316 | 1275 | for cmd in cmds: | ||
1317 | 1276 | output, _ = self.run_cmd_unit(sentry_units[0], cmd) | ||
1318 | 1277 | |||
1319 | 1278 | # Check connection against the other sentry_units | ||
1320 | 1279 | self.log.debug('Checking user connect against units...') | ||
1321 | 1280 | for sentry_unit in sentry_units: | ||
1322 | 1281 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, | ||
1323 | 1282 | username=username, | ||
1324 | 1283 | password=password) | ||
1325 | 1284 | connection.close() | ||
1326 | 1285 | |||
1327 | 1286 | def delete_rmq_test_user(self, sentry_units, username="testuser1"): | ||
1328 | 1287 | """Delete a rabbitmq user via the first rmq juju unit. | ||
1329 | 1288 | |||
1330 | 1289 | :param sentry_units: list of sentry unit pointers | ||
1331 | 1290 | :param username: amqp user name, default to testuser1 | ||
1332 | 1291 | :param password: amqp user password | ||
1333 | 1292 | :returns: None if successful or no such user. | ||
1334 | 1293 | """ | ||
1335 | 1294 | self.log.debug('Deleting rmq user ({})...'.format(username)) | ||
1336 | 1295 | |||
1337 | 1296 | # Check that the user exists | ||
1338 | 1297 | cmd_user_list = 'rabbitmqctl list_users' | ||
1339 | 1298 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) | ||
1340 | 1299 | |||
1341 | 1300 | if username not in output: | ||
1342 | 1301 | self.log.warning('User ({}) does not exist, returning ' | ||
1343 | 1302 | 'gracefully.'.format(username)) | ||
1344 | 1303 | return | ||
1345 | 1304 | |||
1346 | 1305 | # Delete the user | ||
1347 | 1306 | cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) | ||
1348 | 1307 | output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) | ||
1349 | 1308 | |||
1350 | 1309 | def get_rmq_cluster_status(self, sentry_unit): | ||
1351 | 1310 | """Execute rabbitmq cluster status command on a unit and return | ||
1352 | 1311 | the full output. | ||
1353 | 1312 | |||
1354 | 1313 | :param unit: sentry unit | ||
1355 | 1314 | :returns: String containing console output of cluster status command | ||
1356 | 1315 | """ | ||
1357 | 1316 | cmd = 'rabbitmqctl cluster_status' | ||
1358 | 1317 | output, _ = self.run_cmd_unit(sentry_unit, cmd) | ||
1359 | 1318 | self.log.debug('{} cluster_status:\n{}'.format( | ||
1360 | 1319 | sentry_unit.info['unit_name'], output)) | ||
1361 | 1320 | return str(output) | ||
1362 | 1321 | |||
1363 | 1322 | def get_rmq_cluster_running_nodes(self, sentry_unit): | ||
1364 | 1323 | """Parse rabbitmqctl cluster_status output string, return list of | ||
1365 | 1324 | running rabbitmq cluster nodes. | ||
1366 | 1325 | |||
1367 | 1326 | :param unit: sentry unit | ||
1368 | 1327 | :returns: List containing node names of running nodes | ||
1369 | 1328 | """ | ||
1370 | 1329 | # NOTE(beisner): rabbitmqctl cluster_status output is not | ||
1371 | 1330 | # json-parsable, do string chop foo, then json.loads that. | ||
1372 | 1331 | str_stat = self.get_rmq_cluster_status(sentry_unit) | ||
1373 | 1332 | if 'running_nodes' in str_stat: | ||
1374 | 1333 | pos_start = str_stat.find("{running_nodes,") + 15 | ||
1375 | 1334 | pos_end = str_stat.find("]},", pos_start) + 1 | ||
1376 | 1335 | str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') | ||
1377 | 1336 | run_nodes = json.loads(str_run_nodes) | ||
1378 | 1337 | return run_nodes | ||
1379 | 1338 | else: | ||
1380 | 1339 | return [] | ||
1381 | 1340 | |||
1382 | 1341 | def validate_rmq_cluster_running_nodes(self, sentry_units): | ||
1383 | 1342 | """Check that all rmq unit hostnames are represented in the | ||
1384 | 1343 | cluster_status output of all units. | ||
1385 | 1344 | |||
1386 | 1345 | :param host_names: dict of juju unit names to host names | ||
1387 | 1346 | :param units: list of sentry unit pointers (all rmq units) | ||
1388 | 1347 | :returns: None if successful, otherwise return error message | ||
1389 | 1348 | """ | ||
1390 | 1349 | host_names = self.get_unit_hostnames(sentry_units) | ||
1391 | 1350 | errors = [] | ||
1392 | 1351 | |||
1393 | 1352 | # Query every unit for cluster_status running nodes | ||
1394 | 1353 | for query_unit in sentry_units: | ||
1395 | 1354 | query_unit_name = query_unit.info['unit_name'] | ||
1396 | 1355 | running_nodes = self.get_rmq_cluster_running_nodes(query_unit) | ||
1397 | 1356 | |||
1398 | 1357 | # Confirm that every unit is represented in the queried unit's | ||
1399 | 1358 | # cluster_status running nodes output. | ||
1400 | 1359 | for validate_unit in sentry_units: | ||
1401 | 1360 | val_host_name = host_names[validate_unit.info['unit_name']] | ||
1402 | 1361 | val_node_name = 'rabbit@{}'.format(val_host_name) | ||
1403 | 1362 | |||
1404 | 1363 | if val_node_name not in running_nodes: | ||
1405 | 1364 | errors.append('Cluster member check failed on {}: {} not ' | ||
1406 | 1365 | 'in {}\n'.format(query_unit_name, | ||
1407 | 1366 | val_node_name, | ||
1408 | 1367 | running_nodes)) | ||
1409 | 1368 | if errors: | ||
1410 | 1369 | return ''.join(errors) | ||
1411 | 1370 | |||
1412 | 1371 | def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): | ||
1413 | 1372 | """Check a single juju rmq unit for ssl and port in the config file.""" | ||
1414 | 1373 | host = sentry_unit.info['public-address'] | ||
1415 | 1374 | unit_name = sentry_unit.info['unit_name'] | ||
1416 | 1375 | |||
1417 | 1376 | conf_file = '/etc/rabbitmq/rabbitmq.config' | ||
1418 | 1377 | conf_contents = str(self.file_contents_safe(sentry_unit, | ||
1419 | 1378 | conf_file, max_wait=16)) | ||
1420 | 1379 | # Checks | ||
1421 | 1380 | conf_ssl = 'ssl' in conf_contents | ||
1422 | 1381 | conf_port = str(port) in conf_contents | ||
1423 | 1382 | |||
1424 | 1383 | # Port explicitly checked in config | ||
1425 | 1384 | if port and conf_port and conf_ssl: | ||
1426 | 1385 | self.log.debug('SSL is enabled @{}:{} ' | ||
1427 | 1386 | '({})'.format(host, port, unit_name)) | ||
1428 | 1387 | return True | ||
1429 | 1388 | elif port and not conf_port and conf_ssl: | ||
1430 | 1389 | self.log.debug('SSL is enabled @{} but not on port {} ' | ||
1431 | 1390 | '({})'.format(host, port, unit_name)) | ||
1432 | 1391 | return False | ||
1433 | 1392 | # Port not checked (useful when checking that ssl is disabled) | ||
1434 | 1393 | elif not port and conf_ssl: | ||
1435 | 1394 | self.log.debug('SSL is enabled @{}:{} ' | ||
1436 | 1395 | '({})'.format(host, port, unit_name)) | ||
1437 | 1396 | return True | ||
1438 | 1397 | elif not conf_ssl: | ||
1439 | 1398 | self.log.debug('SSL not enabled @{}:{} ' | ||
1440 | 1399 | '({})'.format(host, port, unit_name)) | ||
1441 | 1400 | return False | ||
1442 | 1401 | else: | ||
1443 | 1402 | msg = ('Unknown condition when checking SSL status @{}:{} ' | ||
1444 | 1403 | '({})'.format(host, port, unit_name)) | ||
1445 | 1404 | amulet.raise_status(amulet.FAIL, msg) | ||
1446 | 1405 | |||
1447 | 1406 | def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): | ||
1448 | 1407 | """Check that ssl is enabled on rmq juju sentry units. | ||
1449 | 1408 | |||
1450 | 1409 | :param sentry_units: list of all rmq sentry units | ||
1451 | 1410 | :param port: optional ssl port override to validate | ||
1452 | 1411 | :returns: None if successful, otherwise return error message | ||
1453 | 1412 | """ | ||
1454 | 1413 | for sentry_unit in sentry_units: | ||
1455 | 1414 | if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): | ||
1456 | 1415 | return ('Unexpected condition: ssl is disabled on unit ' | ||
1457 | 1416 | '({})'.format(sentry_unit.info['unit_name'])) | ||
1458 | 1417 | return None | ||
1459 | 1418 | |||
1460 | 1419 | def validate_rmq_ssl_disabled_units(self, sentry_units): | ||
1461 | 1420 | """Check that ssl is enabled on listed rmq juju sentry units. | ||
1462 | 1421 | |||
1463 | 1422 | :param sentry_units: list of all rmq sentry units | ||
1464 | 1423 | :returns: True if successful. Raise on error. | ||
1465 | 1424 | """ | ||
1466 | 1425 | for sentry_unit in sentry_units: | ||
1467 | 1426 | if self.rmq_ssl_is_enabled_on_unit(sentry_unit): | ||
1468 | 1427 | return ('Unexpected condition: ssl is enabled on unit ' | ||
1469 | 1428 | '({})'.format(sentry_unit.info['unit_name'])) | ||
1470 | 1429 | return None | ||
1471 | 1430 | |||
1472 | 1431 | def configure_rmq_ssl_on(self, sentry_units, deployment, | ||
1473 | 1432 | port=None, max_wait=60): | ||
1474 | 1433 | """Turn ssl charm config option on, with optional non-default | ||
1475 | 1434 | ssl port specification. Confirm that it is enabled on every | ||
1476 | 1435 | unit. | ||
1477 | 1436 | |||
1478 | 1437 | :param sentry_units: list of sentry units | ||
1479 | 1438 | :param deployment: amulet deployment object pointer | ||
1480 | 1439 | :param port: amqp port, use defaults if None | ||
1481 | 1440 | :param max_wait: maximum time to wait in seconds to confirm | ||
1482 | 1441 | :returns: None if successful. Raise on error. | ||
1483 | 1442 | """ | ||
1484 | 1443 | self.log.debug('Setting ssl charm config option: on') | ||
1485 | 1444 | |||
1486 | 1445 | # Enable RMQ SSL | ||
1487 | 1446 | config = {'ssl': 'on'} | ||
1488 | 1447 | if port: | ||
1489 | 1448 | config['ssl_port'] = port | ||
1490 | 1449 | |||
1491 | 1450 | deployment.d.configure('rabbitmq-server', config) | ||
1492 | 1451 | |||
1493 | 1452 | # Wait for unit status | ||
1494 | 1453 | self.rmq_wait_for_cluster(deployment) | ||
1495 | 1454 | |||
1496 | 1455 | # Confirm | ||
1497 | 1456 | tries = 0 | ||
1498 | 1457 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
1499 | 1458 | while ret and tries < (max_wait / 4): | ||
1500 | 1459 | time.sleep(4) | ||
1501 | 1460 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
1502 | 1461 | ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) | ||
1503 | 1462 | tries += 1 | ||
1504 | 1463 | |||
1505 | 1464 | if ret: | ||
1506 | 1465 | amulet.raise_status(amulet.FAIL, ret) | ||
1507 | 1466 | |||
1508 | 1467 | def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): | ||
1509 | 1468 | """Turn ssl charm config option off, confirm that it is disabled | ||
1510 | 1469 | on every unit. | ||
1511 | 1470 | |||
1512 | 1471 | :param sentry_units: list of sentry units | ||
1513 | 1472 | :param deployment: amulet deployment object pointer | ||
1514 | 1473 | :param max_wait: maximum time to wait in seconds to confirm | ||
1515 | 1474 | :returns: None if successful. Raise on error. | ||
1516 | 1475 | """ | ||
1517 | 1476 | self.log.debug('Setting ssl charm config option: off') | ||
1518 | 1477 | |||
1519 | 1478 | # Disable RMQ SSL | ||
1520 | 1479 | config = {'ssl': 'off'} | ||
1521 | 1480 | deployment.d.configure('rabbitmq-server', config) | ||
1522 | 1481 | |||
1523 | 1482 | # Wait for unit status | ||
1524 | 1483 | self.rmq_wait_for_cluster(deployment) | ||
1525 | 1484 | |||
1526 | 1485 | # Confirm | ||
1527 | 1486 | tries = 0 | ||
1528 | 1487 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
1529 | 1488 | while ret and tries < (max_wait / 4): | ||
1530 | 1489 | time.sleep(4) | ||
1531 | 1490 | self.log.debug('Attempt {}: {}'.format(tries, ret)) | ||
1532 | 1491 | ret = self.validate_rmq_ssl_disabled_units(sentry_units) | ||
1533 | 1492 | tries += 1 | ||
1534 | 1493 | |||
1535 | 1494 | if ret: | ||
1536 | 1495 | amulet.raise_status(amulet.FAIL, ret) | ||
1537 | 1496 | |||
1538 | 1497 | def connect_amqp_by_unit(self, sentry_unit, ssl=False, | ||
1539 | 1498 | port=None, fatal=True, | ||
1540 | 1499 | username="testuser1", password="changeme"): | ||
1541 | 1500 | """Establish and return a pika amqp connection to the rabbitmq service | ||
1542 | 1501 | running on a rmq juju unit. | ||
1543 | 1502 | |||
1544 | 1503 | :param sentry_unit: sentry unit pointer | ||
1545 | 1504 | :param ssl: boolean, default to False | ||
1546 | 1505 | :param port: amqp port, use defaults if None | ||
1547 | 1506 | :param fatal: boolean, default to True (raises on connect error) | ||
1548 | 1507 | :param username: amqp user name, default to testuser1 | ||
1549 | 1508 | :param password: amqp user password | ||
1550 | 1509 | :returns: pika amqp connection pointer or None if failed and non-fatal | ||
1551 | 1510 | """ | ||
1552 | 1511 | host = sentry_unit.info['public-address'] | ||
1553 | 1512 | unit_name = sentry_unit.info['unit_name'] | ||
1554 | 1513 | |||
1555 | 1514 | # Default port logic if port is not specified | ||
1556 | 1515 | if ssl and not port: | ||
1557 | 1516 | port = 5671 | ||
1558 | 1517 | elif not ssl and not port: | ||
1559 | 1518 | port = 5672 | ||
1560 | 1519 | |||
1561 | 1520 | self.log.debug('Connecting to amqp on {}:{} ({}) as ' | ||
1562 | 1521 | '{}...'.format(host, port, unit_name, username)) | ||
1563 | 1522 | |||
1564 | 1523 | try: | ||
1565 | 1524 | credentials = pika.PlainCredentials(username, password) | ||
1566 | 1525 | parameters = pika.ConnectionParameters(host=host, port=port, | ||
1567 | 1526 | credentials=credentials, | ||
1568 | 1527 | ssl=ssl, | ||
1569 | 1528 | connection_attempts=3, | ||
1570 | 1529 | retry_delay=5, | ||
1571 | 1530 | socket_timeout=1) | ||
1572 | 1531 | connection = pika.BlockingConnection(parameters) | ||
1573 | 1532 | assert connection.server_properties['product'] == 'RabbitMQ' | ||
1574 | 1533 | self.log.debug('Connect OK') | ||
1575 | 1534 | return connection | ||
1576 | 1535 | except Exception as e: | ||
1577 | 1536 | msg = ('amqp connection failed to {}:{} as ' | ||
1578 | 1537 | '{} ({})'.format(host, port, username, str(e))) | ||
1579 | 1538 | if fatal: | ||
1580 | 1539 | amulet.raise_status(amulet.FAIL, msg) | ||
1581 | 1540 | else: | ||
1582 | 1541 | self.log.warn(msg) | ||
1583 | 1542 | return None | ||
1584 | 1543 | |||
1585 | 1544 | def publish_amqp_message_by_unit(self, sentry_unit, message, | ||
1586 | 1545 | queue="test", ssl=False, | ||
1587 | 1546 | username="testuser1", | ||
1588 | 1547 | password="changeme", | ||
1589 | 1548 | port=None): | ||
1590 | 1549 | """Publish an amqp message to a rmq juju unit. | ||
1591 | 1550 | |||
1592 | 1551 | :param sentry_unit: sentry unit pointer | ||
1593 | 1552 | :param message: amqp message string | ||
1594 | 1553 | :param queue: message queue, default to test | ||
1595 | 1554 | :param username: amqp user name, default to testuser1 | ||
1596 | 1555 | :param password: amqp user password | ||
1597 | 1556 | :param ssl: boolean, default to False | ||
1598 | 1557 | :param port: amqp port, use defaults if None | ||
1599 | 1558 | :returns: None. Raises exception if publish failed. | ||
1600 | 1559 | """ | ||
1601 | 1560 | self.log.debug('Publishing message to {} queue:\n{}'.format(queue, | ||
1602 | 1561 | message)) | ||
1603 | 1562 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
1604 | 1563 | port=port, | ||
1605 | 1564 | username=username, | ||
1606 | 1565 | password=password) | ||
1607 | 1566 | |||
1608 | 1567 | # NOTE(beisner): extra debug here re: pika hang potential: | ||
1609 | 1568 | # https://github.com/pika/pika/issues/297 | ||
1610 | 1569 | # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw | ||
1611 | 1570 | self.log.debug('Defining channel...') | ||
1612 | 1571 | channel = connection.channel() | ||
1613 | 1572 | self.log.debug('Declaring queue...') | ||
1614 | 1573 | channel.queue_declare(queue=queue, auto_delete=False, durable=True) | ||
1615 | 1574 | self.log.debug('Publishing message...') | ||
1616 | 1575 | channel.basic_publish(exchange='', routing_key=queue, body=message) | ||
1617 | 1576 | self.log.debug('Closing channel...') | ||
1618 | 1577 | channel.close() | ||
1619 | 1578 | self.log.debug('Closing connection...') | ||
1620 | 1579 | connection.close() | ||
1621 | 1580 | |||
1622 | 1581 | def get_amqp_message_by_unit(self, sentry_unit, queue="test", | ||
1623 | 1582 | username="testuser1", | ||
1624 | 1583 | password="changeme", | ||
1625 | 1584 | ssl=False, port=None): | ||
1626 | 1585 | """Get an amqp message from a rmq juju unit. | ||
1627 | 1586 | |||
1628 | 1587 | :param sentry_unit: sentry unit pointer | ||
1629 | 1588 | :param queue: message queue, default to test | ||
1630 | 1589 | :param username: amqp user name, default to testuser1 | ||
1631 | 1590 | :param password: amqp user password | ||
1632 | 1591 | :param ssl: boolean, default to False | ||
1633 | 1592 | :param port: amqp port, use defaults if None | ||
1634 | 1593 | :returns: amqp message body as string. Raise if get fails. | ||
1635 | 1594 | """ | ||
1636 | 1595 | connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, | ||
1637 | 1596 | port=port, | ||
1638 | 1597 | username=username, | ||
1639 | 1598 | password=password) | ||
1640 | 1599 | channel = connection.channel() | ||
1641 | 1600 | method_frame, _, body = channel.basic_get(queue) | ||
1642 | 1601 | |||
1643 | 1602 | if method_frame: | ||
1644 | 1603 | self.log.debug('Retreived message from {} queue:\n{}'.format(queue, | ||
1645 | 1604 | body)) | ||
1646 | 1605 | channel.basic_ack(method_frame.delivery_tag) | ||
1647 | 1606 | channel.close() | ||
1648 | 1607 | connection.close() | ||
1649 | 1608 | return body | ||
1650 | 1609 | else: | ||
1651 | 1610 | msg = 'No message retrieved.' | ||
1652 | 1611 | amulet.raise_status(amulet.FAIL, msg) | ||
1653 | 1612 | >>>>>>> MERGE-SOURCE | ||
1654 | 964 | 1613 | ||
1655 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
1656 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-10-22 13:19:13 +0000 | |||
1657 | +++ hooks/charmhelpers/contrib/openstack/context.py 2016-01-06 21:19:13 +0000 | |||
1658 | @@ -14,6 +14,7 @@ | |||
1659 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
1660 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1661 | 16 | 16 | ||
1662 | 17 | import glob | ||
1663 | 17 | import json | 18 | import json |
1664 | 18 | import os | 19 | import os |
1665 | 19 | import re | 20 | import re |
1666 | @@ -625,6 +626,12 @@ | |||
1667 | 625 | if config('haproxy-client-timeout'): | 626 | if config('haproxy-client-timeout'): |
1668 | 626 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') | 627 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') |
1669 | 627 | 628 | ||
1670 | 629 | if config('haproxy-queue-timeout'): | ||
1671 | 630 | ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout') | ||
1672 | 631 | |||
1673 | 632 | if config('haproxy-connect-timeout'): | ||
1674 | 633 | ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout') | ||
1675 | 634 | |||
1676 | 628 | if config('prefer-ipv6'): | 635 | if config('prefer-ipv6'): |
1677 | 629 | ctxt['ipv6'] = True | 636 | ctxt['ipv6'] = True |
1678 | 630 | ctxt['local_host'] = 'ip6-localhost' | 637 | ctxt['local_host'] = 'ip6-localhost' |
1679 | @@ -939,18 +946,46 @@ | |||
1680 | 939 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} | 946 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} |
1681 | 940 | return ctxt | 947 | return ctxt |
1682 | 941 | 948 | ||
1695 | 942 | def pg_ctxt(self): | 949 | <<<<<<< TREE |
1696 | 943 | driver = neutron_plugin_attribute(self.plugin, 'driver', | 950 | def pg_ctxt(self): |
1697 | 944 | self.network_manager) | 951 | driver = neutron_plugin_attribute(self.plugin, 'driver', |
1698 | 945 | config = neutron_plugin_attribute(self.plugin, 'config', | 952 | self.network_manager) |
1699 | 946 | self.network_manager) | 953 | config = neutron_plugin_attribute(self.plugin, 'config', |
1700 | 947 | ovs_ctxt = {'core_plugin': driver, | 954 | self.network_manager) |
1701 | 948 | 'neutron_plugin': 'plumgrid', | 955 | ovs_ctxt = {'core_plugin': driver, |
1702 | 949 | 'neutron_security_groups': self.neutron_security_groups, | 956 | 'neutron_plugin': 'plumgrid', |
1703 | 950 | 'local_ip': unit_private_ip(), | 957 | 'neutron_security_groups': self.neutron_security_groups, |
1704 | 951 | 'config': config} | 958 | 'local_ip': unit_private_ip(), |
1705 | 952 | return ovs_ctxt | 959 | 'config': config} |
1706 | 953 | 960 | return ovs_ctxt | |
1707 | 961 | |||
1708 | 962 | ======= | ||
1709 | 963 | def pg_ctxt(self): | ||
1710 | 964 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1711 | 965 | self.network_manager) | ||
1712 | 966 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
1713 | 967 | self.network_manager) | ||
1714 | 968 | ovs_ctxt = {'core_plugin': driver, | ||
1715 | 969 | 'neutron_plugin': 'plumgrid', | ||
1716 | 970 | 'neutron_security_groups': self.neutron_security_groups, | ||
1717 | 971 | 'local_ip': unit_private_ip(), | ||
1718 | 972 | 'config': config} | ||
1719 | 973 | return ovs_ctxt | ||
1720 | 974 | |||
1721 | 975 | def midonet_ctxt(self): | ||
1722 | 976 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1723 | 977 | self.network_manager) | ||
1724 | 978 | midonet_config = neutron_plugin_attribute(self.plugin, 'config', | ||
1725 | 979 | self.network_manager) | ||
1726 | 980 | mido_ctxt = {'core_plugin': driver, | ||
1727 | 981 | 'neutron_plugin': 'midonet', | ||
1728 | 982 | 'neutron_security_groups': self.neutron_security_groups, | ||
1729 | 983 | 'local_ip': unit_private_ip(), | ||
1730 | 984 | 'config': midonet_config} | ||
1731 | 985 | |||
1732 | 986 | return mido_ctxt | ||
1733 | 987 | |||
1734 | 988 | >>>>>>> MERGE-SOURCE | ||
1735 | 954 | def __call__(self): | 989 | def __call__(self): |
1736 | 955 | if self.network_manager not in ['quantum', 'neutron']: | 990 | if self.network_manager not in ['quantum', 'neutron']: |
1737 | 956 | return {} | 991 | return {} |
1738 | @@ -970,8 +1005,15 @@ | |||
1739 | 970 | ctxt.update(self.calico_ctxt()) | 1005 | ctxt.update(self.calico_ctxt()) |
1740 | 971 | elif self.plugin == 'vsp': | 1006 | elif self.plugin == 'vsp': |
1741 | 972 | ctxt.update(self.nuage_ctxt()) | 1007 | ctxt.update(self.nuage_ctxt()) |
1744 | 973 | elif self.plugin == 'plumgrid': | 1008 | <<<<<<< TREE |
1745 | 974 | ctxt.update(self.pg_ctxt()) | 1009 | elif self.plugin == 'plumgrid': |
1746 | 1010 | ctxt.update(self.pg_ctxt()) | ||
1747 | 1011 | ======= | ||
1748 | 1012 | elif self.plugin == 'plumgrid': | ||
1749 | 1013 | ctxt.update(self.pg_ctxt()) | ||
1750 | 1014 | elif self.plugin == 'midonet': | ||
1751 | 1015 | ctxt.update(self.midonet_ctxt()) | ||
1752 | 1016 | >>>>>>> MERGE-SOURCE | ||
1753 | 975 | 1017 | ||
1754 | 976 | alchemy_flags = config('neutron-alchemy-flags') | 1018 | alchemy_flags = config('neutron-alchemy-flags') |
1755 | 977 | if alchemy_flags: | 1019 | if alchemy_flags: |
1756 | @@ -1072,6 +1114,20 @@ | |||
1757 | 1072 | config_flags_parser(config_flags)} | 1114 | config_flags_parser(config_flags)} |
1758 | 1073 | 1115 | ||
1759 | 1074 | 1116 | ||
1760 | 1117 | class LibvirtConfigFlagsContext(OSContextGenerator): | ||
1761 | 1118 | """ | ||
1762 | 1119 | This context provides support for extending | ||
1763 | 1120 | the libvirt section through user-defined flags. | ||
1764 | 1121 | """ | ||
1765 | 1122 | def __call__(self): | ||
1766 | 1123 | ctxt = {} | ||
1767 | 1124 | libvirt_flags = config('libvirt-flags') | ||
1768 | 1125 | if libvirt_flags: | ||
1769 | 1126 | ctxt['libvirt_flags'] = config_flags_parser( | ||
1770 | 1127 | libvirt_flags) | ||
1771 | 1128 | return ctxt | ||
1772 | 1129 | |||
1773 | 1130 | |||
1774 | 1075 | class SubordinateConfigContext(OSContextGenerator): | 1131 | class SubordinateConfigContext(OSContextGenerator): |
1775 | 1076 | 1132 | ||
1776 | 1077 | """ | 1133 | """ |
1777 | @@ -1104,7 +1160,7 @@ | |||
1778 | 1104 | 1160 | ||
1779 | 1105 | ctxt = { | 1161 | ctxt = { |
1780 | 1106 | ... other context ... | 1162 | ... other context ... |
1782 | 1107 | 'subordinate_config': { | 1163 | 'subordinate_configuration': { |
1783 | 1108 | 'DEFAULT': { | 1164 | 'DEFAULT': { |
1784 | 1109 | 'key1': 'value1', | 1165 | 'key1': 'value1', |
1785 | 1110 | }, | 1166 | }, |
1786 | @@ -1145,6 +1201,7 @@ | |||
1787 | 1145 | try: | 1201 | try: |
1788 | 1146 | sub_config = json.loads(sub_config) | 1202 | sub_config = json.loads(sub_config) |
1789 | 1147 | except: | 1203 | except: |
1790 | 1204 | <<<<<<< TREE | ||
1791 | 1148 | log('Could not parse JSON from subordinate_config ' | 1205 | log('Could not parse JSON from subordinate_config ' |
1792 | 1149 | 'setting from %s' % rid, level=ERROR) | 1206 | 'setting from %s' % rid, level=ERROR) |
1793 | 1150 | continue | 1207 | continue |
1794 | @@ -1175,6 +1232,39 @@ | |||
1795 | 1175 | ctxt[k][section] = config_list | 1232 | ctxt[k][section] = config_list |
1796 | 1176 | else: | 1233 | else: |
1797 | 1177 | ctxt[k] = v | 1234 | ctxt[k] = v |
1798 | 1235 | ======= | ||
1799 | 1236 | log('Could not parse JSON from ' | ||
1800 | 1237 | 'subordinate_configuration setting from %s' | ||
1801 | 1238 | % rid, level=ERROR) | ||
1802 | 1239 | continue | ||
1803 | 1240 | |||
1804 | 1241 | for service in self.services: | ||
1805 | 1242 | if service not in sub_config: | ||
1806 | 1243 | log('Found subordinate_configuration on %s but it ' | ||
1807 | 1244 | 'contained nothing for %s service' | ||
1808 | 1245 | % (rid, service), level=INFO) | ||
1809 | 1246 | continue | ||
1810 | 1247 | |||
1811 | 1248 | sub_config = sub_config[service] | ||
1812 | 1249 | if self.config_file not in sub_config: | ||
1813 | 1250 | log('Found subordinate_configuration on %s but it ' | ||
1814 | 1251 | 'contained nothing for %s' | ||
1815 | 1252 | % (rid, self.config_file), level=INFO) | ||
1816 | 1253 | continue | ||
1817 | 1254 | |||
1818 | 1255 | sub_config = sub_config[self.config_file] | ||
1819 | 1256 | for k, v in six.iteritems(sub_config): | ||
1820 | 1257 | if k == 'sections': | ||
1821 | 1258 | for section, config_list in six.iteritems(v): | ||
1822 | 1259 | log("adding section '%s'" % (section), | ||
1823 | 1260 | level=DEBUG) | ||
1824 | 1261 | if ctxt[k].get(section): | ||
1825 | 1262 | ctxt[k][section].extend(config_list) | ||
1826 | 1263 | else: | ||
1827 | 1264 | ctxt[k][section] = config_list | ||
1828 | 1265 | else: | ||
1829 | 1266 | ctxt[k] = v | ||
1830 | 1267 | >>>>>>> MERGE-SOURCE | ||
1831 | 1178 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) | 1268 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
1832 | 1179 | return ctxt | 1269 | return ctxt |
1833 | 1180 | 1270 | ||
1834 | @@ -1363,7 +1453,11 @@ | |||
1835 | 1363 | normalized.update({port: port for port in resolved | 1453 | normalized.update({port: port for port in resolved |
1836 | 1364 | if port in ports}) | 1454 | if port in ports}) |
1837 | 1365 | if resolved: | 1455 | if resolved: |
1838 | 1456 | <<<<<<< TREE | ||
1839 | 1366 | return {bridge: normalized[port] for port, bridge in | 1457 | return {bridge: normalized[port] for port, bridge in |
1840 | 1458 | ======= | ||
1841 | 1459 | return {normalized[port]: bridge for port, bridge in | ||
1842 | 1460 | >>>>>>> MERGE-SOURCE | ||
1843 | 1367 | six.iteritems(portmap) if port in normalized.keys()} | 1461 | six.iteritems(portmap) if port in normalized.keys()} |
1844 | 1368 | 1462 | ||
1845 | 1369 | return None | 1463 | return None |
1846 | @@ -1374,12 +1468,22 @@ | |||
1847 | 1374 | def __call__(self): | 1468 | def __call__(self): |
1848 | 1375 | ctxt = {} | 1469 | ctxt = {} |
1849 | 1376 | mappings = super(PhyNICMTUContext, self).__call__() | 1470 | mappings = super(PhyNICMTUContext, self).__call__() |
1852 | 1377 | if mappings and mappings.values(): | 1471 | if mappings and mappings.keys(): |
1853 | 1378 | ports = mappings.values() | 1472 | ports = sorted(mappings.keys()) |
1854 | 1379 | napi_settings = NeutronAPIContext()() | 1473 | napi_settings = NeutronAPIContext()() |
1855 | 1380 | mtu = napi_settings.get('network_device_mtu') | 1474 | mtu = napi_settings.get('network_device_mtu') |
1856 | 1475 | all_ports = set() | ||
1857 | 1476 | # If any of ports is a vlan device, its underlying device must have | ||
1858 | 1477 | # mtu applied first. | ||
1859 | 1478 | for port in ports: | ||
1860 | 1479 | for lport in glob.glob("/sys/class/net/%s/lower_*" % port): | ||
1861 | 1480 | lport = os.path.basename(lport) | ||
1862 | 1481 | all_ports.add(lport.split('_')[1]) | ||
1863 | 1482 | |||
1864 | 1483 | all_ports = list(all_ports) | ||
1865 | 1484 | all_ports.extend(ports) | ||
1866 | 1381 | if mtu: | 1485 | if mtu: |
1868 | 1382 | ctxt["devs"] = '\\n'.join(ports) | 1486 | ctxt["devs"] = '\\n'.join(all_ports) |
1869 | 1383 | ctxt['mtu'] = mtu | 1487 | ctxt['mtu'] = mtu |
1870 | 1384 | 1488 | ||
1871 | 1385 | return ctxt | 1489 | return ctxt |
1872 | 1386 | 1490 | ||
1873 | === modified file 'hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh' | |||
1874 | --- hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2015-02-19 03:38:40 +0000 | |||
1875 | +++ hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh 2016-01-06 21:19:13 +0000 | |||
1876 | @@ -9,15 +9,17 @@ | |||
1877 | 9 | CRITICAL=0 | 9 | CRITICAL=0 |
1878 | 10 | NOTACTIVE='' | 10 | NOTACTIVE='' |
1879 | 11 | LOGFILE=/var/log/nagios/check_haproxy.log | 11 | LOGFILE=/var/log/nagios/check_haproxy.log |
1881 | 12 | AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') | 12 | AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR==1{print $4}') |
1882 | 13 | 13 | ||
1884 | 14 | for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); | 14 | typeset -i N_INSTANCES=0 |
1885 | 15 | for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg) | ||
1886 | 15 | do | 16 | do |
1888 | 16 | output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') | 17 | N_INSTANCES=N_INSTANCES+1 |
1889 | 18 | output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK') | ||
1890 | 17 | if [ $? != 0 ]; then | 19 | if [ $? != 0 ]; then |
1891 | 18 | date >> $LOGFILE | 20 | date >> $LOGFILE |
1892 | 19 | echo $output >> $LOGFILE | 21 | echo $output >> $LOGFILE |
1894 | 20 | /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 | 22 | /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1 |
1895 | 21 | CRITICAL=1 | 23 | CRITICAL=1 |
1896 | 22 | NOTACTIVE="${NOTACTIVE} $appserver" | 24 | NOTACTIVE="${NOTACTIVE} $appserver" |
1897 | 23 | fi | 25 | fi |
1898 | @@ -28,5 +30,5 @@ | |||
1899 | 28 | exit 2 | 30 | exit 2 |
1900 | 29 | fi | 31 | fi |
1901 | 30 | 32 | ||
1903 | 31 | echo "OK: All haproxy instances looking good" | 33 | echo "OK: All haproxy instances ($N_INSTANCES) looking good" |
1904 | 32 | exit 0 | 34 | exit 0 |
1905 | 33 | 35 | ||
1906 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
1907 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-10-22 13:19:13 +0000 | |||
1908 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2016-01-06 21:19:13 +0000 | |||
1909 | @@ -195,6 +195,7 @@ | |||
1910 | 195 | 'packages': [], | 195 | 'packages': [], |
1911 | 196 | 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], | 196 | 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], |
1912 | 197 | 'server_services': ['neutron-server'] | 197 | 'server_services': ['neutron-server'] |
1913 | 198 | <<<<<<< TREE | ||
1914 | 198 | }, | 199 | }, |
1915 | 199 | 'plumgrid': { | 200 | 'plumgrid': { |
1916 | 200 | 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', | 201 | 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', |
1917 | @@ -209,6 +210,36 @@ | |||
1918 | 209 | 'server_packages': ['neutron-server', | 210 | 'server_packages': ['neutron-server', |
1919 | 210 | 'neutron-plugin-plumgrid'], | 211 | 'neutron-plugin-plumgrid'], |
1920 | 211 | 'server_services': ['neutron-server'] | 212 | 'server_services': ['neutron-server'] |
1921 | 213 | ======= | ||
1922 | 214 | }, | ||
1923 | 215 | 'plumgrid': { | ||
1924 | 216 | 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', | ||
1925 | 217 | 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', | ||
1926 | 218 | 'contexts': [ | ||
1927 | 219 | context.SharedDBContext(user=config('database-user'), | ||
1928 | 220 | database=config('database'), | ||
1929 | 221 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1930 | 222 | 'services': [], | ||
1931 | 223 | 'packages': ['plumgrid-lxc', | ||
1932 | 224 | 'iovisor-dkms'], | ||
1933 | 225 | 'server_packages': ['neutron-server', | ||
1934 | 226 | 'neutron-plugin-plumgrid'], | ||
1935 | 227 | 'server_services': ['neutron-server'] | ||
1936 | 228 | }, | ||
1937 | 229 | 'midonet': { | ||
1938 | 230 | 'config': '/etc/neutron/plugins/midonet/midonet.ini', | ||
1939 | 231 | 'driver': 'midonet.neutron.plugin.MidonetPluginV2', | ||
1940 | 232 | 'contexts': [ | ||
1941 | 233 | context.SharedDBContext(user=config('neutron-database-user'), | ||
1942 | 234 | database=config('neutron-database'), | ||
1943 | 235 | relation_prefix='neutron', | ||
1944 | 236 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1945 | 237 | 'services': [], | ||
1946 | 238 | 'packages': [[headers_package()] + determine_dkms_package()], | ||
1947 | 239 | 'server_packages': ['neutron-server', | ||
1948 | 240 | 'python-neutron-plugin-midonet'], | ||
1949 | 241 | 'server_services': ['neutron-server'] | ||
1950 | 242 | >>>>>>> MERGE-SOURCE | ||
1951 | 212 | } | 243 | } |
1952 | 213 | } | 244 | } |
1953 | 214 | if release >= 'icehouse': | 245 | if release >= 'icehouse': |
1954 | @@ -310,10 +341,19 @@ | |||
1955 | 310 | def parse_data_port_mappings(mappings, default_bridge='br-data'): | 341 | def parse_data_port_mappings(mappings, default_bridge='br-data'): |
1956 | 311 | """Parse data port mappings. | 342 | """Parse data port mappings. |
1957 | 312 | 343 | ||
1958 | 344 | <<<<<<< TREE | ||
1959 | 313 | Mappings must be a space-delimited list of port:bridge mappings. | 345 | Mappings must be a space-delimited list of port:bridge mappings. |
1960 | 346 | ======= | ||
1961 | 347 | Mappings must be a space-delimited list of bridge:port. | ||
1962 | 348 | >>>>>>> MERGE-SOURCE | ||
1963 | 314 | 349 | ||
1964 | 350 | <<<<<<< TREE | ||
1965 | 315 | Returns dict of the form {port:bridge} where port may be an mac address or | 351 | Returns dict of the form {port:bridge} where port may be an mac address or |
1966 | 316 | interface name. | 352 | interface name. |
1967 | 353 | ======= | ||
1968 | 354 | Returns dict of the form {port:bridge} where ports may be mac addresses or | ||
1969 | 355 | interface names. | ||
1970 | 356 | >>>>>>> MERGE-SOURCE | ||
1971 | 317 | """ | 357 | """ |
1972 | 318 | 358 | ||
1973 | 319 | # NOTE(dosaboy): we use rvalue for key to allow multiple values to be | 359 | # NOTE(dosaboy): we use rvalue for key to allow multiple values to be |
1974 | 320 | 360 | ||
1975 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' | |||
1976 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-08-10 16:34:04 +0000 | |||
1977 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2016-01-06 21:19:13 +0000 | |||
1978 | @@ -13,3 +13,9 @@ | |||
1979 | 13 | err to syslog = {{ use_syslog }} | 13 | err to syslog = {{ use_syslog }} |
1980 | 14 | clog to syslog = {{ use_syslog }} | 14 | clog to syslog = {{ use_syslog }} |
1981 | 15 | 15 | ||
1982 | 16 | [client] | ||
1983 | 17 | {% if rbd_client_cache_settings -%} | ||
1984 | 18 | {% for key, value in rbd_client_cache_settings.iteritems() -%} | ||
1985 | 19 | {{ key }} = {{ value }} | ||
1986 | 20 | {% endfor -%} | ||
1987 | 21 | {%- endif %} | ||
1988 | 16 | \ No newline at end of file | 22 | \ No newline at end of file |
1989 | 17 | 23 | ||
1990 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
1991 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-01-13 14:36:44 +0000 | |||
1992 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2016-01-06 21:19:13 +0000 | |||
1993 | @@ -12,19 +12,26 @@ | |||
1994 | 12 | option tcplog | 12 | option tcplog |
1995 | 13 | option dontlognull | 13 | option dontlognull |
1996 | 14 | retries 3 | 14 | retries 3 |
2000 | 15 | timeout queue 1000 | 15 | {%- if haproxy_queue_timeout %} |
2001 | 16 | timeout connect 1000 | 16 | timeout queue {{ haproxy_queue_timeout }} |
2002 | 17 | {% if haproxy_client_timeout -%} | 17 | {%- else %} |
2003 | 18 | timeout queue 5000 | ||
2004 | 19 | {%- endif %} | ||
2005 | 20 | {%- if haproxy_connect_timeout %} | ||
2006 | 21 | timeout connect {{ haproxy_connect_timeout }} | ||
2007 | 22 | {%- else %} | ||
2008 | 23 | timeout connect 5000 | ||
2009 | 24 | {%- endif %} | ||
2010 | 25 | {%- if haproxy_client_timeout %} | ||
2011 | 18 | timeout client {{ haproxy_client_timeout }} | 26 | timeout client {{ haproxy_client_timeout }} |
2013 | 19 | {% else -%} | 27 | {%- else %} |
2014 | 20 | timeout client 30000 | 28 | timeout client 30000 |
2018 | 21 | {% endif -%} | 29 | {%- endif %} |
2019 | 22 | 30 | {%- if haproxy_server_timeout %} | |
2017 | 23 | {% if haproxy_server_timeout -%} | ||
2020 | 24 | timeout server {{ haproxy_server_timeout }} | 31 | timeout server {{ haproxy_server_timeout }} |
2022 | 25 | {% else -%} | 32 | {%- else %} |
2023 | 26 | timeout server 30000 | 33 | timeout server 30000 |
2025 | 27 | {% endif -%} | 34 | {%- endif %} |
2026 | 28 | 35 | ||
2027 | 29 | listen stats {{ stat_port }} | 36 | listen stats {{ stat_port }} |
2028 | 30 | mode http | 37 | mode http |
2029 | 31 | 38 | ||
2030 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
2031 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-10-22 13:19:13 +0000 | |||
2032 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2016-01-06 21:19:13 +0000 | |||
2033 | @@ -25,7 +25,12 @@ | |||
2034 | 25 | import re | 25 | import re |
2035 | 26 | 26 | ||
2036 | 27 | import six | 27 | import six |
2038 | 28 | import traceback | 28 | <<<<<<< TREE |
2039 | 29 | import traceback | ||
2040 | 30 | ======= | ||
2041 | 31 | import traceback | ||
2042 | 32 | import uuid | ||
2043 | 33 | >>>>>>> MERGE-SOURCE | ||
2044 | 29 | import yaml | 34 | import yaml |
2045 | 30 | 35 | ||
2046 | 31 | from charmhelpers.contrib.network import ip | 36 | from charmhelpers.contrib.network import ip |
2047 | @@ -41,6 +46,7 @@ | |||
2048 | 41 | log as juju_log, | 46 | log as juju_log, |
2049 | 42 | charm_dir, | 47 | charm_dir, |
2050 | 43 | INFO, | 48 | INFO, |
2051 | 49 | related_units, | ||
2052 | 44 | relation_ids, | 50 | relation_ids, |
2053 | 45 | relation_set, | 51 | relation_set, |
2054 | 46 | status_set, | 52 | status_set, |
2055 | @@ -83,7 +89,12 @@ | |||
2056 | 83 | ('trusty', 'icehouse'), | 89 | ('trusty', 'icehouse'), |
2057 | 84 | ('utopic', 'juno'), | 90 | ('utopic', 'juno'), |
2058 | 85 | ('vivid', 'kilo'), | 91 | ('vivid', 'kilo'), |
2060 | 86 | ('wily', 'liberty'), | 92 | <<<<<<< TREE |
2061 | 93 | ('wily', 'liberty'), | ||
2062 | 94 | ======= | ||
2063 | 95 | ('wily', 'liberty'), | ||
2064 | 96 | ('xenial', 'mitaka'), | ||
2065 | 97 | >>>>>>> MERGE-SOURCE | ||
2066 | 87 | ]) | 98 | ]) |
2067 | 88 | 99 | ||
2068 | 89 | 100 | ||
2069 | @@ -96,7 +107,12 @@ | |||
2070 | 96 | ('2014.1', 'icehouse'), | 107 | ('2014.1', 'icehouse'), |
2071 | 97 | ('2014.2', 'juno'), | 108 | ('2014.2', 'juno'), |
2072 | 98 | ('2015.1', 'kilo'), | 109 | ('2015.1', 'kilo'), |
2074 | 99 | ('2015.2', 'liberty'), | 110 | <<<<<<< TREE |
2075 | 111 | ('2015.2', 'liberty'), | ||
2076 | 112 | ======= | ||
2077 | 113 | ('2015.2', 'liberty'), | ||
2078 | 114 | ('2016.1', 'mitaka'), | ||
2079 | 115 | >>>>>>> MERGE-SOURCE | ||
2080 | 100 | ]) | 116 | ]) |
2081 | 101 | 117 | ||
2082 | 102 | # The ugly duckling | 118 | # The ugly duckling |
2083 | @@ -119,10 +135,17 @@ | |||
2084 | 119 | ('2.2.0', 'juno'), | 135 | ('2.2.0', 'juno'), |
2085 | 120 | ('2.2.1', 'kilo'), | 136 | ('2.2.1', 'kilo'), |
2086 | 121 | ('2.2.2', 'kilo'), | 137 | ('2.2.2', 'kilo'), |
2089 | 122 | ('2.3.0', 'liberty'), | 138 | <<<<<<< TREE |
2090 | 123 | ('2.4.0', 'liberty'), | 139 | ('2.3.0', 'liberty'), |
2091 | 140 | ('2.4.0', 'liberty'), | ||
2092 | 141 | ======= | ||
2093 | 142 | ('2.3.0', 'liberty'), | ||
2094 | 143 | ('2.4.0', 'liberty'), | ||
2095 | 144 | ('2.5.0', 'liberty'), | ||
2096 | 145 | >>>>>>> MERGE-SOURCE | ||
2097 | 124 | ]) | 146 | ]) |
2098 | 125 | 147 | ||
2099 | 148 | <<<<<<< TREE | ||
2100 | 126 | # >= Liberty version->codename mapping | 149 | # >= Liberty version->codename mapping |
2101 | 127 | PACKAGE_CODENAMES = { | 150 | PACKAGE_CODENAMES = { |
2102 | 128 | 'nova-common': OrderedDict([ | 151 | 'nova-common': OrderedDict([ |
2103 | @@ -154,6 +177,48 @@ | |||
2104 | 154 | ]), | 177 | ]), |
2105 | 155 | } | 178 | } |
2106 | 156 | 179 | ||
2107 | 180 | ======= | ||
2108 | 181 | # >= Liberty version->codename mapping | ||
2109 | 182 | PACKAGE_CODENAMES = { | ||
2110 | 183 | 'nova-common': OrderedDict([ | ||
2111 | 184 | ('12.0', 'liberty'), | ||
2112 | 185 | ('13.0', 'mitaka'), | ||
2113 | 186 | ]), | ||
2114 | 187 | 'neutron-common': OrderedDict([ | ||
2115 | 188 | ('7.0', 'liberty'), | ||
2116 | 189 | ('8.0', 'mitaka'), | ||
2117 | 190 | ]), | ||
2118 | 191 | 'cinder-common': OrderedDict([ | ||
2119 | 192 | ('7.0', 'liberty'), | ||
2120 | 193 | ('8.0', 'mitaka'), | ||
2121 | 194 | ]), | ||
2122 | 195 | 'keystone': OrderedDict([ | ||
2123 | 196 | ('8.0', 'liberty'), | ||
2124 | 197 | ('9.0', 'mitaka'), | ||
2125 | 198 | ]), | ||
2126 | 199 | 'horizon-common': OrderedDict([ | ||
2127 | 200 | ('8.0', 'liberty'), | ||
2128 | 201 | ('9.0', 'mitaka'), | ||
2129 | 202 | ]), | ||
2130 | 203 | 'ceilometer-common': OrderedDict([ | ||
2131 | 204 | ('5.0', 'liberty'), | ||
2132 | 205 | ('6.0', 'mitaka'), | ||
2133 | 206 | ]), | ||
2134 | 207 | 'heat-common': OrderedDict([ | ||
2135 | 208 | ('5.0', 'liberty'), | ||
2136 | 209 | ('6.0', 'mitaka'), | ||
2137 | 210 | ]), | ||
2138 | 211 | 'glance-common': OrderedDict([ | ||
2139 | 212 | ('11.0', 'liberty'), | ||
2140 | 213 | ('12.0', 'mitaka'), | ||
2141 | 214 | ]), | ||
2142 | 215 | 'openstack-dashboard': OrderedDict([ | ||
2143 | 216 | ('8.0', 'liberty'), | ||
2144 | 217 | ('9.0', 'mitaka'), | ||
2145 | 218 | ]), | ||
2146 | 219 | } | ||
2147 | 220 | |||
2148 | 221 | >>>>>>> MERGE-SOURCE | ||
2149 | 157 | DEFAULT_LOOPBACK_SIZE = '5G' | 222 | DEFAULT_LOOPBACK_SIZE = '5G' |
2150 | 158 | 223 | ||
2151 | 159 | 224 | ||
2152 | @@ -237,6 +302,7 @@ | |||
2153 | 237 | error_out(e) | 302 | error_out(e) |
2154 | 238 | 303 | ||
2155 | 239 | vers = apt.upstream_version(pkg.current_ver.ver_str) | 304 | vers = apt.upstream_version(pkg.current_ver.ver_str) |
2156 | 305 | <<<<<<< TREE | ||
2157 | 240 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) | 306 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) |
2158 | 241 | if match: | 307 | if match: |
2159 | 242 | vers = match.group(0) | 308 | vers = match.group(0) |
2160 | @@ -262,6 +328,35 @@ | |||
2161 | 262 | return None | 328 | return None |
2162 | 263 | e = 'Could not determine OpenStack codename for version %s' % vers | 329 | e = 'Could not determine OpenStack codename for version %s' % vers |
2163 | 264 | error_out(e) | 330 | error_out(e) |
2164 | 331 | ======= | ||
2165 | 332 | if 'swift' in pkg.name: | ||
2166 | 333 | # Fully x.y.z match for swift versions | ||
2167 | 334 | match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) | ||
2168 | 335 | else: | ||
2169 | 336 | # x.y match only for 20XX.X | ||
2170 | 337 | # and ignore patch level for other packages | ||
2171 | 338 | match = re.match('^(\d+)\.(\d+)', vers) | ||
2172 | 339 | |||
2173 | 340 | if match: | ||
2174 | 341 | vers = match.group(0) | ||
2175 | 342 | |||
2176 | 343 | # >= Liberty independent project versions | ||
2177 | 344 | if (package in PACKAGE_CODENAMES and | ||
2178 | 345 | vers in PACKAGE_CODENAMES[package]): | ||
2179 | 346 | return PACKAGE_CODENAMES[package][vers] | ||
2180 | 347 | else: | ||
2181 | 348 | # < Liberty co-ordinated project versions | ||
2182 | 349 | try: | ||
2183 | 350 | if 'swift' in pkg.name: | ||
2184 | 351 | return SWIFT_CODENAMES[vers] | ||
2185 | 352 | else: | ||
2186 | 353 | return OPENSTACK_CODENAMES[vers] | ||
2187 | 354 | except KeyError: | ||
2188 | 355 | if not fatal: | ||
2189 | 356 | return None | ||
2190 | 357 | e = 'Could not determine OpenStack codename for version %s' % vers | ||
2191 | 358 | error_out(e) | ||
2192 | 359 | >>>>>>> MERGE-SOURCE | ||
2193 | 265 | 360 | ||
2194 | 266 | 361 | ||
2195 | 267 | def get_os_version_package(pkg, fatal=True): | 362 | def get_os_version_package(pkg, fatal=True): |
2196 | @@ -371,9 +466,18 @@ | |||
2197 | 371 | 'kilo': 'trusty-updates/kilo', | 466 | 'kilo': 'trusty-updates/kilo', |
2198 | 372 | 'kilo/updates': 'trusty-updates/kilo', | 467 | 'kilo/updates': 'trusty-updates/kilo', |
2199 | 373 | 'kilo/proposed': 'trusty-proposed/kilo', | 468 | 'kilo/proposed': 'trusty-proposed/kilo', |
2203 | 374 | 'liberty': 'trusty-updates/liberty', | 469 | <<<<<<< TREE |
2204 | 375 | 'liberty/updates': 'trusty-updates/liberty', | 470 | 'liberty': 'trusty-updates/liberty', |
2205 | 376 | 'liberty/proposed': 'trusty-proposed/liberty', | 471 | 'liberty/updates': 'trusty-updates/liberty', |
2206 | 472 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
2207 | 473 | ======= | ||
2208 | 474 | 'liberty': 'trusty-updates/liberty', | ||
2209 | 475 | 'liberty/updates': 'trusty-updates/liberty', | ||
2210 | 476 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
2211 | 477 | 'mitaka': 'trusty-updates/mitaka', | ||
2212 | 478 | 'mitaka/updates': 'trusty-updates/mitaka', | ||
2213 | 479 | 'mitaka/proposed': 'trusty-proposed/mitaka', | ||
2214 | 480 | >>>>>>> MERGE-SOURCE | ||
2215 | 377 | } | 481 | } |
2216 | 378 | 482 | ||
2217 | 379 | try: | 483 | try: |
2218 | @@ -749,6 +853,7 @@ | |||
2219 | 749 | return os.path.join(parent_dir, os.path.basename(p['repository'])) | 853 | return os.path.join(parent_dir, os.path.basename(p['repository'])) |
2220 | 750 | 854 | ||
2221 | 751 | return None | 855 | return None |
2222 | 856 | <<<<<<< TREE | ||
2223 | 752 | 857 | ||
2224 | 753 | 858 | ||
2225 | 754 | def git_yaml_value(projects_yaml, key): | 859 | def git_yaml_value(projects_yaml, key): |
2226 | @@ -975,3 +1080,249 @@ | |||
2227 | 975 | action_set({'outcome': 'no upgrade available.'}) | 1080 | action_set({'outcome': 'no upgrade available.'}) |
2228 | 976 | 1081 | ||
2229 | 977 | return ret | 1082 | return ret |
2230 | 1083 | ======= | ||
2231 | 1084 | |||
2232 | 1085 | |||
2233 | 1086 | def git_yaml_value(projects_yaml, key): | ||
2234 | 1087 | """ | ||
2235 | 1088 | Return the value in projects_yaml for the specified key. | ||
2236 | 1089 | """ | ||
2237 | 1090 | projects = _git_yaml_load(projects_yaml) | ||
2238 | 1091 | |||
2239 | 1092 | if key in projects.keys(): | ||
2240 | 1093 | return projects[key] | ||
2241 | 1094 | |||
2242 | 1095 | return None | ||
2243 | 1096 | |||
2244 | 1097 | |||
2245 | 1098 | def os_workload_status(configs, required_interfaces, charm_func=None): | ||
2246 | 1099 | """ | ||
2247 | 1100 | Decorator to set workload status based on complete contexts | ||
2248 | 1101 | """ | ||
2249 | 1102 | def wrap(f): | ||
2250 | 1103 | @wraps(f) | ||
2251 | 1104 | def wrapped_f(*args, **kwargs): | ||
2252 | 1105 | # Run the original function first | ||
2253 | 1106 | f(*args, **kwargs) | ||
2254 | 1107 | # Set workload status now that contexts have been | ||
2255 | 1108 | # acted on | ||
2256 | 1109 | set_os_workload_status(configs, required_interfaces, charm_func) | ||
2257 | 1110 | return wrapped_f | ||
2258 | 1111 | return wrap | ||
2259 | 1112 | |||
2260 | 1113 | |||
2261 | 1114 | def set_os_workload_status(configs, required_interfaces, charm_func=None): | ||
2262 | 1115 | """ | ||
2263 | 1116 | Set workload status based on complete contexts. | ||
2264 | 1117 | status-set missing or incomplete contexts | ||
2265 | 1118 | and juju-log details of missing required data. | ||
2266 | 1119 | charm_func is a charm specific function to run checking | ||
2267 | 1120 | for charm specific requirements such as a VIP setting. | ||
2268 | 1121 | """ | ||
2269 | 1122 | incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) | ||
2270 | 1123 | state = 'active' | ||
2271 | 1124 | missing_relations = [] | ||
2272 | 1125 | incomplete_relations = [] | ||
2273 | 1126 | message = None | ||
2274 | 1127 | charm_state = None | ||
2275 | 1128 | charm_message = None | ||
2276 | 1129 | |||
2277 | 1130 | for generic_interface in incomplete_rel_data.keys(): | ||
2278 | 1131 | related_interface = None | ||
2279 | 1132 | missing_data = {} | ||
2280 | 1133 | # Related or not? | ||
2281 | 1134 | for interface in incomplete_rel_data[generic_interface]: | ||
2282 | 1135 | if incomplete_rel_data[generic_interface][interface].get('related'): | ||
2283 | 1136 | related_interface = interface | ||
2284 | 1137 | missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') | ||
2285 | 1138 | # No relation ID for the generic_interface | ||
2286 | 1139 | if not related_interface: | ||
2287 | 1140 | juju_log("{} relation is missing and must be related for " | ||
2288 | 1141 | "functionality. ".format(generic_interface), 'WARN') | ||
2289 | 1142 | state = 'blocked' | ||
2290 | 1143 | if generic_interface not in missing_relations: | ||
2291 | 1144 | missing_relations.append(generic_interface) | ||
2292 | 1145 | else: | ||
2293 | 1146 | # Relation ID exists but no related unit | ||
2294 | 1147 | if not missing_data: | ||
2295 | 1148 | # Edge case relation ID exists but departing | ||
2296 | 1149 | if ('departed' in hook_name() or 'broken' in hook_name()) \ | ||
2297 | 1150 | and related_interface in hook_name(): | ||
2298 | 1151 | state = 'blocked' | ||
2299 | 1152 | if generic_interface not in missing_relations: | ||
2300 | 1153 | missing_relations.append(generic_interface) | ||
2301 | 1154 | juju_log("{} relation's interface, {}, " | ||
2302 | 1155 | "relationship is departed or broken " | ||
2303 | 1156 | "and is required for functionality." | ||
2304 | 1157 | "".format(generic_interface, related_interface), "WARN") | ||
2305 | 1158 | # Normal case relation ID exists but no related unit | ||
2306 | 1159 | # (joining) | ||
2307 | 1160 | else: | ||
2308 | 1161 | juju_log("{} relation's interface, {}, is related but has " | ||
2309 | 1162 | "no units in the relation." | ||
2310 | 1163 | "".format(generic_interface, related_interface), "INFO") | ||
2311 | 1164 | # Related unit exists and data missing on the relation | ||
2312 | 1165 | else: | ||
2313 | 1166 | juju_log("{} relation's interface, {}, is related awaiting " | ||
2314 | 1167 | "the following data from the relationship: {}. " | ||
2315 | 1168 | "".format(generic_interface, related_interface, | ||
2316 | 1169 | ", ".join(missing_data)), "INFO") | ||
2317 | 1170 | if state != 'blocked': | ||
2318 | 1171 | state = 'waiting' | ||
2319 | 1172 | if generic_interface not in incomplete_relations \ | ||
2320 | 1173 | and generic_interface not in missing_relations: | ||
2321 | 1174 | incomplete_relations.append(generic_interface) | ||
2322 | 1175 | |||
2323 | 1176 | if missing_relations: | ||
2324 | 1177 | message = "Missing relations: {}".format(", ".join(missing_relations)) | ||
2325 | 1178 | if incomplete_relations: | ||
2326 | 1179 | message += "; incomplete relations: {}" \ | ||
2327 | 1180 | "".format(", ".join(incomplete_relations)) | ||
2328 | 1181 | state = 'blocked' | ||
2329 | 1182 | elif incomplete_relations: | ||
2330 | 1183 | message = "Incomplete relations: {}" \ | ||
2331 | 1184 | "".format(", ".join(incomplete_relations)) | ||
2332 | 1185 | state = 'waiting' | ||
2333 | 1186 | |||
2334 | 1187 | # Run charm specific checks | ||
2335 | 1188 | if charm_func: | ||
2336 | 1189 | charm_state, charm_message = charm_func(configs) | ||
2337 | 1190 | if charm_state != 'active' and charm_state != 'unknown': | ||
2338 | 1191 | state = workload_state_compare(state, charm_state) | ||
2339 | 1192 | if message: | ||
2340 | 1193 | charm_message = charm_message.replace("Incomplete relations: ", | ||
2341 | 1194 | "") | ||
2342 | 1195 | message = "{}, {}".format(message, charm_message) | ||
2343 | 1196 | else: | ||
2344 | 1197 | message = charm_message | ||
2345 | 1198 | |||
2346 | 1199 | # Set to active if all requirements have been met | ||
2347 | 1200 | if state == 'active': | ||
2348 | 1201 | message = "Unit is ready" | ||
2349 | 1202 | juju_log(message, "INFO") | ||
2350 | 1203 | |||
2351 | 1204 | status_set(state, message) | ||
2352 | 1205 | |||
2353 | 1206 | |||
2354 | 1207 | def workload_state_compare(current_workload_state, workload_state): | ||
2355 | 1208 | """ Return highest priority of two states""" | ||
2356 | 1209 | hierarchy = {'unknown': -1, | ||
2357 | 1210 | 'active': 0, | ||
2358 | 1211 | 'maintenance': 1, | ||
2359 | 1212 | 'waiting': 2, | ||
2360 | 1213 | 'blocked': 3, | ||
2361 | 1214 | } | ||
2362 | 1215 | |||
2363 | 1216 | if hierarchy.get(workload_state) is None: | ||
2364 | 1217 | workload_state = 'unknown' | ||
2365 | 1218 | if hierarchy.get(current_workload_state) is None: | ||
2366 | 1219 | current_workload_state = 'unknown' | ||
2367 | 1220 | |||
2368 | 1221 | # Set workload_state based on hierarchy of statuses | ||
2369 | 1222 | if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): | ||
2370 | 1223 | return current_workload_state | ||
2371 | 1224 | else: | ||
2372 | 1225 | return workload_state | ||
2373 | 1226 | |||
2374 | 1227 | |||
2375 | 1228 | def incomplete_relation_data(configs, required_interfaces): | ||
2376 | 1229 | """ | ||
2377 | 1230 | Check complete contexts against required_interfaces | ||
2378 | 1231 | Return dictionary of incomplete relation data. | ||
2379 | 1232 | |||
2380 | 1233 | configs is an OSConfigRenderer object with configs registered | ||
2381 | 1234 | |||
2382 | 1235 | required_interfaces is a dictionary of required general interfaces | ||
2383 | 1236 | with dictionary values of possible specific interfaces. | ||
2384 | 1237 | Example: | ||
2385 | 1238 | required_interfaces = {'database': ['shared-db', 'pgsql-db']} | ||
2386 | 1239 | |||
2387 | 1240 | The interface is said to be satisfied if anyone of the interfaces in the | ||
2388 | 1241 | list has a complete context. | ||
2389 | 1242 | |||
2390 | 1243 | Return dictionary of incomplete or missing required contexts with relation | ||
2391 | 1244 | status of interfaces and any missing data points. Example: | ||
2392 | 1245 | {'message': | ||
2393 | 1246 | {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, | ||
2394 | 1247 | 'zeromq-configuration': {'related': False}}, | ||
2395 | 1248 | 'identity': | ||
2396 | 1249 | {'identity-service': {'related': False}}, | ||
2397 | 1250 | 'database': | ||
2398 | 1251 | {'pgsql-db': {'related': False}, | ||
2399 | 1252 | 'shared-db': {'related': True}}} | ||
2400 | 1253 | """ | ||
2401 | 1254 | complete_ctxts = configs.complete_contexts() | ||
2402 | 1255 | incomplete_relations = [] | ||
2403 | 1256 | for svc_type in required_interfaces.keys(): | ||
2404 | 1257 | # Avoid duplicates | ||
2405 | 1258 | found_ctxt = False | ||
2406 | 1259 | for interface in required_interfaces[svc_type]: | ||
2407 | 1260 | if interface in complete_ctxts: | ||
2408 | 1261 | found_ctxt = True | ||
2409 | 1262 | if not found_ctxt: | ||
2410 | 1263 | incomplete_relations.append(svc_type) | ||
2411 | 1264 | incomplete_context_data = {} | ||
2412 | 1265 | for i in incomplete_relations: | ||
2413 | 1266 | incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) | ||
2414 | 1267 | return incomplete_context_data | ||
2415 | 1268 | |||
2416 | 1269 | |||
2417 | 1270 | def do_action_openstack_upgrade(package, upgrade_callback, configs): | ||
2418 | 1271 | """Perform action-managed OpenStack upgrade. | ||
2419 | 1272 | |||
2420 | 1273 | Upgrades packages to the configured openstack-origin version and sets | ||
2421 | 1274 | the corresponding action status as a result. | ||
2422 | 1275 | |||
2423 | 1276 | If the charm was installed from source we cannot upgrade it. | ||
2424 | 1277 | For backwards compatibility a config flag (action-managed-upgrade) must | ||
2425 | 1278 | be set for this code to run, otherwise a full service level upgrade will | ||
2426 | 1279 | fire on config-changed. | ||
2427 | 1280 | |||
2428 | 1281 | @param package: package name for determining if upgrade available | ||
2429 | 1282 | @param upgrade_callback: function callback to charm's upgrade function | ||
2430 | 1283 | @param configs: templating object derived from OSConfigRenderer class | ||
2431 | 1284 | |||
2432 | 1285 | @return: True if upgrade successful; False if upgrade failed or skipped | ||
2433 | 1286 | """ | ||
2434 | 1287 | ret = False | ||
2435 | 1288 | |||
2436 | 1289 | if git_install_requested(): | ||
2437 | 1290 | action_set({'outcome': 'installed from source, skipped upgrade.'}) | ||
2438 | 1291 | else: | ||
2439 | 1292 | if openstack_upgrade_available(package): | ||
2440 | 1293 | if config('action-managed-upgrade'): | ||
2441 | 1294 | juju_log('Upgrading OpenStack release') | ||
2442 | 1295 | |||
2443 | 1296 | try: | ||
2444 | 1297 | upgrade_callback(configs=configs) | ||
2445 | 1298 | action_set({'outcome': 'success, upgrade completed.'}) | ||
2446 | 1299 | ret = True | ||
2447 | 1300 | except: | ||
2448 | 1301 | action_set({'outcome': 'upgrade failed, see traceback.'}) | ||
2449 | 1302 | action_set({'traceback': traceback.format_exc()}) | ||
2450 | 1303 | action_fail('do_openstack_upgrade resulted in an ' | ||
2451 | 1304 | 'unexpected error') | ||
2452 | 1305 | else: | ||
2453 | 1306 | action_set({'outcome': 'action-managed-upgrade config is ' | ||
2454 | 1307 | 'False, skipped upgrade.'}) | ||
2455 | 1308 | else: | ||
2456 | 1309 | action_set({'outcome': 'no upgrade available.'}) | ||
2457 | 1310 | |||
2458 | 1311 | return ret | ||
2459 | 1312 | |||
2460 | 1313 | |||
2461 | 1314 | def remote_restart(rel_name, remote_service=None): | ||
2462 | 1315 | trigger = { | ||
2463 | 1316 | 'restart-trigger': str(uuid.uuid4()), | ||
2464 | 1317 | } | ||
2465 | 1318 | if remote_service: | ||
2466 | 1319 | trigger['remote-service'] = remote_service | ||
2467 | 1320 | for rid in relation_ids(rel_name): | ||
2468 | 1321 | # This subordinate can be related to two seperate services using | ||
2469 | 1322 | # different subordinate relations so only issue the restart if | ||
2470 | 1323 | # the principle is conencted down the relation we think it is | ||
2471 | 1324 | if related_units(relid=rid): | ||
2472 | 1325 | relation_set(relation_id=rid, | ||
2473 | 1326 | relation_settings=trigger, | ||
2474 | 1327 | ) | ||
2475 | 1328 | >>>>>>> MERGE-SOURCE | ||
2476 | 978 | 1329 | ||
2477 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' | |||
2478 | --- hooks/charmhelpers/contrib/python/packages.py 2015-08-10 16:34:04 +0000 | |||
2479 | +++ hooks/charmhelpers/contrib/python/packages.py 2016-01-06 21:19:13 +0000 | |||
2480 | @@ -42,8 +42,12 @@ | |||
2481 | 42 | yield "--{0}={1}".format(key, value) | 42 | yield "--{0}={1}".format(key, value) |
2482 | 43 | 43 | ||
2483 | 44 | 44 | ||
2486 | 45 | def pip_install_requirements(requirements, **options): | 45 | def pip_install_requirements(requirements, constraints=None, **options): |
2487 | 46 | """Install a requirements file """ | 46 | """Install a requirements file. |
2488 | 47 | |||
2489 | 48 | :param constraints: Path to pip constraints file. | ||
2490 | 49 | http://pip.readthedocs.org/en/stable/user_guide/#constraints-files | ||
2491 | 50 | """ | ||
2492 | 47 | command = ["install"] | 51 | command = ["install"] |
2493 | 48 | 52 | ||
2494 | 49 | available_options = ('proxy', 'src', 'log', ) | 53 | available_options = ('proxy', 'src', 'log', ) |
2495 | @@ -51,8 +55,13 @@ | |||
2496 | 51 | command.append(option) | 55 | command.append(option) |
2497 | 52 | 56 | ||
2498 | 53 | command.append("-r {0}".format(requirements)) | 57 | command.append("-r {0}".format(requirements)) |
2501 | 54 | log("Installing from file: {} with options: {}".format(requirements, | 58 | if constraints: |
2502 | 55 | command)) | 59 | command.append("-c {0}".format(constraints)) |
2503 | 60 | log("Installing from file: {} with constraints {} " | ||
2504 | 61 | "and options: {}".format(requirements, constraints, command)) | ||
2505 | 62 | else: | ||
2506 | 63 | log("Installing from file: {} with options: {}".format(requirements, | ||
2507 | 64 | command)) | ||
2508 | 56 | pip_execute(command) | 65 | pip_execute(command) |
2509 | 57 | 66 | ||
2510 | 58 | 67 | ||
2511 | 59 | 68 | ||
2512 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
2513 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-10-22 13:19:13 +0000 | |||
2514 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2016-01-06 21:19:13 +0000 | |||
2515 | @@ -23,6 +23,8 @@ | |||
2516 | 23 | # James Page <james.page@ubuntu.com> | 23 | # James Page <james.page@ubuntu.com> |
2517 | 24 | # Adam Gandelman <adamg@ubuntu.com> | 24 | # Adam Gandelman <adamg@ubuntu.com> |
2518 | 25 | # | 25 | # |
2519 | 26 | import bisect | ||
2520 | 27 | import six | ||
2521 | 26 | 28 | ||
2522 | 27 | import os | 29 | import os |
2523 | 28 | import shutil | 30 | import shutil |
2524 | @@ -72,6 +74,394 @@ | |||
2525 | 72 | err to syslog = {use_syslog} | 74 | err to syslog = {use_syslog} |
2526 | 73 | clog to syslog = {use_syslog} | 75 | clog to syslog = {use_syslog} |
2527 | 74 | """ | 76 | """ |
2528 | 77 | # For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs) | ||
2529 | 78 | powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608] | ||
2530 | 79 | |||
2531 | 80 | |||
2532 | 81 | def validator(value, valid_type, valid_range=None): | ||
2533 | 82 | """ | ||
2534 | 83 | Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values | ||
2535 | 84 | Example input: | ||
2536 | 85 | validator(value=1, | ||
2537 | 86 | valid_type=int, | ||
2538 | 87 | valid_range=[0, 2]) | ||
2539 | 88 | This says I'm testing value=1. It must be an int inclusive in [0,2] | ||
2540 | 89 | |||
2541 | 90 | :param value: The value to validate | ||
2542 | 91 | :param valid_type: The type that value should be. | ||
2543 | 92 | :param valid_range: A range of values that value can assume. | ||
2544 | 93 | :return: | ||
2545 | 94 | """ | ||
2546 | 95 | assert isinstance(value, valid_type), "{} is not a {}".format( | ||
2547 | 96 | value, | ||
2548 | 97 | valid_type) | ||
2549 | 98 | if valid_range is not None: | ||
2550 | 99 | assert isinstance(valid_range, list), \ | ||
2551 | 100 | "valid_range must be a list, was given {}".format(valid_range) | ||
2552 | 101 | # If we're dealing with strings | ||
2553 | 102 | if valid_type is six.string_types: | ||
2554 | 103 | assert value in valid_range, \ | ||
2555 | 104 | "{} is not in the list {}".format(value, valid_range) | ||
2556 | 105 | # Integer, float should have a min and max | ||
2557 | 106 | else: | ||
2558 | 107 | if len(valid_range) != 2: | ||
2559 | 108 | raise ValueError( | ||
2560 | 109 | "Invalid valid_range list of {} for {}. " | ||
2561 | 110 | "List must be [min,max]".format(valid_range, value)) | ||
2562 | 111 | assert value >= valid_range[0], \ | ||
2563 | 112 | "{} is less than minimum allowed value of {}".format( | ||
2564 | 113 | value, valid_range[0]) | ||
2565 | 114 | assert value <= valid_range[1], \ | ||
2566 | 115 | "{} is greater than maximum allowed value of {}".format( | ||
2567 | 116 | value, valid_range[1]) | ||
2568 | 117 | |||
2569 | 118 | |||
2570 | 119 | class PoolCreationError(Exception): | ||
2571 | 120 | """ | ||
2572 | 121 | A custom error to inform the caller that a pool creation failed. Provides an error message | ||
2573 | 122 | """ | ||
2574 | 123 | def __init__(self, message): | ||
2575 | 124 | super(PoolCreationError, self).__init__(message) | ||
2576 | 125 | |||
2577 | 126 | |||
2578 | 127 | class Pool(object): | ||
2579 | 128 | """ | ||
2580 | 129 | An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. | ||
2581 | 130 | Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). | ||
2582 | 131 | """ | ||
2583 | 132 | def __init__(self, service, name): | ||
2584 | 133 | self.service = service | ||
2585 | 134 | self.name = name | ||
2586 | 135 | |||
2587 | 136 | # Create the pool if it doesn't exist already | ||
2588 | 137 | # To be implemented by subclasses | ||
2589 | 138 | def create(self): | ||
2590 | 139 | pass | ||
2591 | 140 | |||
2592 | 141 | def add_cache_tier(self, cache_pool, mode): | ||
2593 | 142 | """ | ||
2594 | 143 | Adds a new cache tier to an existing pool. | ||
2595 | 144 | :param cache_pool: six.string_types. The cache tier pool name to add. | ||
2596 | 145 | :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"] | ||
2597 | 146 | :return: None | ||
2598 | 147 | """ | ||
2599 | 148 | # Check the input types and values | ||
2600 | 149 | validator(value=cache_pool, valid_type=six.string_types) | ||
2601 | 150 | validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"]) | ||
2602 | 151 | |||
2603 | 152 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool]) | ||
2604 | 153 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode]) | ||
2605 | 154 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool]) | ||
2606 | 155 | check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom']) | ||
2607 | 156 | |||
2608 | 157 | def remove_cache_tier(self, cache_pool): | ||
2609 | 158 | """ | ||
2610 | 159 | Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete. | ||
2611 | 160 | :param cache_pool: six.string_types. The cache tier pool name to remove. | ||
2612 | 161 | :return: None | ||
2613 | 162 | """ | ||
2614 | 163 | # read-only is easy, writeback is much harder | ||
2615 | 164 | mode = get_cache_mode(cache_pool) | ||
2616 | 165 | if mode == 'readonly': | ||
2617 | 166 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) | ||
2618 | 167 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) | ||
2619 | 168 | |||
2620 | 169 | elif mode == 'writeback': | ||
2621 | 170 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) | ||
2622 | 171 | # Flush the cache and wait for it to return | ||
2623 | 172 | check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) | ||
2624 | 173 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) | ||
2625 | 174 | check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) | ||
2626 | 175 | |||
2627 | 176 | def get_pgs(self, pool_size): | ||
2628 | 177 | """ | ||
2629 | 178 | :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for | ||
2630 | 179 | erasure coded pools | ||
2631 | 180 | :return: int. The number of pgs to use. | ||
2632 | 181 | """ | ||
2633 | 182 | validator(value=pool_size, valid_type=int) | ||
2634 | 183 | osds = get_osds(self.service) | ||
2635 | 184 | if not osds: | ||
2636 | 185 | # NOTE(james-page): Default to 200 for older ceph versions | ||
2637 | 186 | # which don't support OSD query from cli | ||
2638 | 187 | return 200 | ||
2639 | 188 | |||
2640 | 189 | # Calculate based on Ceph best practices | ||
2641 | 190 | if osds < 5: | ||
2642 | 191 | return 128 | ||
2643 | 192 | elif 5 < osds < 10: | ||
2644 | 193 | return 512 | ||
2645 | 194 | elif 10 < osds < 50: | ||
2646 | 195 | return 4096 | ||
2647 | 196 | else: | ||
2648 | 197 | estimate = (osds * 100) / pool_size | ||
2649 | 198 | # Return the next nearest power of 2 | ||
2650 | 199 | index = bisect.bisect_right(powers_of_two, estimate) | ||
2651 | 200 | return powers_of_two[index] | ||
2652 | 201 | |||
2653 | 202 | |||
2654 | 203 | class ReplicatedPool(Pool): | ||
2655 | 204 | def __init__(self, service, name, replicas=2): | ||
2656 | 205 | super(ReplicatedPool, self).__init__(service=service, name=name) | ||
2657 | 206 | self.replicas = replicas | ||
2658 | 207 | |||
2659 | 208 | def create(self): | ||
2660 | 209 | if not pool_exists(self.service, self.name): | ||
2661 | 210 | # Create it | ||
2662 | 211 | pgs = self.get_pgs(self.replicas) | ||
2663 | 212 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] | ||
2664 | 213 | try: | ||
2665 | 214 | check_call(cmd) | ||
2666 | 215 | except CalledProcessError: | ||
2667 | 216 | raise | ||
2668 | 217 | |||
2669 | 218 | |||
2670 | 219 | # Default jerasure erasure coded pool | ||
2671 | 220 | class ErasurePool(Pool): | ||
2672 | 221 | def __init__(self, service, name, erasure_code_profile="default"): | ||
2673 | 222 | super(ErasurePool, self).__init__(service=service, name=name) | ||
2674 | 223 | self.erasure_code_profile = erasure_code_profile | ||
2675 | 224 | |||
2676 | 225 | def create(self): | ||
2677 | 226 | if not pool_exists(self.service, self.name): | ||
2678 | 227 | # Try to find the erasure profile information so we can properly size the pgs | ||
2679 | 228 | erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile) | ||
2680 | 229 | |||
2681 | 230 | # Check for errors | ||
2682 | 231 | if erasure_profile is None: | ||
2683 | 232 | log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile), | ||
2684 | 233 | level=ERROR) | ||
2685 | 234 | raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile)) | ||
2686 | 235 | if 'k' not in erasure_profile or 'm' not in erasure_profile: | ||
2687 | 236 | # Error | ||
2688 | 237 | log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile), | ||
2689 | 238 | level=ERROR) | ||
2690 | 239 | raise PoolCreationError( | ||
2691 | 240 | message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile)) | ||
2692 | 241 | |||
2693 | 242 | pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) | ||
2694 | 243 | # Create it | ||
2695 | 244 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), | ||
2696 | 245 | 'erasure', self.erasure_code_profile] | ||
2697 | 246 | try: | ||
2698 | 247 | check_call(cmd) | ||
2699 | 248 | except CalledProcessError: | ||
2700 | 249 | raise | ||
2701 | 250 | |||
2702 | 251 | """Get an existing erasure code profile if it already exists. | ||
2703 | 252 | Returns json formatted output""" | ||
2704 | 253 | |||
2705 | 254 | |||
2706 | 255 | def get_erasure_profile(service, name): | ||
2707 | 256 | """ | ||
2708 | 257 | :param service: six.string_types. The Ceph user name to run the command under | ||
2709 | 258 | :param name: | ||
2710 | 259 | :return: | ||
2711 | 260 | """ | ||
2712 | 261 | try: | ||
2713 | 262 | out = check_output(['ceph', '--id', service, | ||
2714 | 263 | 'osd', 'erasure-code-profile', 'get', | ||
2715 | 264 | name, '--format=json']) | ||
2716 | 265 | return json.loads(out) | ||
2717 | 266 | except (CalledProcessError, OSError, ValueError): | ||
2718 | 267 | return None | ||
2719 | 268 | |||
2720 | 269 | |||
2721 | 270 | def pool_set(service, pool_name, key, value): | ||
2722 | 271 | """ | ||
2723 | 272 | Sets a value for a RADOS pool in ceph. | ||
2724 | 273 | :param service: six.string_types. The Ceph user name to run the command under | ||
2725 | 274 | :param pool_name: six.string_types | ||
2726 | 275 | :param key: six.string_types | ||
2727 | 276 | :param value: | ||
2728 | 277 | :return: None. Can raise CalledProcessError | ||
2729 | 278 | """ | ||
2730 | 279 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value] | ||
2731 | 280 | try: | ||
2732 | 281 | check_call(cmd) | ||
2733 | 282 | except CalledProcessError: | ||
2734 | 283 | raise | ||
2735 | 284 | |||
2736 | 285 | |||
2737 | 286 | def snapshot_pool(service, pool_name, snapshot_name): | ||
2738 | 287 | """ | ||
2739 | 288 | Snapshots a RADOS pool in ceph. | ||
2740 | 289 | :param service: six.string_types. The Ceph user name to run the command under | ||
2741 | 290 | :param pool_name: six.string_types | ||
2742 | 291 | :param snapshot_name: six.string_types | ||
2743 | 292 | :return: None. Can raise CalledProcessError | ||
2744 | 293 | """ | ||
2745 | 294 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name] | ||
2746 | 295 | try: | ||
2747 | 296 | check_call(cmd) | ||
2748 | 297 | except CalledProcessError: | ||
2749 | 298 | raise | ||
2750 | 299 | |||
2751 | 300 | |||
2752 | 301 | def remove_pool_snapshot(service, pool_name, snapshot_name): | ||
2753 | 302 | """ | ||
2754 | 303 | Remove a snapshot from a RADOS pool in ceph. | ||
2755 | 304 | :param service: six.string_types. The Ceph user name to run the command under | ||
2756 | 305 | :param pool_name: six.string_types | ||
2757 | 306 | :param snapshot_name: six.string_types | ||
2758 | 307 | :return: None. Can raise CalledProcessError | ||
2759 | 308 | """ | ||
2760 | 309 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name] | ||
2761 | 310 | try: | ||
2762 | 311 | check_call(cmd) | ||
2763 | 312 | except CalledProcessError: | ||
2764 | 313 | raise | ||
2765 | 314 | |||
2766 | 315 | |||
2767 | 316 | # max_bytes should be an int or long | ||
2768 | 317 | def set_pool_quota(service, pool_name, max_bytes): | ||
2769 | 318 | """ | ||
2770 | 319 | :param service: six.string_types. The Ceph user name to run the command under | ||
2771 | 320 | :param pool_name: six.string_types | ||
2772 | 321 | :param max_bytes: int or long | ||
2773 | 322 | :return: None. Can raise CalledProcessError | ||
2774 | 323 | """ | ||
2775 | 324 | # Set a byte quota on a RADOS pool in ceph. | ||
2776 | 325 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] | ||
2777 | 326 | try: | ||
2778 | 327 | check_call(cmd) | ||
2779 | 328 | except CalledProcessError: | ||
2780 | 329 | raise | ||
2781 | 330 | |||
2782 | 331 | |||
2783 | 332 | def remove_pool_quota(service, pool_name): | ||
2784 | 333 | """ | ||
2785 | 334 | Set a byte quota on a RADOS pool in ceph. | ||
2786 | 335 | :param service: six.string_types. The Ceph user name to run the command under | ||
2787 | 336 | :param pool_name: six.string_types | ||
2788 | 337 | :return: None. Can raise CalledProcessError | ||
2789 | 338 | """ | ||
2790 | 339 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0'] | ||
2791 | 340 | try: | ||
2792 | 341 | check_call(cmd) | ||
2793 | 342 | except CalledProcessError: | ||
2794 | 343 | raise | ||
2795 | 344 | |||
2796 | 345 | |||
2797 | 346 | def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', | ||
2798 | 347 | data_chunks=2, coding_chunks=1, | ||
2799 | 348 | locality=None, durability_estimator=None): | ||
2800 | 349 | """ | ||
2801 | 350 | Create a new erasure code profile if one does not already exist for it. Updates | ||
2802 | 351 | the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ | ||
2803 | 352 | for more details | ||
2804 | 353 | :param service: six.string_types. The Ceph user name to run the command under | ||
2805 | 354 | :param profile_name: six.string_types | ||
2806 | 355 | :param erasure_plugin_name: six.string_types | ||
2807 | 356 | :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', | ||
2808 | 357 | 'room', 'root', 'row']) | ||
2809 | 358 | :param data_chunks: int | ||
2810 | 359 | :param coding_chunks: int | ||
2811 | 360 | :param locality: int | ||
2812 | 361 | :param durability_estimator: int | ||
2813 | 362 | :return: None. Can raise CalledProcessError | ||
2814 | 363 | """ | ||
2815 | 364 | # Ensure this failure_domain is allowed by Ceph | ||
2816 | 365 | validator(failure_domain, six.string_types, | ||
2817 | 366 | ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row']) | ||
2818 | 367 | |||
2819 | 368 | cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name, | ||
2820 | 369 | 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks), | ||
2821 | 370 | 'ruleset_failure_domain=' + failure_domain] | ||
2822 | 371 | if locality is not None and durability_estimator is not None: | ||
2823 | 372 | raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.") | ||
2824 | 373 | |||
2825 | 374 | # Add plugin specific information | ||
2826 | 375 | if locality is not None: | ||
2827 | 376 | # For local erasure codes | ||
2828 | 377 | cmd.append('l=' + str(locality)) | ||
2829 | 378 | if durability_estimator is not None: | ||
2830 | 379 | # For Shec erasure codes | ||
2831 | 380 | cmd.append('c=' + str(durability_estimator)) | ||
2832 | 381 | |||
2833 | 382 | if erasure_profile_exists(service, profile_name): | ||
2834 | 383 | cmd.append('--force') | ||
2835 | 384 | |||
2836 | 385 | try: | ||
2837 | 386 | check_call(cmd) | ||
2838 | 387 | except CalledProcessError: | ||
2839 | 388 | raise | ||
2840 | 389 | |||
2841 | 390 | |||
2842 | 391 | def rename_pool(service, old_name, new_name): | ||
2843 | 392 | """ | ||
2844 | 393 | Rename a Ceph pool from old_name to new_name | ||
2845 | 394 | :param service: six.string_types. The Ceph user name to run the command under | ||
2846 | 395 | :param old_name: six.string_types | ||
2847 | 396 | :param new_name: six.string_types | ||
2848 | 397 | :return: None | ||
2849 | 398 | """ | ||
2850 | 399 | validator(value=old_name, valid_type=six.string_types) | ||
2851 | 400 | validator(value=new_name, valid_type=six.string_types) | ||
2852 | 401 | |||
2853 | 402 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name] | ||
2854 | 403 | check_call(cmd) | ||
2855 | 404 | |||
2856 | 405 | |||
2857 | 406 | def erasure_profile_exists(service, name): | ||
2858 | 407 | """ | ||
2859 | 408 | Check to see if an Erasure code profile already exists. | ||
2860 | 409 | :param service: six.string_types. The Ceph user name to run the command under | ||
2861 | 410 | :param name: six.string_types | ||
2862 | 411 | :return: int or None | ||
2863 | 412 | """ | ||
2864 | 413 | validator(value=name, valid_type=six.string_types) | ||
2865 | 414 | try: | ||
2866 | 415 | check_call(['ceph', '--id', service, | ||
2867 | 416 | 'osd', 'erasure-code-profile', 'get', | ||
2868 | 417 | name]) | ||
2869 | 418 | return True | ||
2870 | 419 | except CalledProcessError: | ||
2871 | 420 | return False | ||
2872 | 421 | |||
2873 | 422 | |||
2874 | 423 | def get_cache_mode(service, pool_name): | ||
2875 | 424 | """ | ||
2876 | 425 | Find the current caching mode of the pool_name given. | ||
2877 | 426 | :param service: six.string_types. The Ceph user name to run the command under | ||
2878 | 427 | :param pool_name: six.string_types | ||
2879 | 428 | :return: int or None | ||
2880 | 429 | """ | ||
2881 | 430 | validator(value=service, valid_type=six.string_types) | ||
2882 | 431 | validator(value=pool_name, valid_type=six.string_types) | ||
2883 | 432 | out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json']) | ||
2884 | 433 | try: | ||
2885 | 434 | osd_json = json.loads(out) | ||
2886 | 435 | for pool in osd_json['pools']: | ||
2887 | 436 | if pool['pool_name'] == pool_name: | ||
2888 | 437 | return pool['cache_mode'] | ||
2889 | 438 | return None | ||
2890 | 439 | except ValueError: | ||
2891 | 440 | raise | ||
2892 | 441 | |||
2893 | 442 | |||
2894 | 443 | def pool_exists(service, name): | ||
2895 | 444 | """Check to see if a RADOS pool already exists.""" | ||
2896 | 445 | try: | ||
2897 | 446 | out = check_output(['rados', '--id', service, | ||
2898 | 447 | 'lspools']).decode('UTF-8') | ||
2899 | 448 | except CalledProcessError: | ||
2900 | 449 | return False | ||
2901 | 450 | |||
2902 | 451 | return name in out | ||
2903 | 452 | |||
2904 | 453 | |||
2905 | 454 | def get_osds(service): | ||
2906 | 455 | """Return a list of all Ceph Object Storage Daemons currently in the | ||
2907 | 456 | cluster. | ||
2908 | 457 | """ | ||
2909 | 458 | version = ceph_version() | ||
2910 | 459 | if version and version >= '0.56': | ||
2911 | 460 | return json.loads(check_output(['ceph', '--id', service, | ||
2912 | 461 | 'osd', 'ls', | ||
2913 | 462 | '--format=json']).decode('UTF-8')) | ||
2914 | 463 | |||
2915 | 464 | return None | ||
2916 | 75 | 465 | ||
2917 | 76 | 466 | ||
2918 | 77 | def install(): | 467 | def install(): |
2919 | @@ -101,53 +491,37 @@ | |||
2920 | 101 | check_call(cmd) | 491 | check_call(cmd) |
2921 | 102 | 492 | ||
2922 | 103 | 493 | ||
2948 | 104 | def pool_exists(service, name): | 494 | def update_pool(client, pool, settings): |
2949 | 105 | """Check to see if a RADOS pool already exists.""" | 495 | cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool] |
2950 | 106 | try: | 496 | for k, v in six.iteritems(settings): |
2951 | 107 | out = check_output(['rados', '--id', service, | 497 | cmd.append(k) |
2952 | 108 | 'lspools']).decode('UTF-8') | 498 | cmd.append(v) |
2953 | 109 | except CalledProcessError: | 499 | |
2954 | 110 | return False | 500 | check_call(cmd) |
2955 | 111 | 501 | ||
2956 | 112 | return name in out | 502 | |
2957 | 113 | 503 | def create_pool(service, name, replicas=3, pg_num=None): | |
2933 | 114 | |||
2934 | 115 | def get_osds(service): | ||
2935 | 116 | """Return a list of all Ceph Object Storage Daemons currently in the | ||
2936 | 117 | cluster. | ||
2937 | 118 | """ | ||
2938 | 119 | version = ceph_version() | ||
2939 | 120 | if version and version >= '0.56': | ||
2940 | 121 | return json.loads(check_output(['ceph', '--id', service, | ||
2941 | 122 | 'osd', 'ls', | ||
2942 | 123 | '--format=json']).decode('UTF-8')) | ||
2943 | 124 | |||
2944 | 125 | return None | ||
2945 | 126 | |||
2946 | 127 | |||
2947 | 128 | def create_pool(service, name, replicas=3): | ||
2958 | 129 | """Create a new RADOS pool.""" | 504 | """Create a new RADOS pool.""" |
2959 | 130 | if pool_exists(service, name): | 505 | if pool_exists(service, name): |
2960 | 131 | log("Ceph pool {} already exists, skipping creation".format(name), | 506 | log("Ceph pool {} already exists, skipping creation".format(name), |
2961 | 132 | level=WARNING) | 507 | level=WARNING) |
2962 | 133 | return | 508 | return |
2963 | 134 | 509 | ||
2980 | 135 | # Calculate the number of placement groups based | 510 | if not pg_num: |
2981 | 136 | # on upstream recommended best practices. | 511 | # Calculate the number of placement groups based |
2982 | 137 | osds = get_osds(service) | 512 | # on upstream recommended best practices. |
2983 | 138 | if osds: | 513 | osds = get_osds(service) |
2984 | 139 | pgnum = (len(osds) * 100 // replicas) | 514 | if osds: |
2985 | 140 | else: | 515 | pg_num = (len(osds) * 100 // replicas) |
2986 | 141 | # NOTE(james-page): Default to 200 for older ceph versions | 516 | else: |
2987 | 142 | # which don't support OSD query from cli | 517 | # NOTE(james-page): Default to 200 for older ceph versions |
2988 | 143 | pgnum = 200 | 518 | # which don't support OSD query from cli |
2989 | 144 | 519 | pg_num = 200 | |
2990 | 145 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] | 520 | |
2991 | 146 | check_call(cmd) | 521 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)] |
2992 | 147 | 522 | check_call(cmd) | |
2993 | 148 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', | 523 | |
2994 | 149 | str(replicas)] | 524 | update_pool(service, name, settings={'size': str(replicas)}) |
2979 | 150 | check_call(cmd) | ||
2995 | 151 | 525 | ||
2996 | 152 | 526 | ||
2997 | 153 | def delete_pool(service, name): | 527 | def delete_pool(service, name): |
2998 | @@ -202,10 +576,10 @@ | |||
2999 | 202 | log('Created new keyfile at %s.' % keyfile, level=INFO) | 576 | log('Created new keyfile at %s.' % keyfile, level=INFO) |
3000 | 203 | 577 | ||
3001 | 204 | 578 | ||
3004 | 205 | def get_ceph_nodes(): | 579 | def get_ceph_nodes(relation='ceph'): |
3005 | 206 | """Query named relation 'ceph' to determine current nodes.""" | 580 | """Query named relation to determine current nodes.""" |
3006 | 207 | hosts = [] | 581 | hosts = [] |
3008 | 208 | for r_id in relation_ids('ceph'): | 582 | for r_id in relation_ids(relation): |
3009 | 209 | for unit in related_units(r_id): | 583 | for unit in related_units(r_id): |
3010 | 210 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | 584 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) |
3011 | 211 | 585 | ||
3012 | @@ -357,14 +731,14 @@ | |||
3013 | 357 | service_start(svc) | 731 | service_start(svc) |
3014 | 358 | 732 | ||
3015 | 359 | 733 | ||
3017 | 360 | def ensure_ceph_keyring(service, user=None, group=None): | 734 | def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'): |
3018 | 361 | """Ensures a ceph keyring is created for a named service and optionally | 735 | """Ensures a ceph keyring is created for a named service and optionally |
3019 | 362 | ensures user and group ownership. | 736 | ensures user and group ownership. |
3020 | 363 | 737 | ||
3021 | 364 | Returns False if no ceph key is available in relation state. | 738 | Returns False if no ceph key is available in relation state. |
3022 | 365 | """ | 739 | """ |
3023 | 366 | key = None | 740 | key = None |
3025 | 367 | for rid in relation_ids('ceph'): | 741 | for rid in relation_ids(relation): |
3026 | 368 | for unit in related_units(rid): | 742 | for unit in related_units(rid): |
3027 | 369 | key = relation_get('key', rid=rid, unit=unit) | 743 | key = relation_get('key', rid=rid, unit=unit) |
3028 | 370 | if key: | 744 | if key: |
3029 | @@ -405,7 +779,12 @@ | |||
3030 | 405 | 779 | ||
3031 | 406 | The API is versioned and defaults to version 1. | 780 | The API is versioned and defaults to version 1. |
3032 | 407 | """ | 781 | """ |
3034 | 408 | def __init__(self, api_version=1, request_id=None): | 782 | <<<<<<< TREE |
3035 | 783 | def __init__(self, api_version=1, request_id=None): | ||
3036 | 784 | ======= | ||
3037 | 785 | |||
3038 | 786 | def __init__(self, api_version=1, request_id=None): | ||
3039 | 787 | >>>>>>> MERGE-SOURCE | ||
3040 | 409 | self.api_version = api_version | 788 | self.api_version = api_version |
3041 | 410 | if request_id: | 789 | if request_id: |
3042 | 411 | self.request_id = request_id | 790 | self.request_id = request_id |
3043 | @@ -413,9 +792,24 @@ | |||
3044 | 413 | self.request_id = str(uuid.uuid1()) | 792 | self.request_id = str(uuid.uuid1()) |
3045 | 414 | self.ops = [] | 793 | self.ops = [] |
3046 | 415 | 794 | ||
3048 | 416 | def add_op_create_pool(self, name, replica_count=3): | 795 | def add_op_create_pool(self, name, replica_count=3, pg_num=None): |
3049 | 796 | """Adds an operation to create a pool. | ||
3050 | 797 | |||
3051 | 798 | @param pg_num setting: optional setting. If not provided, this value | ||
3052 | 799 | will be calculated by the broker based on how many OSDs are in the | ||
3053 | 800 | cluster at the time of creation. Note that, if provided, this value | ||
3054 | 801 | will be capped at the current available maximum. | ||
3055 | 802 | """ | ||
3056 | 417 | self.ops.append({'op': 'create-pool', 'name': name, | 803 | self.ops.append({'op': 'create-pool', 'name': name, |
3058 | 418 | 'replicas': replica_count}) | 804 | 'replicas': replica_count, 'pg_num': pg_num}) |
3059 | 805 | |||
3060 | 806 | def set_ops(self, ops): | ||
3061 | 807 | """Set request ops to provided value. | ||
3062 | 808 | |||
3063 | 809 | Useful for injecting ops that come from a previous request | ||
3064 | 810 | to allow comparisons to ensure validity. | ||
3065 | 811 | """ | ||
3066 | 812 | self.ops = ops | ||
3067 | 419 | 813 | ||
3068 | 420 | def set_ops(self, ops): | 814 | def set_ops(self, ops): |
3069 | 421 | """Set request ops to provided value. | 815 | """Set request ops to provided value. |
3070 | @@ -427,6 +821,7 @@ | |||
3071 | 427 | 821 | ||
3072 | 428 | @property | 822 | @property |
3073 | 429 | def request(self): | 823 | def request(self): |
3074 | 824 | <<<<<<< TREE | ||
3075 | 430 | return json.dumps({'api-version': self.api_version, 'ops': self.ops, | 825 | return json.dumps({'api-version': self.api_version, 'ops': self.ops, |
3076 | 431 | 'request-id': self.request_id}) | 826 | 'request-id': self.request_id}) |
3077 | 432 | 827 | ||
3078 | @@ -451,6 +846,32 @@ | |||
3079 | 451 | 846 | ||
3080 | 452 | def __ne__(self, other): | 847 | def __ne__(self, other): |
3081 | 453 | return not self.__eq__(other) | 848 | return not self.__eq__(other) |
3082 | 849 | ======= | ||
3083 | 850 | return json.dumps({'api-version': self.api_version, 'ops': self.ops, | ||
3084 | 851 | 'request-id': self.request_id}) | ||
3085 | 852 | |||
3086 | 853 | def _ops_equal(self, other): | ||
3087 | 854 | if len(self.ops) == len(other.ops): | ||
3088 | 855 | for req_no in range(0, len(self.ops)): | ||
3089 | 856 | for key in ['replicas', 'name', 'op', 'pg_num']: | ||
3090 | 857 | if self.ops[req_no].get(key) != other.ops[req_no].get(key): | ||
3091 | 858 | return False | ||
3092 | 859 | else: | ||
3093 | 860 | return False | ||
3094 | 861 | return True | ||
3095 | 862 | |||
3096 | 863 | def __eq__(self, other): | ||
3097 | 864 | if not isinstance(other, self.__class__): | ||
3098 | 865 | return False | ||
3099 | 866 | if self.api_version == other.api_version and \ | ||
3100 | 867 | self._ops_equal(other): | ||
3101 | 868 | return True | ||
3102 | 869 | else: | ||
3103 | 870 | return False | ||
3104 | 871 | |||
3105 | 872 | def __ne__(self, other): | ||
3106 | 873 | return not self.__eq__(other) | ||
3107 | 874 | >>>>>>> MERGE-SOURCE | ||
3108 | 454 | 875 | ||
3109 | 455 | 876 | ||
3110 | 456 | class CephBrokerRsp(object): | 877 | class CephBrokerRsp(object): |
3111 | @@ -476,6 +897,7 @@ | |||
3112 | 476 | @property | 897 | @property |
3113 | 477 | def exit_msg(self): | 898 | def exit_msg(self): |
3114 | 478 | return self.rsp.get('stderr') | 899 | return self.rsp.get('stderr') |
3115 | 900 | <<<<<<< TREE | ||
3116 | 479 | 901 | ||
3117 | 480 | 902 | ||
3118 | 481 | # Ceph Broker Conversation: | 903 | # Ceph Broker Conversation: |
3119 | @@ -655,3 +1077,184 @@ | |||
3120 | 655 | for rid in relation_ids('ceph'): | 1077 | for rid in relation_ids('ceph'): |
3121 | 656 | log('Sending request {}'.format(request.request_id), level=DEBUG) | 1078 | log('Sending request {}'.format(request.request_id), level=DEBUG) |
3122 | 657 | relation_set(relation_id=rid, broker_req=request.request) | 1079 | relation_set(relation_id=rid, broker_req=request.request) |
3123 | 1080 | ======= | ||
3124 | 1081 | |||
3125 | 1082 | |||
3126 | 1083 | # Ceph Broker Conversation: | ||
3127 | 1084 | # If a charm needs an action to be taken by ceph it can create a CephBrokerRq | ||
3128 | 1085 | # and send that request to ceph via the ceph relation. The CephBrokerRq has a | ||
3129 | 1086 | # unique id so that the client can identity which CephBrokerRsp is associated | ||
3130 | 1087 | # with the request. Ceph will also respond to each client unit individually | ||
3131 | 1088 | # creating a response key per client unit eg glance/0 will get a CephBrokerRsp | ||
3132 | 1089 | # via key broker-rsp-glance-0 | ||
3133 | 1090 | # | ||
3134 | 1091 | # To use this the charm can just do something like: | ||
3135 | 1092 | # | ||
3136 | 1093 | # from charmhelpers.contrib.storage.linux.ceph import ( | ||
3137 | 1094 | # send_request_if_needed, | ||
3138 | 1095 | # is_request_complete, | ||
3139 | 1096 | # CephBrokerRq, | ||
3140 | 1097 | # ) | ||
3141 | 1098 | # | ||
3142 | 1099 | # @hooks.hook('ceph-relation-changed') | ||
3143 | 1100 | # def ceph_changed(): | ||
3144 | 1101 | # rq = CephBrokerRq() | ||
3145 | 1102 | # rq.add_op_create_pool(name='poolname', replica_count=3) | ||
3146 | 1103 | # | ||
3147 | 1104 | # if is_request_complete(rq): | ||
3148 | 1105 | # <Request complete actions> | ||
3149 | 1106 | # else: | ||
3150 | 1107 | # send_request_if_needed(get_ceph_request()) | ||
3151 | 1108 | # | ||
3152 | 1109 | # CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example | ||
3153 | 1110 | # of glance having sent a request to ceph which ceph has successfully processed | ||
3154 | 1111 | # 'ceph:8': { | ||
3155 | 1112 | # 'ceph/0': { | ||
3156 | 1113 | # 'auth': 'cephx', | ||
3157 | 1114 | # 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', | ||
3158 | 1115 | # 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', | ||
3159 | 1116 | # 'ceph-public-address': '10.5.44.103', | ||
3160 | 1117 | # 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', | ||
3161 | 1118 | # 'private-address': '10.5.44.103', | ||
3162 | 1119 | # }, | ||
3163 | 1120 | # 'glance/0': { | ||
3164 | 1121 | # 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' | ||
3165 | 1122 | # '"ops": [{"replicas": 3, "name": "glance", ' | ||
3166 | 1123 | # '"op": "create-pool"}]}'), | ||
3167 | 1124 | # 'private-address': '10.5.44.109', | ||
3168 | 1125 | # }, | ||
3169 | 1126 | # } | ||
3170 | 1127 | |||
3171 | 1128 | def get_previous_request(rid): | ||
3172 | 1129 | """Return the last ceph broker request sent on a given relation | ||
3173 | 1130 | |||
3174 | 1131 | @param rid: Relation id to query for request | ||
3175 | 1132 | """ | ||
3176 | 1133 | request = None | ||
3177 | 1134 | broker_req = relation_get(attribute='broker_req', rid=rid, | ||
3178 | 1135 | unit=local_unit()) | ||
3179 | 1136 | if broker_req: | ||
3180 | 1137 | request_data = json.loads(broker_req) | ||
3181 | 1138 | request = CephBrokerRq(api_version=request_data['api-version'], | ||
3182 | 1139 | request_id=request_data['request-id']) | ||
3183 | 1140 | request.set_ops(request_data['ops']) | ||
3184 | 1141 | |||
3185 | 1142 | return request | ||
3186 | 1143 | |||
3187 | 1144 | |||
3188 | 1145 | def get_request_states(request, relation='ceph'): | ||
3189 | 1146 | """Return a dict of requests per relation id with their corresponding | ||
3190 | 1147 | completion state. | ||
3191 | 1148 | |||
3192 | 1149 | This allows a charm, which has a request for ceph, to see whether there is | ||
3193 | 1150 | an equivalent request already being processed and if so what state that | ||
3194 | 1151 | request is in. | ||
3195 | 1152 | |||
3196 | 1153 | @param request: A CephBrokerRq object | ||
3197 | 1154 | """ | ||
3198 | 1155 | complete = [] | ||
3199 | 1156 | requests = {} | ||
3200 | 1157 | for rid in relation_ids(relation): | ||
3201 | 1158 | complete = False | ||
3202 | 1159 | previous_request = get_previous_request(rid) | ||
3203 | 1160 | if request == previous_request: | ||
3204 | 1161 | sent = True | ||
3205 | 1162 | complete = is_request_complete_for_rid(previous_request, rid) | ||
3206 | 1163 | else: | ||
3207 | 1164 | sent = False | ||
3208 | 1165 | complete = False | ||
3209 | 1166 | |||
3210 | 1167 | requests[rid] = { | ||
3211 | 1168 | 'sent': sent, | ||
3212 | 1169 | 'complete': complete, | ||
3213 | 1170 | } | ||
3214 | 1171 | |||
3215 | 1172 | return requests | ||
3216 | 1173 | |||
3217 | 1174 | |||
3218 | 1175 | def is_request_sent(request, relation='ceph'): | ||
3219 | 1176 | """Check to see if a functionally equivalent request has already been sent | ||
3220 | 1177 | |||
3221 | 1178 | Returns True if a similair request has been sent | ||
3222 | 1179 | |||
3223 | 1180 | @param request: A CephBrokerRq object | ||
3224 | 1181 | """ | ||
3225 | 1182 | states = get_request_states(request, relation=relation) | ||
3226 | 1183 | for rid in states.keys(): | ||
3227 | 1184 | if not states[rid]['sent']: | ||
3228 | 1185 | return False | ||
3229 | 1186 | |||
3230 | 1187 | return True | ||
3231 | 1188 | |||
3232 | 1189 | |||
3233 | 1190 | def is_request_complete(request, relation='ceph'): | ||
3234 | 1191 | """Check to see if a functionally equivalent request has already been | ||
3235 | 1192 | completed | ||
3236 | 1193 | |||
3237 | 1194 | Returns True if a similair request has been completed | ||
3238 | 1195 | |||
3239 | 1196 | @param request: A CephBrokerRq object | ||
3240 | 1197 | """ | ||
3241 | 1198 | states = get_request_states(request, relation=relation) | ||
3242 | 1199 | for rid in states.keys(): | ||
3243 | 1200 | if not states[rid]['complete']: | ||
3244 | 1201 | return False | ||
3245 | 1202 | |||
3246 | 1203 | return True | ||
3247 | 1204 | |||
3248 | 1205 | |||
3249 | 1206 | def is_request_complete_for_rid(request, rid): | ||
3250 | 1207 | """Check if a given request has been completed on the given relation | ||
3251 | 1208 | |||
3252 | 1209 | @param request: A CephBrokerRq object | ||
3253 | 1210 | @param rid: Relation ID | ||
3254 | 1211 | """ | ||
3255 | 1212 | broker_key = get_broker_rsp_key() | ||
3256 | 1213 | for unit in related_units(rid): | ||
3257 | 1214 | rdata = relation_get(rid=rid, unit=unit) | ||
3258 | 1215 | if rdata.get(broker_key): | ||
3259 | 1216 | rsp = CephBrokerRsp(rdata.get(broker_key)) | ||
3260 | 1217 | if rsp.request_id == request.request_id: | ||
3261 | 1218 | if not rsp.exit_code: | ||
3262 | 1219 | return True | ||
3263 | 1220 | else: | ||
3264 | 1221 | # The remote unit sent no reply targeted at this unit so either the | ||
3265 | 1222 | # remote ceph cluster does not support unit targeted replies or it | ||
3266 | 1223 | # has not processed our request yet. | ||
3267 | 1224 | if rdata.get('broker_rsp'): | ||
3268 | 1225 | request_data = json.loads(rdata['broker_rsp']) | ||
3269 | 1226 | if request_data.get('request-id'): | ||
3270 | 1227 | log('Ignoring legacy broker_rsp without unit key as remote ' | ||
3271 | 1228 | 'service supports unit specific replies', level=DEBUG) | ||
3272 | 1229 | else: | ||
3273 | 1230 | log('Using legacy broker_rsp as remote service does not ' | ||
3274 | 1231 | 'supports unit specific replies', level=DEBUG) | ||
3275 | 1232 | rsp = CephBrokerRsp(rdata['broker_rsp']) | ||
3276 | 1233 | if not rsp.exit_code: | ||
3277 | 1234 | return True | ||
3278 | 1235 | |||
3279 | 1236 | return False | ||
3280 | 1237 | |||
3281 | 1238 | |||
3282 | 1239 | def get_broker_rsp_key(): | ||
3283 | 1240 | """Return broker response key for this unit | ||
3284 | 1241 | |||
3285 | 1242 | This is the key that ceph is going to use to pass request status | ||
3286 | 1243 | information back to this unit | ||
3287 | 1244 | """ | ||
3288 | 1245 | return 'broker-rsp-' + local_unit().replace('/', '-') | ||
3289 | 1246 | |||
3290 | 1247 | |||
3291 | 1248 | def send_request_if_needed(request, relation='ceph'): | ||
3292 | 1249 | """Send broker request if an equivalent request has not already been sent | ||
3293 | 1250 | |||
3294 | 1251 | @param request: A CephBrokerRq object | ||
3295 | 1252 | """ | ||
3296 | 1253 | if is_request_sent(request, relation=relation): | ||
3297 | 1254 | log('Request already sent but not complete, not sending new request', | ||
3298 | 1255 | level=DEBUG) | ||
3299 | 1256 | else: | ||
3300 | 1257 | for rid in relation_ids(relation): | ||
3301 | 1258 | log('Sending request {}'.format(request.request_id), level=DEBUG) | ||
3302 | 1259 | relation_set(relation_id=rid, broker_req=request.request) | ||
3303 | 1260 | >>>>>>> MERGE-SOURCE | ||
3304 | 658 | 1261 | ||
3305 | === modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py' | |||
3306 | --- hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-01-26 09:47:37 +0000 | |||
3307 | +++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2016-01-06 21:19:13 +0000 | |||
3308 | @@ -76,3 +76,13 @@ | |||
3309 | 76 | check_call(cmd) | 76 | check_call(cmd) |
3310 | 77 | 77 | ||
3311 | 78 | return create_loopback(path) | 78 | return create_loopback(path) |
3312 | 79 | |||
3313 | 80 | |||
3314 | 81 | def is_mapped_loopback_device(device): | ||
3315 | 82 | """ | ||
3316 | 83 | Checks if a given device name is an existing/mapped loopback device. | ||
3317 | 84 | :param device: str: Full path to the device (eg, /dev/loop1). | ||
3318 | 85 | :returns: str: Path to the backing file if is a loopback device | ||
3319 | 86 | empty string otherwise | ||
3320 | 87 | """ | ||
3321 | 88 | return loopback_devices().get(device, "") | ||
3322 | 79 | 89 | ||
3323 | === added file 'hooks/charmhelpers/core/files.py' | |||
3324 | --- hooks/charmhelpers/core/files.py 1970-01-01 00:00:00 +0000 | |||
3325 | +++ hooks/charmhelpers/core/files.py 2016-01-06 21:19:13 +0000 | |||
3326 | @@ -0,0 +1,45 @@ | |||
3327 | 1 | #!/usr/bin/env python | ||
3328 | 2 | # -*- coding: utf-8 -*- | ||
3329 | 3 | |||
3330 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
3331 | 5 | # | ||
3332 | 6 | # This file is part of charm-helpers. | ||
3333 | 7 | # | ||
3334 | 8 | # charm-helpers is free software: you can redistribute it and/or modify | ||
3335 | 9 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
3336 | 10 | # published by the Free Software Foundation. | ||
3337 | 11 | # | ||
3338 | 12 | # charm-helpers is distributed in the hope that it will be useful, | ||
3339 | 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
3340 | 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
3341 | 15 | # GNU Lesser General Public License for more details. | ||
3342 | 16 | # | ||
3343 | 17 | # You should have received a copy of the GNU Lesser General Public License | ||
3344 | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
3345 | 19 | |||
3346 | 20 | __author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>' | ||
3347 | 21 | |||
3348 | 22 | import os | ||
3349 | 23 | import subprocess | ||
3350 | 24 | |||
3351 | 25 | |||
3352 | 26 | def sed(filename, before, after, flags='g'): | ||
3353 | 27 | """ | ||
3354 | 28 | Search and replaces the given pattern on filename. | ||
3355 | 29 | |||
3356 | 30 | :param filename: relative or absolute file path. | ||
3357 | 31 | :param before: expression to be replaced (see 'man sed') | ||
3358 | 32 | :param after: expression to replace with (see 'man sed') | ||
3359 | 33 | :param flags: sed-compatible regex flags in example, to make | ||
3360 | 34 | the search and replace case insensitive, specify ``flags="i"``. | ||
3361 | 35 | The ``g`` flag is always specified regardless, so you do not | ||
3362 | 36 | need to remember to include it when overriding this parameter. | ||
3363 | 37 | :returns: If the sed command exit code was zero then return, | ||
3364 | 38 | otherwise raise CalledProcessError. | ||
3365 | 39 | """ | ||
3366 | 40 | expression = r's/{0}/{1}/{2}'.format(before, | ||
3367 | 41 | after, flags) | ||
3368 | 42 | |||
3369 | 43 | return subprocess.check_call(["sed", "-i", "-r", "-e", | ||
3370 | 44 | expression, | ||
3371 | 45 | os.path.expanduser(filename)]) | ||
3372 | 0 | 46 | ||
3373 | === renamed file 'hooks/charmhelpers/core/files.py' => 'hooks/charmhelpers/core/files.py.moved' | |||
3374 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
3375 | --- hooks/charmhelpers/core/hookenv.py 2015-10-22 13:19:13 +0000 | |||
3376 | +++ hooks/charmhelpers/core/hookenv.py 2016-01-06 21:19:13 +0000 | |||
3377 | @@ -491,6 +491,7 @@ | |||
3378 | 491 | 491 | ||
3379 | 492 | 492 | ||
3380 | 493 | @cached | 493 | @cached |
3381 | 494 | <<<<<<< TREE | ||
3382 | 494 | def relation_to_interface(relation_name): | 495 | def relation_to_interface(relation_name): |
3383 | 495 | """ | 496 | """ |
3384 | 496 | Given the name of a relation, return the interface that relation uses. | 497 | Given the name of a relation, return the interface that relation uses. |
3385 | @@ -548,6 +549,78 @@ | |||
3386 | 548 | 549 | ||
3387 | 549 | 550 | ||
3388 | 550 | @cached | 551 | @cached |
3389 | 552 | ======= | ||
3390 | 553 | def peer_relation_id(): | ||
3391 | 554 | '''Get the peers relation id if a peers relation has been joined, else None.''' | ||
3392 | 555 | md = metadata() | ||
3393 | 556 | section = md.get('peers') | ||
3394 | 557 | if section: | ||
3395 | 558 | for key in section: | ||
3396 | 559 | relids = relation_ids(key) | ||
3397 | 560 | if relids: | ||
3398 | 561 | return relids[0] | ||
3399 | 562 | return None | ||
3400 | 563 | |||
3401 | 564 | |||
3402 | 565 | @cached | ||
3403 | 566 | def relation_to_interface(relation_name): | ||
3404 | 567 | """ | ||
3405 | 568 | Given the name of a relation, return the interface that relation uses. | ||
3406 | 569 | |||
3407 | 570 | :returns: The interface name, or ``None``. | ||
3408 | 571 | """ | ||
3409 | 572 | return relation_to_role_and_interface(relation_name)[1] | ||
3410 | 573 | |||
3411 | 574 | |||
3412 | 575 | @cached | ||
3413 | 576 | def relation_to_role_and_interface(relation_name): | ||
3414 | 577 | """ | ||
3415 | 578 | Given the name of a relation, return the role and the name of the interface | ||
3416 | 579 | that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). | ||
3417 | 580 | |||
3418 | 581 | :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. | ||
3419 | 582 | """ | ||
3420 | 583 | _metadata = metadata() | ||
3421 | 584 | for role in ('provides', 'requires', 'peers'): | ||
3422 | 585 | interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') | ||
3423 | 586 | if interface: | ||
3424 | 587 | return role, interface | ||
3425 | 588 | return None, None | ||
3426 | 589 | |||
3427 | 590 | |||
3428 | 591 | @cached | ||
3429 | 592 | def role_and_interface_to_relations(role, interface_name): | ||
3430 | 593 | """ | ||
3431 | 594 | Given a role and interface name, return a list of relation names for the | ||
3432 | 595 | current charm that use that interface under that role (where role is one | ||
3433 | 596 | of ``provides``, ``requires``, or ``peers``). | ||
3434 | 597 | |||
3435 | 598 | :returns: A list of relation names. | ||
3436 | 599 | """ | ||
3437 | 600 | _metadata = metadata() | ||
3438 | 601 | results = [] | ||
3439 | 602 | for relation_name, relation in _metadata.get(role, {}).items(): | ||
3440 | 603 | if relation['interface'] == interface_name: | ||
3441 | 604 | results.append(relation_name) | ||
3442 | 605 | return results | ||
3443 | 606 | |||
3444 | 607 | |||
3445 | 608 | @cached | ||
3446 | 609 | def interface_to_relations(interface_name): | ||
3447 | 610 | """ | ||
3448 | 611 | Given an interface, return a list of relation names for the current | ||
3449 | 612 | charm that use that interface. | ||
3450 | 613 | |||
3451 | 614 | :returns: A list of relation names. | ||
3452 | 615 | """ | ||
3453 | 616 | results = [] | ||
3454 | 617 | for role in ('provides', 'requires', 'peers'): | ||
3455 | 618 | results.extend(role_and_interface_to_relations(role, interface_name)) | ||
3456 | 619 | return results | ||
3457 | 620 | |||
3458 | 621 | |||
3459 | 622 | @cached | ||
3460 | 623 | >>>>>>> MERGE-SOURCE | ||
3461 | 551 | def charm_name(): | 624 | def charm_name(): |
3462 | 552 | """Get the name of the current charm as is specified on metadata.yaml""" | 625 | """Get the name of the current charm as is specified on metadata.yaml""" |
3463 | 553 | return metadata().get('name') | 626 | return metadata().get('name') |
3464 | @@ -623,6 +696,7 @@ | |||
3465 | 623 | return unit_get('private-address') | 696 | return unit_get('private-address') |
3466 | 624 | 697 | ||
3467 | 625 | 698 | ||
3468 | 699 | <<<<<<< TREE | ||
3469 | 626 | @cached | 700 | @cached |
3470 | 627 | def storage_get(attribute="", storage_id=""): | 701 | def storage_get(attribute="", storage_id=""): |
3471 | 628 | """Get storage attributes""" | 702 | """Get storage attributes""" |
3472 | @@ -655,6 +729,40 @@ | |||
3473 | 655 | raise | 729 | raise |
3474 | 656 | 730 | ||
3475 | 657 | 731 | ||
3476 | 732 | ======= | ||
3477 | 733 | @cached | ||
3478 | 734 | def storage_get(attribute=None, storage_id=None): | ||
3479 | 735 | """Get storage attributes""" | ||
3480 | 736 | _args = ['storage-get', '--format=json'] | ||
3481 | 737 | if storage_id: | ||
3482 | 738 | _args.extend(('-s', storage_id)) | ||
3483 | 739 | if attribute: | ||
3484 | 740 | _args.append(attribute) | ||
3485 | 741 | try: | ||
3486 | 742 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) | ||
3487 | 743 | except ValueError: | ||
3488 | 744 | return None | ||
3489 | 745 | |||
3490 | 746 | |||
3491 | 747 | @cached | ||
3492 | 748 | def storage_list(storage_name=None): | ||
3493 | 749 | """List the storage IDs for the unit""" | ||
3494 | 750 | _args = ['storage-list', '--format=json'] | ||
3495 | 751 | if storage_name: | ||
3496 | 752 | _args.append(storage_name) | ||
3497 | 753 | try: | ||
3498 | 754 | return json.loads(subprocess.check_output(_args).decode('UTF-8')) | ||
3499 | 755 | except ValueError: | ||
3500 | 756 | return None | ||
3501 | 757 | except OSError as e: | ||
3502 | 758 | import errno | ||
3503 | 759 | if e.errno == errno.ENOENT: | ||
3504 | 760 | # storage-list does not exist | ||
3505 | 761 | return [] | ||
3506 | 762 | raise | ||
3507 | 763 | |||
3508 | 764 | |||
3509 | 765 | >>>>>>> MERGE-SOURCE | ||
3510 | 658 | class UnregisteredHookError(Exception): | 766 | class UnregisteredHookError(Exception): |
3511 | 659 | """Raised when an undefined hook is called""" | 767 | """Raised when an undefined hook is called""" |
3512 | 660 | pass | 768 | pass |
3513 | @@ -753,178 +861,391 @@ | |||
3514 | 753 | 861 | ||
3515 | 754 | The results set by action_set are preserved.""" | 862 | The results set by action_set are preserved.""" |
3516 | 755 | subprocess.check_call(['action-fail', message]) | 863 | subprocess.check_call(['action-fail', message]) |
3692 | 756 | 864 | <<<<<<< TREE | |
3693 | 757 | 865 | ||
3694 | 758 | def action_name(): | 866 | |
3695 | 759 | """Get the name of the currently executing action.""" | 867 | def action_name(): |
3696 | 760 | return os.environ.get('JUJU_ACTION_NAME') | 868 | """Get the name of the currently executing action.""" |
3697 | 761 | 869 | return os.environ.get('JUJU_ACTION_NAME') | |
3698 | 762 | 870 | ||
3699 | 763 | def action_uuid(): | 871 | |
3700 | 764 | """Get the UUID of the currently executing action.""" | 872 | def action_uuid(): |
3701 | 765 | return os.environ.get('JUJU_ACTION_UUID') | 873 | """Get the UUID of the currently executing action.""" |
3702 | 766 | 874 | return os.environ.get('JUJU_ACTION_UUID') | |
3703 | 767 | 875 | ||
3704 | 768 | def action_tag(): | 876 | |
3705 | 769 | """Get the tag for the currently executing action.""" | 877 | def action_tag(): |
3706 | 770 | return os.environ.get('JUJU_ACTION_TAG') | 878 | """Get the tag for the currently executing action.""" |
3707 | 771 | 879 | return os.environ.get('JUJU_ACTION_TAG') | |
3708 | 772 | 880 | ||
3709 | 773 | def status_set(workload_state, message): | 881 | |
3710 | 774 | """Set the workload state with a message | 882 | def status_set(workload_state, message): |
3711 | 775 | 883 | """Set the workload state with a message | |
3712 | 776 | Use status-set to set the workload state with a message which is visible | 884 | |
3713 | 777 | to the user via juju status. If the status-set command is not found then | 885 | Use status-set to set the workload state with a message which is visible |
3714 | 778 | assume this is juju < 1.23 and juju-log the message unstead. | 886 | to the user via juju status. If the status-set command is not found then |
3715 | 779 | 887 | assume this is juju < 1.23 and juju-log the message unstead. | |
3716 | 780 | workload_state -- valid juju workload state. | 888 | |
3717 | 781 | message -- status update message | 889 | workload_state -- valid juju workload state. |
3718 | 782 | """ | 890 | message -- status update message |
3719 | 783 | valid_states = ['maintenance', 'blocked', 'waiting', 'active'] | 891 | """ |
3720 | 784 | if workload_state not in valid_states: | 892 | valid_states = ['maintenance', 'blocked', 'waiting', 'active'] |
3721 | 785 | raise ValueError( | 893 | if workload_state not in valid_states: |
3722 | 786 | '{!r} is not a valid workload state'.format(workload_state) | 894 | raise ValueError( |
3723 | 787 | ) | 895 | '{!r} is not a valid workload state'.format(workload_state) |
3724 | 788 | cmd = ['status-set', workload_state, message] | 896 | ) |
3725 | 789 | try: | 897 | cmd = ['status-set', workload_state, message] |
3726 | 790 | ret = subprocess.call(cmd) | 898 | try: |
3727 | 791 | if ret == 0: | 899 | ret = subprocess.call(cmd) |
3728 | 792 | return | 900 | if ret == 0: |
3729 | 793 | except OSError as e: | 901 | return |
3730 | 794 | if e.errno != errno.ENOENT: | 902 | except OSError as e: |
3731 | 795 | raise | 903 | if e.errno != errno.ENOENT: |
3732 | 796 | log_message = 'status-set failed: {} {}'.format(workload_state, | 904 | raise |
3733 | 797 | message) | 905 | log_message = 'status-set failed: {} {}'.format(workload_state, |
3734 | 798 | log(log_message, level='INFO') | 906 | message) |
3735 | 799 | 907 | log(log_message, level='INFO') | |
3736 | 800 | 908 | ||
3737 | 801 | def status_get(): | 909 | |
3738 | 802 | """Retrieve the previously set juju workload state and message | 910 | def status_get(): |
3739 | 803 | 911 | """Retrieve the previously set juju workload state and message | |
3740 | 804 | If the status-get command is not found then assume this is juju < 1.23 and | 912 | |
3741 | 805 | return 'unknown', "" | 913 | If the status-get command is not found then assume this is juju < 1.23 and |
3742 | 806 | 914 | return 'unknown', "" | |
3743 | 807 | """ | 915 | |
3744 | 808 | cmd = ['status-get', "--format=json", "--include-data"] | 916 | """ |
3745 | 809 | try: | 917 | cmd = ['status-get', "--format=json", "--include-data"] |
3746 | 810 | raw_status = subprocess.check_output(cmd) | 918 | try: |
3747 | 811 | except OSError as e: | 919 | raw_status = subprocess.check_output(cmd) |
3748 | 812 | if e.errno == errno.ENOENT: | 920 | except OSError as e: |
3749 | 813 | return ('unknown', "") | 921 | if e.errno == errno.ENOENT: |
3750 | 814 | else: | 922 | return ('unknown', "") |
3751 | 815 | raise | 923 | else: |
3752 | 816 | else: | 924 | raise |
3753 | 817 | status = json.loads(raw_status.decode("UTF-8")) | 925 | else: |
3754 | 818 | return (status["status"], status["message"]) | 926 | status = json.loads(raw_status.decode("UTF-8")) |
3755 | 819 | 927 | return (status["status"], status["message"]) | |
3756 | 820 | 928 | ||
3757 | 821 | def translate_exc(from_exc, to_exc): | 929 | |
3758 | 822 | def inner_translate_exc1(f): | 930 | def translate_exc(from_exc, to_exc): |
3759 | 823 | def inner_translate_exc2(*args, **kwargs): | 931 | def inner_translate_exc1(f): |
3760 | 824 | try: | 932 | def inner_translate_exc2(*args, **kwargs): |
3761 | 825 | return f(*args, **kwargs) | 933 | try: |
3762 | 826 | except from_exc: | 934 | return f(*args, **kwargs) |
3763 | 827 | raise to_exc | 935 | except from_exc: |
3764 | 828 | 936 | raise to_exc | |
3765 | 829 | return inner_translate_exc2 | 937 | |
3766 | 830 | 938 | return inner_translate_exc2 | |
3767 | 831 | return inner_translate_exc1 | 939 | |
3768 | 832 | 940 | return inner_translate_exc1 | |
3769 | 833 | 941 | ||
3770 | 834 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | 942 | |
3771 | 835 | def is_leader(): | 943 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3772 | 836 | """Does the current unit hold the juju leadership | 944 | def is_leader(): |
3773 | 837 | 945 | """Does the current unit hold the juju leadership | |
3774 | 838 | Uses juju to determine whether the current unit is the leader of its peers | 946 | |
3775 | 839 | """ | 947 | Uses juju to determine whether the current unit is the leader of its peers |
3776 | 840 | cmd = ['is-leader', '--format=json'] | 948 | """ |
3777 | 841 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | 949 | cmd = ['is-leader', '--format=json'] |
3778 | 842 | 950 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | |
3779 | 843 | 951 | ||
3780 | 844 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | 952 | |
3781 | 845 | def leader_get(attribute=None): | 953 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3782 | 846 | """Juju leader get value(s)""" | 954 | def leader_get(attribute=None): |
3783 | 847 | cmd = ['leader-get', '--format=json'] + [attribute or '-'] | 955 | """Juju leader get value(s)""" |
3784 | 848 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | 956 | cmd = ['leader-get', '--format=json'] + [attribute or '-'] |
3785 | 849 | 957 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | |
3786 | 850 | 958 | ||
3787 | 851 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | 959 | |
3788 | 852 | def leader_set(settings=None, **kwargs): | 960 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3789 | 853 | """Juju leader set value(s)""" | 961 | def leader_set(settings=None, **kwargs): |
3790 | 854 | # Don't log secrets. | 962 | """Juju leader set value(s)""" |
3791 | 855 | # log("Juju leader-set '%s'" % (settings), level=DEBUG) | 963 | # Don't log secrets. |
3792 | 856 | cmd = ['leader-set'] | 964 | # log("Juju leader-set '%s'" % (settings), level=DEBUG) |
3793 | 857 | settings = settings or {} | 965 | cmd = ['leader-set'] |
3794 | 858 | settings.update(kwargs) | 966 | settings = settings or {} |
3795 | 859 | for k, v in settings.items(): | 967 | settings.update(kwargs) |
3796 | 860 | if v is None: | 968 | for k, v in settings.items(): |
3797 | 861 | cmd.append('{}='.format(k)) | 969 | if v is None: |
3798 | 862 | else: | 970 | cmd.append('{}='.format(k)) |
3799 | 863 | cmd.append('{}={}'.format(k, v)) | 971 | else: |
3800 | 864 | subprocess.check_call(cmd) | 972 | cmd.append('{}={}'.format(k, v)) |
3801 | 865 | 973 | subprocess.check_call(cmd) | |
3802 | 866 | 974 | ||
3803 | 867 | @cached | 975 | |
3804 | 868 | def juju_version(): | 976 | @cached |
3805 | 869 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" | 977 | def juju_version(): |
3806 | 870 | # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 | 978 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
3807 | 871 | jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] | 979 | # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 |
3808 | 872 | return subprocess.check_output([jujud, 'version'], | 980 | jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] |
3809 | 873 | universal_newlines=True).strip() | 981 | return subprocess.check_output([jujud, 'version'], |
3810 | 874 | 982 | universal_newlines=True).strip() | |
3811 | 875 | 983 | ||
3812 | 876 | @cached | 984 | |
3813 | 877 | def has_juju_version(minimum_version): | 985 | @cached |
3814 | 878 | """Return True if the Juju version is at least the provided version""" | 986 | def has_juju_version(minimum_version): |
3815 | 879 | return LooseVersion(juju_version()) >= LooseVersion(minimum_version) | 987 | """Return True if the Juju version is at least the provided version""" |
3816 | 880 | 988 | return LooseVersion(juju_version()) >= LooseVersion(minimum_version) | |
3817 | 881 | 989 | ||
3818 | 882 | _atexit = [] | 990 | |
3819 | 883 | _atstart = [] | 991 | _atexit = [] |
3820 | 884 | 992 | _atstart = [] | |
3821 | 885 | 993 | ||
3822 | 886 | def atstart(callback, *args, **kwargs): | 994 | |
3823 | 887 | '''Schedule a callback to run before the main hook. | 995 | def atstart(callback, *args, **kwargs): |
3824 | 888 | 996 | '''Schedule a callback to run before the main hook. | |
3825 | 889 | Callbacks are run in the order they were added. | 997 | |
3826 | 890 | 998 | Callbacks are run in the order they were added. | |
3827 | 891 | This is useful for modules and classes to perform initialization | 999 | |
3828 | 892 | and inject behavior. In particular: | 1000 | This is useful for modules and classes to perform initialization |
3829 | 893 | 1001 | and inject behavior. In particular: | |
3830 | 894 | - Run common code before all of your hooks, such as logging | 1002 | |
3831 | 895 | the hook name or interesting relation data. | 1003 | - Run common code before all of your hooks, such as logging |
3832 | 896 | - Defer object or module initialization that requires a hook | 1004 | the hook name or interesting relation data. |
3833 | 897 | context until we know there actually is a hook context, | 1005 | - Defer object or module initialization that requires a hook |
3834 | 898 | making testing easier. | 1006 | context until we know there actually is a hook context, |
3835 | 899 | - Rather than requiring charm authors to include boilerplate to | 1007 | making testing easier. |
3836 | 900 | invoke your helper's behavior, have it run automatically if | 1008 | - Rather than requiring charm authors to include boilerplate to |
3837 | 901 | your object is instantiated or module imported. | 1009 | invoke your helper's behavior, have it run automatically if |
3838 | 902 | 1010 | your object is instantiated or module imported. | |
3839 | 903 | This is not at all useful after your hook framework as been launched. | 1011 | |
3840 | 904 | ''' | 1012 | This is not at all useful after your hook framework as been launched. |
3841 | 905 | global _atstart | 1013 | ''' |
3842 | 906 | _atstart.append((callback, args, kwargs)) | 1014 | global _atstart |
3843 | 907 | 1015 | _atstart.append((callback, args, kwargs)) | |
3844 | 908 | 1016 | ||
3845 | 909 | def atexit(callback, *args, **kwargs): | 1017 | |
3846 | 910 | '''Schedule a callback to run on successful hook completion. | 1018 | def atexit(callback, *args, **kwargs): |
3847 | 911 | 1019 | '''Schedule a callback to run on successful hook completion. | |
3848 | 912 | Callbacks are run in the reverse order that they were added.''' | 1020 | |
3849 | 913 | _atexit.append((callback, args, kwargs)) | 1021 | Callbacks are run in the reverse order that they were added.''' |
3850 | 914 | 1022 | _atexit.append((callback, args, kwargs)) | |
3851 | 915 | 1023 | ||
3852 | 916 | def _run_atstart(): | 1024 | |
3853 | 917 | '''Hook frameworks must invoke this before running the main hook body.''' | 1025 | def _run_atstart(): |
3854 | 918 | global _atstart | 1026 | '''Hook frameworks must invoke this before running the main hook body.''' |
3855 | 919 | for callback, args, kwargs in _atstart: | 1027 | global _atstart |
3856 | 920 | callback(*args, **kwargs) | 1028 | for callback, args, kwargs in _atstart: |
3857 | 921 | del _atstart[:] | 1029 | callback(*args, **kwargs) |
3858 | 922 | 1030 | del _atstart[:] | |
3859 | 923 | 1031 | ||
3860 | 924 | def _run_atexit(): | 1032 | |
3861 | 925 | '''Hook frameworks must invoke this after the main hook body has | 1033 | def _run_atexit(): |
3862 | 926 | successfully completed. Do not invoke it if the hook fails.''' | 1034 | '''Hook frameworks must invoke this after the main hook body has |
3863 | 927 | global _atexit | 1035 | successfully completed. Do not invoke it if the hook fails.''' |
3864 | 928 | for callback, args, kwargs in reversed(_atexit): | 1036 | global _atexit |
3865 | 929 | callback(*args, **kwargs) | 1037 | for callback, args, kwargs in reversed(_atexit): |
3866 | 930 | del _atexit[:] | 1038 | callback(*args, **kwargs) |
3867 | 1039 | del _atexit[:] | ||
3868 | 1040 | ======= | ||
3869 | 1041 | |||
3870 | 1042 | |||
def action_name():
    """Return the name of the action currently being executed, or None."""
    return os.getenv('JUJU_ACTION_NAME')
3874 | 1046 | |||
3875 | 1047 | |||
def action_uuid():
    """Return the UUID of the action currently being executed, or None."""
    return os.getenv('JUJU_ACTION_UUID')
3879 | 1051 | |||
3880 | 1052 | |||
def action_tag():
    """Return the tag of the action currently being executed, or None."""
    return os.getenv('JUJU_ACTION_TAG')
3884 | 1056 | |||
3885 | 1057 | |||
def status_set(workload_state, message):
    """Set the workload state with a message

    Use status-set to set the workload state with a message which is visible
    to the user via juju status. If the status-set command is not found then
    assume this is juju < 1.23 and juju-log the message instead.

    :param str workload_state: valid juju workload state, one of
        'maintenance', 'blocked', 'waiting' or 'active'
    :param str message: status update message
    :raises ValueError: if workload_state is not a valid state
    """
    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
    if workload_state not in valid_states:
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state)
        )
    cmd = ['status-set', workload_state, message]
    try:
        ret = subprocess.call(cmd)
        if ret == 0:
            return
    except OSError as e:
        # Only ENOENT (status-set tool missing, i.e. juju < 1.23) is a
        # recoverable condition; anything else is a real failure.
        if e.errno != errno.ENOENT:
            raise
    # Fall back to juju-log so the operator still sees the message.
    log_message = 'status-set failed: {} {}'.format(workload_state,
                                                    message)
    log(log_message, level='INFO')
3912 | 1084 | |||
3913 | 1085 | |||
def status_get():
    """Retrieve the previously set juju workload state and message

    If the status-get command is not found then assume this is juju < 1.23 and
    return 'unknown', ""
    """
    try:
        raw_status = subprocess.check_output(
            ['status-get', "--format=json", "--include-data"])
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        # juju < 1.23: no status-get tool available.
        return ('unknown', "")
    decoded = json.loads(raw_status.decode("UTF-8"))
    return (decoded["status"], decoded["message"])
3932 | 1104 | |||
3933 | 1105 | |||
def translate_exc(from_exc, to_exc):
    """Decorator factory that re-raises ``from_exc`` from the wrapped
    function as ``to_exc``; all other exceptions propagate unchanged."""
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except from_exc:
                raise to_exc
        return wrapper
    return decorator
3946 | 1118 | |||
3947 | 1119 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
    """Does the current unit hold the juju leadership

    Uses juju to determine whether the current unit is the leader of its peers
    """
    out = subprocess.check_output(['is-leader', '--format=json'])
    return json.loads(out.decode('UTF-8'))
3956 | 1128 | |||
3957 | 1129 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_get(attribute=None):
    """Juju leader get value(s); '-' fetches all leader settings."""
    cmd = ['leader-get', '--format=json', attribute or '-']
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
3963 | 1135 | |||
3964 | 1136 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_set(settings=None, **kwargs):
    """Juju leader set value(s); a value of None clears the key."""
    # Don't log secrets.
    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
    settings = settings or {}
    settings.update(kwargs)
    cmd = ['leader-set'] + [
        '{}='.format(key) if value is None else '{}={}'.format(key, value)
        for key, value in settings.items()]
    subprocess.check_call(cmd)
3979 | 1151 | |||
3980 | 1152 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_register(ptype, klass, pid):
    """Tell Juju, from within a running hook, that a payload has started."""
    cmd = ['payload-register', ptype, klass, pid]
    subprocess.check_call(cmd)
3989 | 1161 | |||
3990 | 1162 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_unregister(klass, pid):
    """Tell Juju, from within a running hook, that a payload has been
    manually stopped. The class and id must match a payload previously
    registered with payload-register."""
    cmd = ['payload-unregister', klass, pid]
    subprocess.check_call(cmd)
4001 | 1173 | |||
4002 | 1174 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_status_set(klass, pid, status):
    """Update the current status of a registered payload. The class and id
    must match a payload previously registered with payload-register; status
    must be one of: starting, started, stopping, stopped."""
    cmd = ['payload-status-set', klass, pid, status]
    subprocess.check_call(cmd)
4013 | 1185 | |||
4014 | 1186 | |||
@cached
def juju_version():
    """Full version string (eg. '1.23.3.1-trusty-amd64')"""
    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
    jujud_paths = glob.glob('/var/lib/juju/tools/machine-*/jujud')
    out = subprocess.check_output([jujud_paths[0], 'version'],
                                  universal_newlines=True)
    return out.strip()
4022 | 1194 | |||
4023 | 1195 | |||
@cached
def has_juju_version(minimum_version):
    """Return True if the Juju version is at least the provided version"""
    current = LooseVersion(juju_version())
    return current >= LooseVersion(minimum_version)
4028 | 1200 | |||
4029 | 1201 | |||
# Callback registries shared by the at*/_run_* helpers below.
_atexit = []
_atstart = []


def atstart(callback, *args, **kwargs):
    '''Schedule a callback to run before the main hook.

    Callbacks are run in the order they were added.

    This is useful for modules and classes to perform initialization
    and inject behavior. In particular:

    - Run common code before all of your hooks, such as logging
      the hook name or interesting relation data.
    - Defer object or module initialization that requires a hook
      context until we know there actually is a hook context,
      making testing easier.
    - Rather than requiring charm authors to include boilerplate to
      invoke your helper's behavior, have it run automatically if
      your object is instantiated or module imported.

    This is not at all useful after your hook framework as been launched.
    '''
    _atstart.append((callback, args, kwargs))


def atexit(callback, *args, **kwargs):
    '''Schedule a callback to run on successful hook completion.

    Callbacks are run in the reverse order that they were added.'''
    _atexit.append((callback, args, kwargs))


def _run_atstart():
    '''Hook frameworks must invoke this before running the main hook body.'''
    for callback, args, kwargs in _atstart:
        callback(*args, **kwargs)
    # Empty in place so other references to the list stay valid.
    del _atstart[:]


def _run_atexit():
    '''Hook frameworks must invoke this after the main hook body has
    successfully completed. Do not invoke it if the hook fails.'''
    # LIFO: last-registered cleanup runs first.
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    del _atexit[:]
4079 | 1251 | >>>>>>> MERGE-SOURCE | ||
4080 | 931 | 1252 | ||
4081 | === modified file 'hooks/charmhelpers/core/host.py' | |||
4082 | --- hooks/charmhelpers/core/host.py 2015-10-22 13:19:13 +0000 | |||
4083 | +++ hooks/charmhelpers/core/host.py 2016-01-06 21:19:13 +0000 | |||
4084 | @@ -63,6 +63,7 @@ | |||
4085 | 63 | return service_result | 63 | return service_result |
4086 | 64 | 64 | ||
4087 | 65 | 65 | ||
4088 | 66 | <<<<<<< TREE | ||
4089 | 66 | def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): | 67 | def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): |
4090 | 67 | """Pause a system service. | 68 | """Pause a system service. |
4091 | 68 | 69 | ||
4092 | @@ -109,6 +110,58 @@ | |||
4093 | 109 | return started | 110 | return started |
4094 | 110 | 111 | ||
4095 | 111 | 112 | ||
4096 | 113 | ======= | ||
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
    """Pause a system service.

    Stops the service if it is running, then disables it at boot via an
    Upstart override file or update-rc.d, whichever init flavour is found.

    :returns: True if the service ended up stopped (or was already stopped)
    :raises ValueError: if neither an Upstart nor a SysV entry is found
    """
    stopped = (service_stop(service_name)
               if service_running(service_name) else True)
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if os.path.exists(upstart_file):
        # Upstart honours a 'manual' stanza in a .override file.
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        with open(override_path, 'w') as fh:
            fh.write("manual\n")
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "disable"])
    else:
        # XXX: Support SystemD too
        raise ValueError(
            "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
                service_name, upstart_file, sysv_file))
    return stopped
4119 | 136 | |||
4120 | 137 | |||
def service_resume(service_name, init_dir="/etc/init",
                   initd_dir="/etc/init.d"):
    """Resume a system service.

    Re-enables starting at boot (removes the Upstart override file or runs
    update-rc.d enable), then starts the service if it is not running.

    :returns: True if the service ended up running
    :raises ValueError: if neither an Upstart nor a SysV entry is found
    """
    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
    sysv_file = os.path.join(initd_dir, service_name)
    if os.path.exists(upstart_file):
        override_path = os.path.join(
            init_dir, '{}.override'.format(service_name))
        # Dropping the override re-enables automatic start under Upstart.
        if os.path.exists(override_path):
            os.unlink(override_path)
    elif os.path.exists(sysv_file):
        subprocess.check_call(["update-rc.d", service_name, "enable"])
    else:
        # XXX: Support SystemD too
        raise ValueError(
            "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
                service_name, upstart_file, sysv_file))

    started = service_running(service_name)
    if not started:
        started = service_start(service_name)
    return started
4145 | 162 | |||
4146 | 163 | |||
4147 | 164 | >>>>>>> MERGE-SOURCE | ||
4148 | 112 | def service(action, service_name): | 165 | def service(action, service_name): |
4149 | 113 | """Control a system service""" | 166 | """Control a system service""" |
4150 | 114 | cmd = ['service', service_name, action] | 167 | cmd = ['service', service_name, action] |
4151 | @@ -142,8 +195,22 @@ | |||
4152 | 142 | return True | 195 | return True |
4153 | 143 | 196 | ||
4154 | 144 | 197 | ||
4157 | 145 | def adduser(username, password=None, shell='/bin/bash', system_user=False): | 198 | def adduser(username, password=None, shell='/bin/bash', system_user=False, |
4158 | 146 | """Add a user to the system""" | 199 | primary_group=None, secondary_groups=None): |
4159 | 200 | """ | ||
4160 | 201 | Add a user to the system. | ||
4161 | 202 | |||
4162 | 203 | Will log but otherwise succeed if the user already exists. | ||
4163 | 204 | |||
4164 | 205 | :param str username: Username to create | ||
4165 | 206 | :param str password: Password for user; if ``None``, create a system user | ||
4166 | 207 | :param str shell: The default shell for the user | ||
4167 | 208 | :param bool system_user: Whether to create a login or system user | ||
4168 | 209 | :param str primary_group: Primary group for user; defaults to their username | ||
4169 | 210 | :param list secondary_groups: Optional list of additional groups | ||
4170 | 211 | |||
4171 | 212 | :returns: The password database entry struct, as returned by `pwd.getpwnam` | ||
4172 | 213 | """ | ||
4173 | 147 | try: | 214 | try: |
4174 | 148 | user_info = pwd.getpwnam(username) | 215 | user_info = pwd.getpwnam(username) |
4175 | 149 | log('user {0} already exists!'.format(username)) | 216 | log('user {0} already exists!'.format(username)) |
4176 | @@ -158,6 +225,16 @@ | |||
4177 | 158 | '--shell', shell, | 225 | '--shell', shell, |
4178 | 159 | '--password', password, | 226 | '--password', password, |
4179 | 160 | ]) | 227 | ]) |
4180 | 228 | if not primary_group: | ||
4181 | 229 | try: | ||
4182 | 230 | grp.getgrnam(username) | ||
4183 | 231 | primary_group = username # avoid "group exists" error | ||
4184 | 232 | except KeyError: | ||
4185 | 233 | pass | ||
4186 | 234 | if primary_group: | ||
4187 | 235 | cmd.extend(['-g', primary_group]) | ||
4188 | 236 | if secondary_groups: | ||
4189 | 237 | cmd.extend(['-G', ','.join(secondary_groups)]) | ||
4190 | 161 | cmd.append(username) | 238 | cmd.append(username) |
4191 | 162 | subprocess.check_call(cmd) | 239 | subprocess.check_call(cmd) |
4192 | 163 | user_info = pwd.getpwnam(username) | 240 | user_info = pwd.getpwnam(username) |
4193 | @@ -566,7 +643,14 @@ | |||
4194 | 566 | os.chdir(cur) | 643 | os.chdir(cur) |
4195 | 567 | 644 | ||
4196 | 568 | 645 | ||
4198 | 569 | def chownr(path, owner, group, follow_links=True): | 646 | def chownr(path, owner, group, follow_links=True, chowntopdir=False): |
4199 | 647 | """ | ||
4200 | 648 | Recursively change user and group ownership of files and directories | ||
4201 | 649 | in given path. Doesn't chown path itself by default, only its children. | ||
4202 | 650 | |||
4203 | 651 | :param bool follow_links: Also Chown links if True | ||
4204 | 652 | :param bool chowntopdir: Also chown path itself if True | ||
4205 | 653 | """ | ||
4206 | 570 | uid = pwd.getpwnam(owner).pw_uid | 654 | uid = pwd.getpwnam(owner).pw_uid |
4207 | 571 | gid = grp.getgrnam(group).gr_gid | 655 | gid = grp.getgrnam(group).gr_gid |
4208 | 572 | if follow_links: | 656 | if follow_links: |
4209 | @@ -574,6 +658,10 @@ | |||
4210 | 574 | else: | 658 | else: |
4211 | 575 | chown = os.lchown | 659 | chown = os.lchown |
4212 | 576 | 660 | ||
4213 | 661 | if chowntopdir: | ||
4214 | 662 | broken_symlink = os.path.lexists(path) and not os.path.exists(path) | ||
4215 | 663 | if not broken_symlink: | ||
4216 | 664 | chown(path, uid, gid) | ||
4217 | 577 | for root, dirs, files in os.walk(path): | 665 | for root, dirs, files in os.walk(path): |
4218 | 578 | for name in dirs + files: | 666 | for name in dirs + files: |
4219 | 579 | full = os.path.join(root, name) | 667 | full = os.path.join(root, name) |
4220 | @@ -584,3 +672,19 @@ | |||
4221 | 584 | 672 | ||
4222 | 585 | def lchownr(path, owner, group): | 673 | def lchownr(path, owner, group): |
4223 | 586 | chownr(path, owner, group, follow_links=False) | 674 | chownr(path, owner, group, follow_links=False) |
4224 | 675 | |||
4225 | 676 | |||
def get_total_ram():
    '''The total amount of system RAM in bytes.

    This is what is reported by the OS, and may be overcommitted when
    there are multiple containers hosted on the same machine.

    :returns int: MemTotal from /proc/meminfo, converted to bytes
    :raises NotImplementedError: if no MemTotal line is found
    '''
    with open('/proc/meminfo', 'r') as f:
        for line in f:
            # Guard the unpack: some /proc/meminfo lines (e.g. the
            # HugePages_* counters) have only two fields, which would
            # crash a blind three-way split if they preceded MemTotal.
            parts = line.split()
            if len(parts) == 3 and parts[0] == 'MemTotal:':
                _key, value, unit = parts
                assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024  # Classic, not KiB.
    raise NotImplementedError()
4240 | 587 | 691 | ||
4241 | === added file 'hooks/charmhelpers/core/hugepage.py' | |||
4242 | --- hooks/charmhelpers/core/hugepage.py 1970-01-01 00:00:00 +0000 | |||
4243 | +++ hooks/charmhelpers/core/hugepage.py 2016-01-06 21:19:13 +0000 | |||
4244 | @@ -0,0 +1,71 @@ | |||
4245 | 1 | # -*- coding: utf-8 -*- | ||
4246 | 2 | |||
4247 | 3 | # Copyright 2014-2015 Canonical Limited. | ||
4248 | 4 | # | ||
4249 | 5 | # This file is part of charm-helpers. | ||
4250 | 6 | # | ||
4251 | 7 | # charm-helpers is free software: you can redistribute it and/or modify | ||
4252 | 8 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
4253 | 9 | # published by the Free Software Foundation. | ||
4254 | 10 | # | ||
4255 | 11 | # charm-helpers is distributed in the hope that it will be useful, | ||
4256 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
4257 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
4258 | 14 | # GNU Lesser General Public License for more details. | ||
4259 | 15 | # | ||
4260 | 16 | # You should have received a copy of the GNU Lesser General Public License | ||
4261 | 17 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
4262 | 18 | |||
4263 | 19 | import yaml | ||
4264 | 20 | from charmhelpers.core import fstab | ||
4265 | 21 | from charmhelpers.core import sysctl | ||
4266 | 22 | from charmhelpers.core.host import ( | ||
4267 | 23 | add_group, | ||
4268 | 24 | add_user_to_group, | ||
4269 | 25 | fstab_mount, | ||
4270 | 26 | mkdir, | ||
4271 | 27 | ) | ||
4272 | 28 | from charmhelpers.core.strutils import bytes_from_string | ||
4273 | 29 | from subprocess import check_output | ||
4274 | 30 | |||
4275 | 31 | |||
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Creates the hugepage group, grants the user access, writes the sysctl
    settings, and (re)creates the hugetlbfs fstab entry for mnt_point.

    Args:
    user (str)  -- Username to allow access to hugepages to
    group (str) -- Group name to own hugepages
    nr_hugepages (int) -- Number of pages to reserve
    max_map_count (int) -- Number of Virtual Memory Areas a process can own
    mnt_point (str) -- Directory to mount hugepages on
    pagesize (str) -- Size of hugepages
    mount (bool) -- Whether to Mount hugepages
    set_shmmax (bool) -- Whether to raise kernel.shmmax to cover the
        reserved hugepage memory, if it is currently smaller
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    # Each hugepage needs at least two VMAs (map + reserve), so make sure
    # max_map_count can accommodate the full reservation.
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        # Only raise shmmax, never lower an already-larger value.
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    lfstab = fstab.Fstab()
    # Replace any stale entry for the mountpoint before adding ours.
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize),
                         0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
4316 | 0 | 72 | ||
4317 | === renamed file 'hooks/charmhelpers/core/hugepage.py' => 'hooks/charmhelpers/core/hugepage.py.moved' | |||
4318 | === added file 'hooks/charmhelpers/core/kernel.py' | |||
4319 | --- hooks/charmhelpers/core/kernel.py 1970-01-01 00:00:00 +0000 | |||
4320 | +++ hooks/charmhelpers/core/kernel.py 2016-01-06 21:19:13 +0000 | |||
4321 | @@ -0,0 +1,68 @@ | |||
4322 | 1 | #!/usr/bin/env python | ||
4323 | 2 | # -*- coding: utf-8 -*- | ||
4324 | 3 | |||
4325 | 4 | # Copyright 2014-2015 Canonical Limited. | ||
4326 | 5 | # | ||
4327 | 6 | # This file is part of charm-helpers. | ||
4328 | 7 | # | ||
4329 | 8 | # charm-helpers is free software: you can redistribute it and/or modify | ||
4330 | 9 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
4331 | 10 | # published by the Free Software Foundation. | ||
4332 | 11 | # | ||
4333 | 12 | # charm-helpers is distributed in the hope that it will be useful, | ||
4334 | 13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
4335 | 14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
4336 | 15 | # GNU Lesser General Public License for more details. | ||
4337 | 16 | # | ||
4338 | 17 | # You should have received a copy of the GNU Lesser General Public License | ||
4339 | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
4340 | 19 | |||
4341 | 20 | __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" | ||
4342 | 21 | |||
4343 | 22 | from charmhelpers.core.hookenv import ( | ||
4344 | 23 | log, | ||
4345 | 24 | INFO | ||
4346 | 25 | ) | ||
4347 | 26 | |||
4348 | 27 | from subprocess import check_call, check_output | ||
4349 | 28 | import re | ||
4350 | 29 | |||
4351 | 30 | |||
4352 | 31 | def modprobe(module, persist=True): | ||
4353 | 32 | """Load a kernel module and configure for auto-load on reboot.""" | ||
4354 | 33 | cmd = ['modprobe', module] | ||
4355 | 34 | |||
4356 | 35 | log('Loading kernel module %s' % module, level=INFO) | ||
4357 | 36 | |||
4358 | 37 | check_call(cmd) | ||
4359 | 38 | if persist: | ||
4360 | 39 | with open('/etc/modules', 'r+') as modules: | ||
4361 | 40 | if module not in modules.read(): | ||
4362 | 41 | modules.write(module) | ||
4363 | 42 | |||
4364 | 43 | |||
4365 | 44 | def rmmod(module, force=False): | ||
4366 | 45 | """Remove a module from the linux kernel""" | ||
4367 | 46 | cmd = ['rmmod'] | ||
4368 | 47 | if force: | ||
4369 | 48 | cmd.append('-f') | ||
4370 | 49 | cmd.append(module) | ||
4371 | 50 | log('Removing kernel module %s' % module, level=INFO) | ||
4372 | 51 | return check_call(cmd) | ||
4373 | 52 | |||
4374 | 53 | |||
4375 | 54 | def lsmod(): | ||
4376 | 55 | """Shows what kernel modules are currently loaded""" | ||
4377 | 56 | return check_output(['lsmod'], | ||
4378 | 57 | universal_newlines=True) | ||
4379 | 58 | |||
4380 | 59 | |||
4381 | 60 | def is_module_loaded(module): | ||
4382 | 61 | """Checks if a kernel module is already loaded""" | ||
4383 | 62 | matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) | ||
4384 | 63 | return len(matches) > 0 | ||
4385 | 64 | |||
4386 | 65 | |||
4387 | 66 | def update_initramfs(version='all'): | ||
4388 | 67 | """Updates an initramfs image""" | ||
4389 | 68 | return check_call(["update-initramfs", "-k", version, "-u"]) | ||
4390 | 0 | 69 | ||
4391 | === renamed file 'hooks/charmhelpers/core/kernel.py' => 'hooks/charmhelpers/core/kernel.py.moved' | |||
4392 | === modified file 'hooks/charmhelpers/core/services/helpers.py' | |||
4393 | --- hooks/charmhelpers/core/services/helpers.py 2015-10-22 13:19:13 +0000 | |||
4394 | +++ hooks/charmhelpers/core/services/helpers.py 2016-01-06 21:19:13 +0000 | |||
4395 | @@ -243,31 +243,50 @@ | |||
4396 | 243 | :param str source: The template source file, relative to | 243 | :param str source: The template source file, relative to |
4397 | 244 | `$CHARM_DIR/templates` | 244 | `$CHARM_DIR/templates` |
4398 | 245 | 245 | ||
4400 | 246 | :param str target: The target to write the rendered template to | 246 | :param str target: The target to write the rendered template to (or None) |
4401 | 247 | :param str owner: The owner of the rendered file | 247 | :param str owner: The owner of the rendered file |
4402 | 248 | :param str group: The group of the rendered file | 248 | :param str group: The group of the rendered file |
4403 | 249 | :param int perms: The permissions of the rendered file | 249 | :param int perms: The permissions of the rendered file |
4406 | 250 | :param partial on_change_action: functools partial to be executed when | 250 | <<<<<<< TREE |
4407 | 251 | rendered file changes | 251 | :param partial on_change_action: functools partial to be executed when |
4408 | 252 | rendered file changes | ||
4409 | 253 | ======= | ||
4410 | 254 | :param partial on_change_action: functools partial to be executed when | ||
4411 | 255 | rendered file changes | ||
4412 | 256 | :param jinja2 loader template_loader: A jinja2 template loader | ||
4413 | 257 | |||
4414 | 258 | :return str: The rendered template | ||
4415 | 259 | >>>>>>> MERGE-SOURCE | ||
4416 | 252 | """ | 260 | """ |
4417 | 253 | def __init__(self, source, target, | 261 | def __init__(self, source, target, |
4418 | 262 | <<<<<<< TREE | ||
4419 | 254 | owner='root', group='root', perms=0o444, | 263 | owner='root', group='root', perms=0o444, |
4420 | 255 | on_change_action=None): | 264 | on_change_action=None): |
4421 | 265 | ======= | ||
4422 | 266 | owner='root', group='root', perms=0o444, | ||
4423 | 267 | on_change_action=None, template_loader=None): | ||
4424 | 268 | >>>>>>> MERGE-SOURCE | ||
4425 | 256 | self.source = source | 269 | self.source = source |
4426 | 257 | self.target = target | 270 | self.target = target |
4427 | 258 | self.owner = owner | 271 | self.owner = owner |
4428 | 259 | self.group = group | 272 | self.group = group |
4429 | 260 | self.perms = perms | 273 | self.perms = perms |
4431 | 261 | self.on_change_action = on_change_action | 274 | <<<<<<< TREE |
4432 | 275 | self.on_change_action = on_change_action | ||
4433 | 276 | ======= | ||
4434 | 277 | self.on_change_action = on_change_action | ||
4435 | 278 | self.template_loader = template_loader | ||
4436 | 279 | >>>>>>> MERGE-SOURCE | ||
4437 | 262 | 280 | ||
4438 | 263 | def __call__(self, manager, service_name, event_name): | 281 | def __call__(self, manager, service_name, event_name): |
4439 | 264 | pre_checksum = '' | 282 | pre_checksum = '' |
4440 | 265 | if self.on_change_action and os.path.isfile(self.target): | 283 | if self.on_change_action and os.path.isfile(self.target): |
4441 | 266 | pre_checksum = host.file_hash(self.target) | 284 | pre_checksum = host.file_hash(self.target) |
4442 | 267 | service = manager.get_service(service_name) | 285 | service = manager.get_service(service_name) |
4444 | 268 | context = {} | 286 | context = {'ctx': {}} |
4445 | 269 | for ctx in service.get('required_data', []): | 287 | for ctx in service.get('required_data', []): |
4446 | 270 | context.update(ctx) | 288 | context.update(ctx) |
4447 | 289 | <<<<<<< TREE | ||
4448 | 271 | templating.render(self.source, self.target, context, | 290 | templating.render(self.source, self.target, context, |
4449 | 272 | self.owner, self.group, self.perms) | 291 | self.owner, self.group, self.perms) |
4450 | 273 | if self.on_change_action: | 292 | if self.on_change_action: |
4451 | @@ -277,6 +296,22 @@ | |||
4452 | 277 | hookenv.DEBUG) | 296 | hookenv.DEBUG) |
4453 | 278 | else: | 297 | else: |
4454 | 279 | self.on_change_action() | 298 | self.on_change_action() |
4455 | 299 | ======= | ||
4456 | 300 | context['ctx'].update(ctx) | ||
4457 | 301 | |||
4458 | 302 | result = templating.render(self.source, self.target, context, | ||
4459 | 303 | self.owner, self.group, self.perms, | ||
4460 | 304 | template_loader=self.template_loader) | ||
4461 | 305 | if self.on_change_action: | ||
4462 | 306 | if pre_checksum == host.file_hash(self.target): | ||
4463 | 307 | hookenv.log( | ||
4464 | 308 | 'No change detected: {}'.format(self.target), | ||
4465 | 309 | hookenv.DEBUG) | ||
4466 | 310 | else: | ||
4467 | 311 | self.on_change_action() | ||
4468 | 312 | |||
4469 | 313 | return result | ||
4470 | 314 | >>>>>>> MERGE-SOURCE | ||
4471 | 280 | 315 | ||
4472 | 281 | 316 | ||
4473 | 282 | # Convenience aliases for templates | 317 | # Convenience aliases for templates |
4474 | 283 | 318 | ||
4475 | === modified file 'hooks/charmhelpers/core/templating.py' | |||
4476 | --- hooks/charmhelpers/core/templating.py 2015-03-13 13:00:03 +0000 | |||
4477 | +++ hooks/charmhelpers/core/templating.py 2016-01-06 21:19:13 +0000 | |||
4478 | @@ -21,13 +21,14 @@ | |||
4479 | 21 | 21 | ||
4480 | 22 | 22 | ||
4481 | 23 | def render(source, target, context, owner='root', group='root', | 23 | def render(source, target, context, owner='root', group='root', |
4483 | 24 | perms=0o444, templates_dir=None, encoding='UTF-8'): | 24 | perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None): |
4484 | 25 | """ | 25 | """ |
4485 | 26 | Render a template. | 26 | Render a template. |
4486 | 27 | 27 | ||
4487 | 28 | The `source` path, if not absolute, is relative to the `templates_dir`. | 28 | The `source` path, if not absolute, is relative to the `templates_dir`. |
4488 | 29 | 29 | ||
4490 | 30 | The `target` path should be absolute. | 30 | The `target` path should be absolute. It can also be `None`, in which |
4491 | 31 | case no file will be written. | ||
4492 | 31 | 32 | ||
4493 | 32 | The context should be a dict containing the values to be replaced in the | 33 | The context should be a dict containing the values to be replaced in the |
4494 | 33 | template. | 34 | template. |
4495 | @@ -36,6 +37,9 @@ | |||
4496 | 36 | 37 | ||
4497 | 37 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. | 38 | If omitted, `templates_dir` defaults to the `templates` folder in the charm. |
4498 | 38 | 39 | ||
4499 | 40 | The rendered template will be written to the file as well as being returned | ||
4500 | 41 | as a string. | ||
4501 | 42 | |||
4502 | 39 | Note: Using this requires python-jinja2; if it is not installed, calling | 43 | Note: Using this requires python-jinja2; if it is not installed, calling |
4503 | 40 | this will attempt to use charmhelpers.fetch.apt_install to install it. | 44 | this will attempt to use charmhelpers.fetch.apt_install to install it. |
4504 | 41 | """ | 45 | """ |
4505 | @@ -52,17 +56,26 @@ | |||
4506 | 52 | apt_install('python-jinja2', fatal=True) | 56 | apt_install('python-jinja2', fatal=True) |
4507 | 53 | from jinja2 import FileSystemLoader, Environment, exceptions | 57 | from jinja2 import FileSystemLoader, Environment, exceptions |
4508 | 54 | 58 | ||
4512 | 55 | if templates_dir is None: | 59 | if template_loader: |
4513 | 56 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') | 60 | template_env = Environment(loader=template_loader) |
4514 | 57 | loader = Environment(loader=FileSystemLoader(templates_dir)) | 61 | else: |
4515 | 62 | if templates_dir is None: | ||
4516 | 63 | templates_dir = os.path.join(hookenv.charm_dir(), 'templates') | ||
4517 | 64 | template_env = Environment(loader=FileSystemLoader(templates_dir)) | ||
4518 | 58 | try: | 65 | try: |
4519 | 59 | source = source | 66 | source = source |
4521 | 60 | template = loader.get_template(source) | 67 | template = template_env.get_template(source) |
4522 | 61 | except exceptions.TemplateNotFound as e: | 68 | except exceptions.TemplateNotFound as e: |
4523 | 62 | hookenv.log('Could not load template %s from %s.' % | 69 | hookenv.log('Could not load template %s from %s.' % |
4524 | 63 | (source, templates_dir), | 70 | (source, templates_dir), |
4525 | 64 | level=hookenv.ERROR) | 71 | level=hookenv.ERROR) |
4526 | 65 | raise e | 72 | raise e |
4527 | 66 | content = template.render(context) | 73 | content = template.render(context) |
4530 | 67 | host.mkdir(os.path.dirname(target), owner, group, perms=0o755) | 74 | if target is not None: |
4531 | 68 | host.write_file(target, content.encode(encoding), owner, group, perms) | 75 | target_dir = os.path.dirname(target) |
4532 | 76 | if not os.path.exists(target_dir): | ||
4533 | 77 | # This is a terrible default directory permission, as the file | ||
4534 | 78 | # or its siblings will often contain secrets. | ||
4535 | 79 | host.mkdir(os.path.dirname(target), owner, group, perms=0o755) | ||
4536 | 80 | host.write_file(target, content.encode(encoding), owner, group, perms) | ||
4537 | 81 | return content | ||
4538 | 69 | 82 | ||
4539 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
4540 | --- hooks/charmhelpers/fetch/__init__.py 2015-10-22 13:19:13 +0000 | |||
4541 | +++ hooks/charmhelpers/fetch/__init__.py 2016-01-06 21:19:13 +0000 | |||
4542 | @@ -90,14 +90,33 @@ | |||
4543 | 90 | 'kilo/proposed': 'trusty-proposed/kilo', | 90 | 'kilo/proposed': 'trusty-proposed/kilo', |
4544 | 91 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', | 91 | 'trusty-kilo/proposed': 'trusty-proposed/kilo', |
4545 | 92 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', | 92 | 'trusty-proposed/kilo': 'trusty-proposed/kilo', |
4554 | 93 | # Liberty | 93 | <<<<<<< TREE |
4555 | 94 | 'liberty': 'trusty-updates/liberty', | 94 | # Liberty |
4556 | 95 | 'trusty-liberty': 'trusty-updates/liberty', | 95 | 'liberty': 'trusty-updates/liberty', |
4557 | 96 | 'trusty-liberty/updates': 'trusty-updates/liberty', | 96 | 'trusty-liberty': 'trusty-updates/liberty', |
4558 | 97 | 'trusty-updates/liberty': 'trusty-updates/liberty', | 97 | 'trusty-liberty/updates': 'trusty-updates/liberty', |
4559 | 98 | 'liberty/proposed': 'trusty-proposed/liberty', | 98 | 'trusty-updates/liberty': 'trusty-updates/liberty', |
4560 | 99 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', | 99 | 'liberty/proposed': 'trusty-proposed/liberty', |
4561 | 100 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', | 100 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', |
4562 | 101 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', | ||
4563 | 102 | ======= | ||
4564 | 103 | # Liberty | ||
4565 | 104 | 'liberty': 'trusty-updates/liberty', | ||
4566 | 105 | 'trusty-liberty': 'trusty-updates/liberty', | ||
4567 | 106 | 'trusty-liberty/updates': 'trusty-updates/liberty', | ||
4568 | 107 | 'trusty-updates/liberty': 'trusty-updates/liberty', | ||
4569 | 108 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
4570 | 109 | 'trusty-liberty/proposed': 'trusty-proposed/liberty', | ||
4571 | 110 | 'trusty-proposed/liberty': 'trusty-proposed/liberty', | ||
4572 | 111 | # Mitaka | ||
4573 | 112 | 'mitaka': 'trusty-updates/mitaka', | ||
4574 | 113 | 'trusty-mitaka': 'trusty-updates/mitaka', | ||
4575 | 114 | 'trusty-mitaka/updates': 'trusty-updates/mitaka', | ||
4576 | 115 | 'trusty-updates/mitaka': 'trusty-updates/mitaka', | ||
4577 | 116 | 'mitaka/proposed': 'trusty-proposed/mitaka', | ||
4578 | 117 | 'trusty-mitaka/proposed': 'trusty-proposed/mitaka', | ||
4579 | 118 | 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', | ||
4580 | 119 | >>>>>>> MERGE-SOURCE | ||
4581 | 101 | } | 120 | } |
4582 | 102 | 121 | ||
4583 | 103 | # The order of this list is very important. Handlers should be listed in from | 122 | # The order of this list is very important. Handlers should be listed in from |
4584 | @@ -223,6 +242,7 @@ | |||
4585 | 223 | _run_apt_command(cmd, fatal) | 242 | _run_apt_command(cmd, fatal) |
4586 | 224 | 243 | ||
4587 | 225 | 244 | ||
4588 | 245 | <<<<<<< TREE | ||
4589 | 226 | def apt_mark(packages, mark, fatal=False): | 246 | def apt_mark(packages, mark, fatal=False): |
4590 | 227 | """Flag one or more packages using apt-mark""" | 247 | """Flag one or more packages using apt-mark""" |
4591 | 228 | cmd = ['apt-mark', mark] | 248 | cmd = ['apt-mark', mark] |
4592 | @@ -238,6 +258,23 @@ | |||
4593 | 238 | subprocess.call(cmd, universal_newlines=True) | 258 | subprocess.call(cmd, universal_newlines=True) |
4594 | 239 | 259 | ||
4595 | 240 | 260 | ||
4596 | 261 | ======= | ||
4597 | 262 | def apt_mark(packages, mark, fatal=False): | ||
4598 | 263 | """Flag one or more packages using apt-mark""" | ||
4599 | 264 | log("Marking {} as {}".format(packages, mark)) | ||
4600 | 265 | cmd = ['apt-mark', mark] | ||
4601 | 266 | if isinstance(packages, six.string_types): | ||
4602 | 267 | cmd.append(packages) | ||
4603 | 268 | else: | ||
4604 | 269 | cmd.extend(packages) | ||
4605 | 270 | |||
4606 | 271 | if fatal: | ||
4607 | 272 | subprocess.check_call(cmd, universal_newlines=True) | ||
4608 | 273 | else: | ||
4609 | 274 | subprocess.call(cmd, universal_newlines=True) | ||
4610 | 275 | |||
4611 | 276 | |||
4612 | 277 | >>>>>>> MERGE-SOURCE | ||
4613 | 241 | def apt_hold(packages, fatal=False): | 278 | def apt_hold(packages, fatal=False): |
4614 | 242 | return apt_mark(packages, 'hold', fatal=fatal) | 279 | return apt_mark(packages, 'hold', fatal=fatal) |
4615 | 243 | 280 | ||
4616 | @@ -411,7 +448,7 @@ | |||
4617 | 411 | importlib.import_module(package), | 448 | importlib.import_module(package), |
4618 | 412 | classname) | 449 | classname) |
4619 | 413 | plugin_list.append(handler_class()) | 450 | plugin_list.append(handler_class()) |
4621 | 414 | except (ImportError, AttributeError): | 451 | except NotImplementedError: |
4622 | 415 | # Skip missing plugins so that they can be ommitted from | 452 | # Skip missing plugins so that they can be ommitted from |
4623 | 416 | # installation if desired | 453 | # installation if desired |
4624 | 417 | log("FetchHandler {} not found, skipping plugin".format( | 454 | log("FetchHandler {} not found, skipping plugin".format( |
4625 | 418 | 455 | ||
4626 | === modified file 'hooks/charmhelpers/fetch/archiveurl.py' | |||
4627 | --- hooks/charmhelpers/fetch/archiveurl.py 2015-08-10 16:34:04 +0000 | |||
4628 | +++ hooks/charmhelpers/fetch/archiveurl.py 2016-01-06 21:19:13 +0000 | |||
4629 | @@ -108,7 +108,7 @@ | |||
4630 | 108 | install_opener(opener) | 108 | install_opener(opener) |
4631 | 109 | response = urlopen(source) | 109 | response = urlopen(source) |
4632 | 110 | try: | 110 | try: |
4634 | 111 | with open(dest, 'w') as dest_file: | 111 | with open(dest, 'wb') as dest_file: |
4635 | 112 | dest_file.write(response.read()) | 112 | dest_file.write(response.read()) |
4636 | 113 | except Exception as e: | 113 | except Exception as e: |
4637 | 114 | if os.path.isfile(dest): | 114 | if os.path.isfile(dest): |
4638 | 115 | 115 | ||
4639 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
4640 | --- hooks/charmhelpers/fetch/bzrurl.py 2015-01-26 09:47:37 +0000 | |||
4641 | +++ hooks/charmhelpers/fetch/bzrurl.py 2016-01-06 21:19:13 +0000 | |||
4642 | @@ -15,60 +15,50 @@ | |||
4643 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
4644 | 16 | 16 | ||
4645 | 17 | import os | 17 | import os |
4646 | 18 | from subprocess import check_call | ||
4647 | 18 | from charmhelpers.fetch import ( | 19 | from charmhelpers.fetch import ( |
4648 | 19 | BaseFetchHandler, | 20 | BaseFetchHandler, |
4650 | 20 | UnhandledSource | 21 | UnhandledSource, |
4651 | 22 | filter_installed_packages, | ||
4652 | 23 | apt_install, | ||
4653 | 21 | ) | 24 | ) |
4654 | 22 | from charmhelpers.core.host import mkdir | 25 | from charmhelpers.core.host import mkdir |
4655 | 23 | 26 | ||
4656 | 24 | import six | ||
4657 | 25 | if six.PY3: | ||
4658 | 26 | raise ImportError('bzrlib does not support Python3') | ||
4659 | 27 | 27 | ||
4668 | 28 | try: | 28 | if filter_installed_packages(['bzr']) != []: |
4669 | 29 | from bzrlib.branch import Branch | 29 | apt_install(['bzr']) |
4670 | 30 | from bzrlib import bzrdir, workingtree, errors | 30 | if filter_installed_packages(['bzr']) != []: |
4671 | 31 | except ImportError: | 31 | raise NotImplementedError('Unable to install bzr') |
4664 | 32 | from charmhelpers.fetch import apt_install | ||
4665 | 33 | apt_install("python-bzrlib") | ||
4666 | 34 | from bzrlib.branch import Branch | ||
4667 | 35 | from bzrlib import bzrdir, workingtree, errors | ||
4672 | 36 | 32 | ||
4673 | 37 | 33 | ||
4674 | 38 | class BzrUrlFetchHandler(BaseFetchHandler): | 34 | class BzrUrlFetchHandler(BaseFetchHandler): |
4675 | 39 | """Handler for bazaar branches via generic and lp URLs""" | 35 | """Handler for bazaar branches via generic and lp URLs""" |
4676 | 40 | def can_handle(self, source): | 36 | def can_handle(self, source): |
4677 | 41 | url_parts = self.parse_url(source) | 37 | url_parts = self.parse_url(source) |
4679 | 42 | if url_parts.scheme not in ('bzr+ssh', 'lp'): | 38 | if url_parts.scheme not in ('bzr+ssh', 'lp', ''): |
4680 | 43 | return False | 39 | return False |
4681 | 40 | elif not url_parts.scheme: | ||
4682 | 41 | return os.path.exists(os.path.join(source, '.bzr')) | ||
4683 | 44 | else: | 42 | else: |
4684 | 45 | return True | 43 | return True |
4685 | 46 | 44 | ||
4686 | 47 | def branch(self, source, dest): | 45 | def branch(self, source, dest): |
4687 | 48 | url_parts = self.parse_url(source) | ||
4688 | 49 | # If we use lp:branchname scheme we need to load plugins | ||
4689 | 50 | if not self.can_handle(source): | 46 | if not self.can_handle(source): |
4690 | 51 | raise UnhandledSource("Cannot handle {}".format(source)) | 47 | raise UnhandledSource("Cannot handle {}".format(source)) |
4705 | 52 | if url_parts.scheme == "lp": | 48 | if os.path.exists(dest): |
4706 | 53 | from bzrlib.plugin import load_plugins | 49 | check_call(['bzr', 'pull', '--overwrite', '-d', dest, source]) |
4707 | 54 | load_plugins() | 50 | else: |
4708 | 55 | try: | 51 | check_call(['bzr', 'branch', source, dest]) |
4695 | 56 | local_branch = bzrdir.BzrDir.create_branch_convenience(dest) | ||
4696 | 57 | except errors.AlreadyControlDirError: | ||
4697 | 58 | local_branch = Branch.open(dest) | ||
4698 | 59 | try: | ||
4699 | 60 | remote_branch = Branch.open(source) | ||
4700 | 61 | remote_branch.push(local_branch) | ||
4701 | 62 | tree = workingtree.WorkingTree.open(dest) | ||
4702 | 63 | tree.update() | ||
4703 | 64 | except Exception as e: | ||
4704 | 65 | raise e | ||
4709 | 66 | 52 | ||
4711 | 67 | def install(self, source): | 53 | def install(self, source, dest=None): |
4712 | 68 | url_parts = self.parse_url(source) | 54 | url_parts = self.parse_url(source) |
4713 | 69 | branch_name = url_parts.path.strip("/").split("/")[-1] | 55 | branch_name = url_parts.path.strip("/").split("/")[-1] |
4716 | 70 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", | 56 | if dest: |
4717 | 71 | branch_name) | 57 | dest_dir = os.path.join(dest, branch_name) |
4718 | 58 | else: | ||
4719 | 59 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", | ||
4720 | 60 | branch_name) | ||
4721 | 61 | |||
4722 | 72 | if not os.path.exists(dest_dir): | 62 | if not os.path.exists(dest_dir): |
4723 | 73 | mkdir(dest_dir, perms=0o755) | 63 | mkdir(dest_dir, perms=0o755) |
4724 | 74 | try: | 64 | try: |
4725 | 75 | 65 | ||
4726 | === modified file 'hooks/charmhelpers/fetch/giturl.py' | |||
4727 | --- hooks/charmhelpers/fetch/giturl.py 2015-08-10 16:34:04 +0000 | |||
4728 | +++ hooks/charmhelpers/fetch/giturl.py 2016-01-06 21:19:13 +0000 | |||
4729 | @@ -15,24 +15,19 @@ | |||
4730 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
4731 | 16 | 16 | ||
4732 | 17 | import os | 17 | import os |
4733 | 18 | from subprocess import check_call | ||
4734 | 18 | from charmhelpers.fetch import ( | 19 | from charmhelpers.fetch import ( |
4735 | 19 | BaseFetchHandler, | 20 | BaseFetchHandler, |
4737 | 20 | UnhandledSource | 21 | UnhandledSource, |
4738 | 22 | filter_installed_packages, | ||
4739 | 23 | apt_install, | ||
4740 | 21 | ) | 24 | ) |
4741 | 22 | from charmhelpers.core.host import mkdir | 25 | from charmhelpers.core.host import mkdir |
4742 | 23 | 26 | ||
4755 | 24 | import six | 27 | if filter_installed_packages(['git']) != []: |
4756 | 25 | if six.PY3: | 28 | apt_install(['git']) |
4757 | 26 | raise ImportError('GitPython does not support Python 3') | 29 | if filter_installed_packages(['git']) != []: |
4758 | 27 | 30 | raise NotImplementedError('Unable to install git') | |
4747 | 28 | try: | ||
4748 | 29 | from git import Repo | ||
4749 | 30 | except ImportError: | ||
4750 | 31 | from charmhelpers.fetch import apt_install | ||
4751 | 32 | apt_install("python-git") | ||
4752 | 33 | from git import Repo | ||
4753 | 34 | |||
4754 | 35 | from git.exc import GitCommandError # noqa E402 | ||
4759 | 36 | 31 | ||
4760 | 37 | 32 | ||
4761 | 38 | class GitUrlFetchHandler(BaseFetchHandler): | 33 | class GitUrlFetchHandler(BaseFetchHandler): |
4762 | @@ -40,19 +35,35 @@ | |||
4763 | 40 | def can_handle(self, source): | 35 | def can_handle(self, source): |
4764 | 41 | url_parts = self.parse_url(source) | 36 | url_parts = self.parse_url(source) |
4765 | 42 | # TODO (mattyw) no support for ssh git@ yet | 37 | # TODO (mattyw) no support for ssh git@ yet |
4767 | 43 | if url_parts.scheme not in ('http', 'https', 'git'): | 38 | if url_parts.scheme not in ('http', 'https', 'git', ''): |
4768 | 44 | return False | 39 | return False |
4769 | 40 | elif not url_parts.scheme: | ||
4770 | 41 | return os.path.exists(os.path.join(source, '.git')) | ||
4771 | 45 | else: | 42 | else: |
4772 | 46 | return True | 43 | return True |
4773 | 47 | 44 | ||
4774 | 45 | <<<<<<< TREE | ||
4775 | 48 | def clone(self, source, dest, branch, depth=None): | 46 | def clone(self, source, dest, branch, depth=None): |
4776 | 47 | ======= | ||
4777 | 48 | def clone(self, source, dest, branch="master", depth=None): | ||
4778 | 49 | >>>>>>> MERGE-SOURCE | ||
4779 | 49 | if not self.can_handle(source): | 50 | if not self.can_handle(source): |
4780 | 50 | raise UnhandledSource("Cannot handle {}".format(source)) | 51 | raise UnhandledSource("Cannot handle {}".format(source)) |
4781 | 51 | 52 | ||
4782 | 53 | <<<<<<< TREE | ||
4783 | 52 | if depth: | 54 | if depth: |
4784 | 53 | Repo.clone_from(source, dest, branch=branch, depth=depth) | 55 | Repo.clone_from(source, dest, branch=branch, depth=depth) |
4785 | 54 | else: | 56 | else: |
4786 | 55 | Repo.clone_from(source, dest, branch=branch) | 57 | Repo.clone_from(source, dest, branch=branch) |
4787 | 58 | ======= | ||
4788 | 59 | if os.path.exists(dest): | ||
4789 | 60 | cmd = ['git', '-C', dest, 'pull', source, branch] | ||
4790 | 61 | else: | ||
4791 | 62 | cmd = ['git', 'clone', source, dest, '--branch', branch] | ||
4792 | 63 | if depth: | ||
4793 | 64 | cmd.extend(['--depth', depth]) | ||
4794 | 65 | check_call(cmd) | ||
4795 | 66 | >>>>>>> MERGE-SOURCE | ||
4796 | 56 | 67 | ||
4797 | 57 | def install(self, source, branch="master", dest=None, depth=None): | 68 | def install(self, source, branch="master", dest=None, depth=None): |
4798 | 58 | url_parts = self.parse_url(source) | 69 | url_parts = self.parse_url(source) |
4799 | @@ -65,9 +76,13 @@ | |||
4800 | 65 | if not os.path.exists(dest_dir): | 76 | if not os.path.exists(dest_dir): |
4801 | 66 | mkdir(dest_dir, perms=0o755) | 77 | mkdir(dest_dir, perms=0o755) |
4802 | 67 | try: | 78 | try: |
4803 | 79 | <<<<<<< TREE | ||
4804 | 68 | self.clone(source, dest_dir, branch, depth) | 80 | self.clone(source, dest_dir, branch, depth) |
4805 | 69 | except GitCommandError as e: | 81 | except GitCommandError as e: |
4806 | 70 | raise UnhandledSource(e) | 82 | raise UnhandledSource(e) |
4807 | 83 | ======= | ||
4808 | 84 | self.clone(source, dest_dir, branch, depth) | ||
4809 | 85 | >>>>>>> MERGE-SOURCE | ||
4810 | 71 | except OSError as e: | 86 | except OSError as e: |
4811 | 72 | raise UnhandledSource(e.strerror) | 87 | raise UnhandledSource(e.strerror) |
4812 | 73 | return dest_dir | 88 | return dest_dir |
4813 | 74 | 89 | ||
4814 | === modified file 'hooks/cinder_hooks.py' | |||
4815 | --- hooks/cinder_hooks.py 2015-10-22 13:19:13 +0000 | |||
4816 | +++ hooks/cinder_hooks.py 2016-01-06 21:19:13 +0000 | |||
4817 | @@ -24,11 +24,19 @@ | |||
4818 | 24 | CINDER_CONF, | 24 | CINDER_CONF, |
4819 | 25 | CINDER_API_CONF, | 25 | CINDER_API_CONF, |
4820 | 26 | ceph_config_file, | 26 | ceph_config_file, |
4821 | 27 | <<<<<<< TREE | ||
4822 | 27 | setup_ipv6, | 28 | setup_ipv6, |
4823 | 28 | check_db_initialised, | 29 | check_db_initialised, |
4824 | 29 | filesystem_mounted, | 30 | filesystem_mounted, |
4825 | 30 | REQUIRED_INTERFACES, | 31 | REQUIRED_INTERFACES, |
4826 | 31 | check_optional_relations, | 32 | check_optional_relations, |
4827 | 33 | ======= | ||
4828 | 34 | setup_ipv6, | ||
4829 | 35 | check_db_initialised, | ||
4830 | 36 | filesystem_mounted, | ||
4831 | 37 | required_interfaces, | ||
4832 | 38 | check_optional_relations, | ||
4833 | 39 | >>>>>>> MERGE-SOURCE | ||
4834 | 32 | ) | 40 | ) |
4835 | 33 | 41 | ||
4836 | 34 | from charmhelpers.core.hookenv import ( | 42 | from charmhelpers.core.hookenv import ( |
4837 | @@ -553,5 +561,10 @@ | |||
4838 | 553 | hooks.execute(sys.argv) | 561 | hooks.execute(sys.argv) |
4839 | 554 | except UnregisteredHookError as e: | 562 | except UnregisteredHookError as e: |
4840 | 555 | juju_log('Unknown hook {} - skipping.'.format(e)) | 563 | juju_log('Unknown hook {} - skipping.'.format(e)) |
4841 | 564 | <<<<<<< TREE | ||
4842 | 556 | set_os_workload_status(CONFIGS, REQUIRED_INTERFACES, | 565 | set_os_workload_status(CONFIGS, REQUIRED_INTERFACES, |
4843 | 557 | charm_func=check_optional_relations) | 566 | charm_func=check_optional_relations) |
4844 | 567 | ======= | ||
4845 | 568 | set_os_workload_status(CONFIGS, required_interfaces(), | ||
4846 | 569 | charm_func=check_optional_relations) | ||
4847 | 570 | >>>>>>> MERGE-SOURCE | ||
4848 | 558 | 571 | ||
4849 | === modified file 'hooks/cinder_utils.py' | |||
4850 | --- hooks/cinder_utils.py 2015-10-22 13:19:13 +0000 | |||
4851 | +++ hooks/cinder_utils.py 2016-01-06 21:19:13 +0000 | |||
4852 | @@ -158,15 +158,36 @@ | |||
4853 | 158 | 158 | ||
4854 | 159 | TEMPLATES = 'templates/' | 159 | TEMPLATES = 'templates/' |
4855 | 160 | 160 | ||
4865 | 161 | # The interface is said to be satisfied if anyone of the interfaces in | 161 | <<<<<<< TREE |
4866 | 162 | # the | 162 | # The interface is said to be satisfied if anyone of the interfaces in |
4867 | 163 | # list has a complete context. | 163 | # the |
4868 | 164 | REQUIRED_INTERFACES = { | 164 | # list has a complete context. |
4869 | 165 | 'database': ['shared-db', 'pgsql-db'], | 165 | REQUIRED_INTERFACES = { |
4870 | 166 | 'messaging': ['amqp'], | 166 | 'database': ['shared-db', 'pgsql-db'], |
4871 | 167 | 'identity': ['identity-service'], | 167 | 'messaging': ['amqp'], |
4872 | 168 | } | 168 | 'identity': ['identity-service'], |
4873 | 169 | 169 | } | |
4874 | 170 | |||
4875 | 171 | ======= | ||
4876 | 172 | # The interface is said to be satisfied if anyone of the interfaces in | ||
4877 | 173 | # the | ||
4878 | 174 | # list has a complete context. | ||
4879 | 175 | REQUIRED_INTERFACES = { | ||
4880 | 176 | 'database': ['shared-db', 'pgsql-db'], | ||
4881 | 177 | 'messaging': ['amqp'], | ||
4882 | 178 | 'identity': ['identity-service'], | ||
4883 | 179 | } | ||
4884 | 180 | |||
4885 | 181 | |||
4886 | 182 | def required_interfaces(): | ||
4887 | 183 | '''Provide the required charm interfaces based on configured roles.''' | ||
4888 | 184 | _interfaces = copy(REQUIRED_INTERFACES) | ||
4889 | 185 | if not service_enabled('api'): | ||
4890 | 186 | # drop requirement for identity interface | ||
4891 | 187 | _interfaces.pop('identity') | ||
4892 | 188 | return _interfaces | ||
4893 | 189 | |||
4894 | 190 | >>>>>>> MERGE-SOURCE | ||
4895 | 170 | 191 | ||
4896 | 171 | def ceph_config_file(): | 192 | def ceph_config_file(): |
4897 | 172 | return CHARM_CEPH_CONF.format(service_name()) | 193 | return CHARM_CEPH_CONF.format(service_name()) |
4898 | 173 | 194 | ||
4899 | === added symlink 'hooks/install.real' | |||
4900 | === target is u'cinder_hooks.py' | |||
4901 | === renamed symlink 'hooks/install.real' => 'hooks/install.real.moved' | |||
4902 | === added symlink 'hooks/update-status' | |||
4903 | === target is u'cinder_hooks.py' | |||
4904 | === renamed symlink 'hooks/update-status' => 'hooks/update-status.moved' | |||
4905 | === modified file 'metadata.yaml' | |||
4906 | --- metadata.yaml 2015-10-22 13:19:13 +0000 | |||
4907 | +++ metadata.yaml 2016-01-06 21:19:13 +0000 | |||
4908 | @@ -1,12 +1,20 @@ | |||
4909 | 1 | name: cinder | 1 | name: cinder |
4912 | 2 | summary: Cinder OpenStack storage service | 2 | summary: OpenStack block storage service |
4913 | 3 | maintainer: Adam Gandelman <adamg@canonical.com> | 3 | maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com> |
4914 | 4 | description: | | 4 | description: | |
4915 | 5 | <<<<<<< TREE | ||
4916 | 5 | Cinder is a storage service for the Openstack project | 6 | Cinder is a storage service for the Openstack project |
4917 | 6 | tags: | 7 | tags: |
4918 | 7 | - openstack | 8 | - openstack |
4919 | 8 | - storage | 9 | - storage |
4920 | 9 | - misc | 10 | - misc |
4921 | 11 | ======= | ||
4922 | 12 | Cinder is the block storage service for the OpenStack. | ||
4923 | 13 | tags: | ||
4924 | 14 | - openstack | ||
4925 | 15 | - storage | ||
4926 | 16 | - misc | ||
4927 | 17 | >>>>>>> MERGE-SOURCE | ||
4928 | 10 | provides: | 18 | provides: |
4929 | 11 | nrpe-external-master: | 19 | nrpe-external-master: |
4930 | 12 | interface: nrpe-external-master | 20 | interface: nrpe-external-master |
4931 | 13 | 21 | ||
4932 | === added file 'requirements.txt' | |||
4933 | --- requirements.txt 1970-01-01 00:00:00 +0000 | |||
4934 | +++ requirements.txt 2016-01-06 21:19:13 +0000 | |||
4935 | @@ -0,0 +1,11 @@ | |||
4936 | 1 | # The order of packages is significant, because pip processes them in the order | ||
4937 | 2 | # of appearance. Changing the order has an impact on the overall integration | ||
4938 | 3 | # process, which may cause wedges in the gate later. | ||
4939 | 4 | PyYAML>=3.1.0 | ||
4940 | 5 | simplejson>=2.2.0 | ||
4941 | 6 | netifaces>=0.10.4 | ||
4942 | 7 | netaddr>=0.7.12,!=0.7.16 | ||
4943 | 8 | Jinja2>=2.6 # BSD License (3 clause) | ||
4944 | 9 | six>=1.9.0 | ||
4945 | 10 | dnspython>=1.12.0 | ||
4946 | 11 | psutil>=1.1.1,<2.0.0 | ||
4947 | 0 | 12 | ||
4948 | === added file 'test-requirements.txt' | |||
4949 | --- test-requirements.txt 1970-01-01 00:00:00 +0000 | |||
4950 | +++ test-requirements.txt 2016-01-06 21:19:13 +0000 | |||
4951 | @@ -0,0 +1,8 @@ | |||
4952 | 1 | # The order of packages is significant, because pip processes them in the order | ||
4953 | 2 | # of appearance. Changing the order has an impact on the overall integration | ||
4954 | 3 | # process, which may cause wedges in the gate later. | ||
4955 | 4 | coverage>=3.6 | ||
4956 | 5 | mock>=1.2 | ||
4957 | 6 | flake8>=2.2.4,<=2.4.1 | ||
4958 | 7 | os-testr>=0.4.1 | ||
4959 | 8 | charm-tools | ||
4960 | 0 | 9 | ||
4961 | === added file 'tests/052-basic-trusty-kilo-git' | |||
4962 | --- tests/052-basic-trusty-kilo-git 1970-01-01 00:00:00 +0000 | |||
4963 | +++ tests/052-basic-trusty-kilo-git 2016-01-06 21:19:13 +0000 | |||
4964 | @@ -0,0 +1,12 @@ | |||
4965 | 1 | #!/usr/bin/python | ||
4966 | 2 | |||
4967 | 3 | """Amulet tests on a basic cinder git deployment on trusty-kilo.""" | ||
4968 | 4 | |||
4969 | 5 | from basic_deployment import CinderBasicDeployment | ||
4970 | 6 | |||
4971 | 7 | if __name__ == '__main__': | ||
4972 | 8 | deployment = CinderBasicDeployment(series='trusty', | ||
4973 | 9 | openstack='cloud:trusty-kilo', | ||
4974 | 10 | source='cloud:trusty-updates/kilo', | ||
4975 | 11 | git=True) | ||
4976 | 12 | deployment.run_tests() | ||
4977 | 0 | 13 | ||
4978 | === renamed file 'tests/052-basic-trusty-kilo-git' => 'tests/052-basic-trusty-kilo-git.moved' | |||
4979 | === modified file 'tests/basic_deployment.py' | |||
4980 | --- tests/basic_deployment.py 2015-10-22 16:09:12 +0000 | |||
4981 | +++ tests/basic_deployment.py 2016-01-06 21:19:13 +0000 | |||
4982 | @@ -26,8 +26,13 @@ | |||
4983 | 26 | Create volume snapshot. Create volume from snapshot.""" | 26 | Create volume snapshot. Create volume from snapshot.""" |
4984 | 27 | 27 | ||
4985 | 28 | def __init__(self, series=None, openstack=None, source=None, git=False, | 28 | def __init__(self, series=None, openstack=None, source=None, git=False, |
4986 | 29 | <<<<<<< TREE | ||
4987 | 29 | stable=True): | 30 | stable=True): |
4988 | 30 | """Deploy the entire test environment.""" | 31 | """Deploy the entire test environment.""" |
4989 | 32 | ======= | ||
4990 | 33 | stable=False): | ||
4991 | 34 | """Deploy the entire test environment.""" | ||
4992 | 35 | >>>>>>> MERGE-SOURCE | ||
4993 | 31 | super(CinderBasicDeployment, self).__init__(series, openstack, source, | 36 | super(CinderBasicDeployment, self).__init__(series, openstack, source, |
4994 | 32 | stable) | 37 | stable) |
4995 | 33 | self.git = git | 38 | self.git = git |
4996 | 34 | 39 | ||
4997 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
4998 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-10-22 13:19:13 +0000 | |||
4999 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2016-01-06 21:19:13 +0000 | |||
5000 | @@ -14,12 +14,18 @@ |