Merge lp:~chris-gondolin/charms/trusty/keystone/ldap-fixes into lp:charms/keystone
- Trusty Tahr (14.04)
- ldap-fixes
- Merge into trunk
Status: | Rejected |
---|---|
Rejected by: | James Page |
Proposed branch: | lp:~chris-gondolin/charms/trusty/keystone/ldap-fixes |
Merge into: | lp:charms/keystone |
Diff against target: |
25116 lines (+24267/-0) (has conflicts) 145 files modified
.bzrignore (+5/-0) .coveragerc (+7/-0) .project (+17/-0) .pydevproject (+9/-0) .testr.conf (+8/-0) Makefile (+30/-0) README.md (+134/-0) actions.yaml (+17/-0) actions/actions.py (+61/-0) actions/git_reinstall.py (+43/-0) actions/openstack_upgrade.py (+37/-0) charm-helpers-hooks.yaml (+18/-0) charm-helpers-tests.yaml (+5/-0) charmhelpers/__init__.py (+38/-0) charmhelpers/cli/__init__.py (+191/-0) charmhelpers/cli/benchmark.py (+36/-0) charmhelpers/cli/commands.py (+32/-0) charmhelpers/cli/hookenv.py (+23/-0) charmhelpers/cli/host.py (+31/-0) charmhelpers/cli/unitdata.py (+39/-0) charmhelpers/contrib/__init__.py (+15/-0) charmhelpers/contrib/charmsupport/__init__.py (+15/-0) charmhelpers/contrib/charmsupport/nrpe.py (+398/-0) charmhelpers/contrib/charmsupport/volumes.py (+175/-0) charmhelpers/contrib/hahelpers/__init__.py (+15/-0) charmhelpers/contrib/hahelpers/apache.py (+82/-0) charmhelpers/contrib/hahelpers/cluster.py (+316/-0) charmhelpers/contrib/network/__init__.py (+15/-0) charmhelpers/contrib/network/ip.py (+458/-0) charmhelpers/contrib/openstack/__init__.py (+15/-0) charmhelpers/contrib/openstack/alternatives.py (+33/-0) charmhelpers/contrib/openstack/amulet/__init__.py (+15/-0) charmhelpers/contrib/openstack/amulet/deployment.py (+301/-0) charmhelpers/contrib/openstack/amulet/utils.py (+985/-0) charmhelpers/contrib/openstack/context.py (+1473/-0) charmhelpers/contrib/openstack/files/__init__.py (+18/-0) charmhelpers/contrib/openstack/files/check_haproxy.sh (+34/-0) charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh (+30/-0) charmhelpers/contrib/openstack/ip.py (+151/-0) charmhelpers/contrib/openstack/neutron.py (+370/-0) charmhelpers/contrib/openstack/templates/__init__.py (+18/-0) charmhelpers/contrib/openstack/templates/ceph.conf (+21/-0) charmhelpers/contrib/openstack/templates/git.upstart (+17/-0) charmhelpers/contrib/openstack/templates/haproxy.cfg (+66/-0) charmhelpers/contrib/openstack/templates/openstack_https_frontend (+24/-0) 
charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+24/-0) charmhelpers/contrib/openstack/templates/section-keystone-authtoken (+9/-0) charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo (+22/-0) charmhelpers/contrib/openstack/templates/section-zeromq (+14/-0) charmhelpers/contrib/openstack/templating.py (+323/-0) charmhelpers/contrib/openstack/utils.py (+1011/-0) charmhelpers/contrib/peerstorage/__init__.py (+269/-0) charmhelpers/contrib/python/__init__.py (+15/-0) charmhelpers/contrib/python/debug.py (+56/-0) charmhelpers/contrib/python/packages.py (+130/-0) charmhelpers/contrib/python/rpdb.py (+58/-0) charmhelpers/contrib/python/version.py (+34/-0) charmhelpers/contrib/storage/__init__.py (+15/-0) charmhelpers/contrib/storage/linux/__init__.py (+15/-0) charmhelpers/contrib/storage/linux/ceph.py (+1039/-0) charmhelpers/contrib/storage/linux/loopback.py (+88/-0) charmhelpers/contrib/storage/linux/lvm.py (+105/-0) charmhelpers/contrib/storage/linux/utils.py (+71/-0) charmhelpers/contrib/unison/__init__.py (+313/-0) charmhelpers/core/__init__.py (+15/-0) charmhelpers/core/decorators.py (+57/-0) charmhelpers/core/files.py (+45/-0) charmhelpers/core/fstab.py (+134/-0) charmhelpers/core/hookenv.py (+978/-0) charmhelpers/core/host.py (+658/-0) charmhelpers/core/hugepage.py (+71/-0) charmhelpers/core/kernel.py (+68/-0) charmhelpers/core/services/__init__.py (+18/-0) charmhelpers/core/services/base.py (+353/-0) charmhelpers/core/services/helpers.py (+292/-0) charmhelpers/core/strutils.py (+72/-0) charmhelpers/core/sysctl.py (+56/-0) charmhelpers/core/templating.py (+81/-0) charmhelpers/core/unitdata.py (+521/-0) charmhelpers/fetch/__init__.py (+464/-0) charmhelpers/fetch/archiveurl.py (+167/-0) charmhelpers/fetch/bzrurl.py (+68/-0) charmhelpers/fetch/giturl.py (+68/-0) charmhelpers/payload/__init__.py (+17/-0) charmhelpers/payload/archive.py (+73/-0) charmhelpers/payload/execd.py (+66/-0) config.yaml (+334/-0) copyright (+17/-0) 
hooks/install (+20/-0) hooks/keystone_context.py (+254/-0) hooks/keystone_hooks.py (+651/-0) hooks/keystone_ssl.py (+340/-0) hooks/keystone_utils.py (+1877/-0) hooks/manager.py (+47/-0) icon.svg (+653/-0) metadata.yaml (+32/-0) requirements.txt (+12/-0) scripts/add_to_cluster (+13/-0) scripts/remove_from_cluster (+4/-0) setup.cfg (+5/-0) templates/essex/keystone.conf (+93/-0) templates/essex/logging.conf (+39/-0) templates/folsom/keystone.conf (+112/-0) templates/git/logging.conf (+39/-0) templates/grizzly/keystone.conf (+131/-0) templates/havana/keystone.conf (+64/-0) templates/icehouse/keystone.conf (+112/-0) templates/icehouse/logging.conf (+43/-0) templates/kilo/keystone.conf (+115/-0) templates/kilo/logging.conf (+44/-0) templates/parts/section-signing (+13/-0) test-requirements.txt (+8/-0) tests/014-basic-precise-icehouse (+11/-0) tests/015-basic-trusty-icehouse (+9/-0) tests/016-basic-trusty-juno (+11/-0) tests/017-basic-trusty-kilo (+11/-0) tests/018-basic-trusty-liberty (+11/-0) tests/019-basic-trusty-mitaka (+11/-0) tests/020-basic-wily-liberty (+9/-0) tests/021-basic-xenial-mitaka (+9/-0) tests/050-basic-trusty-icehouse-git (+9/-0) tests/051-basic-trusty-juno-git (+12/-0) tests/052-basic-trusty-kilo-git (+12/-0) tests/README (+113/-0) tests/basic_deployment.py (+489/-0) tests/charmhelpers/__init__.py (+38/-0) tests/charmhelpers/contrib/__init__.py (+15/-0) tests/charmhelpers/contrib/amulet/__init__.py (+15/-0) tests/charmhelpers/contrib/amulet/deployment.py (+95/-0) tests/charmhelpers/contrib/amulet/utils.py (+818/-0) tests/charmhelpers/contrib/openstack/__init__.py (+15/-0) tests/charmhelpers/contrib/openstack/amulet/__init__.py (+15/-0) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+301/-0) tests/charmhelpers/contrib/openstack/amulet/utils.py (+985/-0) tests/setup/00-setup (+17/-0) tests/tests.yaml (+21/-0) tox.ini (+29/-0) unit_tests/__init__.py (+4/-0) unit_tests/test_actions.py (+132/-0) unit_tests/test_actions_git_reinstall.py (+93/-0) 
unit_tests/test_actions_openstack_upgrade.py (+57/-0) unit_tests/test_keystone_contexts.py (+173/-0) unit_tests/test_keystone_hooks.py (+978/-0) unit_tests/test_keystone_utils.py (+761/-0) unit_tests/test_utils.py (+122/-0) Conflict adding file .bzrignore. Moved existing file to .bzrignore.moved. Conflict adding file .coveragerc. Moved existing file to .coveragerc.moved. Conflict adding file .project. Moved existing file to .project.moved. Conflict adding file .pydevproject. Moved existing file to .pydevproject.moved. Conflict adding file .testr.conf. Moved existing file to .testr.conf.moved. Conflict adding file Makefile. Moved existing file to Makefile.moved. Conflict adding file README.md. Moved existing file to README.md.moved. Conflict adding file actions. Moved existing file to actions.moved. Conflict adding file actions.yaml. Moved existing file to actions.yaml.moved. Conflict adding file charm-helpers-hooks.yaml. Moved existing file to charm-helpers-hooks.yaml.moved. Conflict adding file charm-helpers-tests.yaml. Moved existing file to charm-helpers-tests.yaml.moved. Conflict adding file charmhelpers. Moved existing file to charmhelpers.moved. Conflict adding file config.yaml. Moved existing file to config.yaml.moved. Conflict adding file copyright. Moved existing file to copyright.moved. Conflict adding file hooks. Moved existing file to hooks.moved. Conflict adding file icon.svg. Moved existing file to icon.svg.moved. Conflict adding file metadata.yaml. Moved existing file to metadata.yaml.moved. Conflict adding file requirements.txt. Moved existing file to requirements.txt.moved. Conflict adding file scripts. Moved existing file to scripts.moved. Conflict adding file setup.cfg. Moved existing file to setup.cfg.moved. Conflict adding file templates. Moved existing file to templates.moved. Conflict adding file test-requirements.txt. Moved existing file to test-requirements.txt.moved. Conflict adding file tests. Moved existing file to tests.moved. 
Conflict adding file tox.ini. Moved existing file to tox.ini.moved. Conflict adding file unit_tests. Moved existing file to unit_tests.moved. |
To merge this branch: | bzr merge lp:~chris-gondolin/charms/trusty/keystone/ldap-fixes |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+290401@code.launchpad.net |
Commit message
Description of the change
Enabling ldap on liberty (and possibly juno and kilo) doesn't work, as it relies on two ldap packages (python-ldap and python-ldappool) that aren't installed. This tries to fix that (it can't always, as python-ldappool doesn't exist at all for precise and only in the cloud archive for trusty, but it at least tries.)
Enabling read-only ldap for the identity backend correctly skips creation of the admin user, but also skips creation of the admin project. It shouldn't - this fixes that.
Also fixes store_admin_passwd() to work with the config.yaml default of "None".
James Page (james-page) wrote : | # |
Unmerged revisions
- 205. By Chris Stratford
-
[chriss] Allow project creation even if identity driver is read-only ldap (as projects are resource not identity, so we still want them)
- 204. By Chris Stratford
-
[chriss] Include ldap packages (but disable them if not available). Fix store_admin_passwd() to work with config.yaml default of "None"
- 203. By David Ames
-
[1chb1n, r=thedac] wait for unit status and turn on releases for amulet tests
- 202. By David Ames
-
[tinwood,r=thedac] Fixes Bug#1526511 change pause/resume actions use (new) assess_status()
- 201. By James Page
-
Fix liberty/mitaka typo from previous test definition update batch.
- 200. By Liam Young
-
[gnuoy, r=james-page] Delete the old quantum catalogue entry if a neutron entry is present
- 199. By Liam Young
-
Update test combo definitions, remove Vivid deprecated release tests, update bundletester testplan yaml, update tests README.
- 198. By Corey Bryant
-
[corey.bryant, r=osci] Sync charm-helpers. Enable sync of payload.archive, sync charm-helpers, and fixup unit test failures.
- 197. By Liam Young
-
[hopem, r=gnuoy] Ensure ssl certs always synced.
Partially-Closes-Bug: 1520339
- 196. By Liam Young
-
[hopem, r=gnuoy] Fix upgrade breakage whereby if upgrading from
version of charm that did not support
db-initialised peer setting db ops get stuck
waiting infinitely for db to be initialised. Closes-Bug: 1519035
Preview Diff
1 | === added file '.bzrignore' |
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 |
3 | +++ .bzrignore 2016-03-30 08:19:58 +0000 |
4 | @@ -0,0 +1,5 @@ |
5 | +bin |
6 | +.coverage |
7 | +.testrepository |
8 | +.tox |
9 | +tags |
10 | |
11 | === renamed file '.bzrignore' => '.bzrignore.moved' |
12 | === added file '.coveragerc' |
13 | --- .coveragerc 1970-01-01 00:00:00 +0000 |
14 | +++ .coveragerc 2016-03-30 08:19:58 +0000 |
15 | @@ -0,0 +1,7 @@ |
16 | +[report] |
17 | +# Regexes for lines to exclude from consideration |
18 | +exclude_lines = |
19 | + if __name__ == .__main__.: |
20 | +include= |
21 | + hooks/keystone_* |
22 | + actions/actions.py |
23 | |
24 | === renamed file '.coveragerc' => '.coveragerc.moved' |
25 | === added file '.project' |
26 | --- .project 1970-01-01 00:00:00 +0000 |
27 | +++ .project 2016-03-30 08:19:58 +0000 |
28 | @@ -0,0 +1,17 @@ |
29 | +<?xml version="1.0" encoding="UTF-8"?> |
30 | +<projectDescription> |
31 | + <name>keystone</name> |
32 | + <comment></comment> |
33 | + <projects> |
34 | + </projects> |
35 | + <buildSpec> |
36 | + <buildCommand> |
37 | + <name>org.python.pydev.PyDevBuilder</name> |
38 | + <arguments> |
39 | + </arguments> |
40 | + </buildCommand> |
41 | + </buildSpec> |
42 | + <natures> |
43 | + <nature>org.python.pydev.pythonNature</nature> |
44 | + </natures> |
45 | +</projectDescription> |
46 | |
47 | === renamed file '.project' => '.project.moved' |
48 | === added file '.pydevproject' |
49 | --- .pydevproject 1970-01-01 00:00:00 +0000 |
50 | +++ .pydevproject 2016-03-30 08:19:58 +0000 |
51 | @@ -0,0 +1,9 @@ |
52 | +<?xml version="1.0" encoding="UTF-8" standalone="no"?> |
53 | +<?eclipse-pydev version="1.0"?><pydev_project> |
54 | +<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property> |
55 | +<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property> |
56 | +<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH"> |
57 | +<path>/keystone/hooks</path> |
58 | +<path>/keystone/unit_tests</path> |
59 | +</pydev_pathproperty> |
60 | +</pydev_project> |
61 | |
62 | === renamed file '.pydevproject' => '.pydevproject.moved' |
63 | === added file '.testr.conf' |
64 | --- .testr.conf 1970-01-01 00:00:00 +0000 |
65 | +++ .testr.conf 2016-03-30 08:19:58 +0000 |
66 | @@ -0,0 +1,8 @@ |
67 | +[DEFAULT] |
68 | +test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ |
69 | + OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ |
70 | + OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ |
71 | + ${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION |
72 | + |
73 | +test_id_option=--load-list $IDFILE |
74 | +test_list_option=--list |
75 | |
76 | === renamed file '.testr.conf' => '.testr.conf.moved' |
77 | === added file 'Makefile' |
78 | --- Makefile 1970-01-01 00:00:00 +0000 |
79 | +++ Makefile 2016-03-30 08:19:58 +0000 |
80 | @@ -0,0 +1,30 @@ |
81 | +#!/usr/bin/make |
82 | +PYTHON := /usr/bin/env python |
83 | + |
84 | +lint: |
85 | + @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ |
86 | + actions hooks unit_tests tests |
87 | + @charm proof |
88 | + |
89 | +test: |
90 | + @# Bundletester expects unit tests here. |
91 | + @echo Starting unit tests... |
92 | + @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests |
93 | + |
94 | +functional_test: |
95 | + @echo Starting Amulet tests... |
96 | + @tests/setup/00-setup |
97 | + @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 |
98 | + |
99 | +bin/charm_helpers_sync.py: |
100 | + @mkdir -p bin |
101 | + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ |
102 | + > bin/charm_helpers_sync.py |
103 | + |
104 | +sync: bin/charm_helpers_sync.py |
105 | + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml |
106 | + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml |
107 | + |
108 | +publish: lint test |
109 | + bzr push lp:charms/keystone |
110 | + bzr push lp:charms/trusty/keystone |
111 | |
112 | === renamed file 'Makefile' => 'Makefile.moved' |
113 | === added file 'README.md' |
114 | --- README.md 1970-01-01 00:00:00 +0000 |
115 | +++ README.md 2016-03-30 08:19:58 +0000 |
116 | @@ -0,0 +1,134 @@ |
117 | +Overview |
118 | +======== |
119 | + |
120 | +This charm provides Keystone, the Openstack identity service. It's target |
121 | +platform is (ideally) Ubuntu LTS + Openstack. |
122 | + |
123 | +Usage |
124 | +===== |
125 | + |
126 | +The following interfaces are provided: |
127 | + |
128 | + - nrpe-external-master: Used to generate Nagios checks. |
129 | + |
130 | + - identity-service: Openstack API endpoints request an entry in the |
131 | + Keystone service catalog + endpoint template catalog. When a relation |
132 | + is established, Keystone receives: service name, region, public_url, |
133 | + admin_url and internal_url. It first checks that the requested service |
134 | + is listed as a supported service. This list should stay updated to |
135 | + support current Openstack core services. If the service is supported, |
136 | + an entry in the service catalog is created, an endpoint template is |
137 | + created and a admin token is generated. The other end of the relation |
138 | + receives the token as well as info on which ports Keystone is listening |
139 | + on. |
140 | + |
141 | + - keystone-service: This is currently only used by Horizon/dashboard |
142 | + as its interaction with Keystone is different from other Openstack API |
143 | + services. That is, Horizon requests a Keystone role and token exists. |
144 | + During a relation, Horizon requests its configured default role and |
145 | + Keystone responds with a token and the auth + admin ports on which |
146 | + Keystone is listening. |
147 | + |
148 | + - identity-admin: Charms use this relation to obtain the credentials |
149 | + for the admin user. This is intended for charms that automatically |
150 | + provision users, tenants, etc. or that otherwise automate using the |
151 | + Openstack cluster deployment. |
152 | + |
153 | + - identity-notifications: Used to broadcast messages to any services |
154 | + listening on the interface. |
155 | + |
156 | +Database |
157 | +-------- |
158 | + |
159 | +Keystone requires a database. By default, a local sqlite database is used. |
160 | +The charm supports relations to a shared-db via mysql-shared interface. When |
161 | +a new data store is configured, the charm ensures the minimum administrator |
162 | +credentials exist (as configured via charm configuration) |
163 | + |
164 | +HA/Clustering |
165 | +------------- |
166 | + |
167 | +VIP is only required if you plan on multi-unit clustering (requires relating |
168 | +with hacluster charm). The VIP becomes a highly-available API endpoint. |
169 | + |
170 | +SSL/HTTPS |
171 | +--------- |
172 | + |
173 | +This charm also supports SSL and HTTPS endpoints. In order to ensure SSL |
174 | +certificates are only created once and distributed to all units, one unit gets |
175 | +elected as an ssl-cert-master. One side-effect of this is that as units are |
176 | +scaled-out the currently elected leader needs to be running in order for nodes |
177 | +to sync certificates. This 'feature' is to work around the lack of native |
178 | +leadership election via Juju itself, a feature that is due for release some |
179 | +time soon but until then we have to rely on this. Also, if a keystone unit does |
180 | +go down, it must be removed from Juju i.e. |
181 | + |
182 | + juju destroy-unit keystone/<unit-num> |
183 | + |
184 | +Otherwise it will be assumed that this unit may come back at some point and |
185 | +therefore must be know to be in-sync with the rest before continuing. |
186 | + |
187 | +Deploying from source |
188 | +--------------------- |
189 | + |
190 | +The minimum openstack-origin-git config required to deploy from source is: |
191 | + |
192 | + openstack-origin-git: include-file://keystone-juno.yaml |
193 | + |
194 | + keystone-juno.yaml |
195 | + repositories: |
196 | + - {name: requirements, |
197 | + repository: 'git://github.com/openstack/requirements', |
198 | + branch: stable/juno} |
199 | + - {name: keystone, |
200 | + repository: 'git://github.com/openstack/keystone', |
201 | + branch: stable/juno} |
202 | + |
203 | +Note that there are only two 'name' values the charm knows about: 'requirements' |
204 | +and 'keystone'. These repositories must correspond to these 'name' values. |
205 | +Additionally, the requirements repository must be specified first and the |
206 | +keystone repository must be specified last. All other repostories are installed |
207 | +in the order in which they are specified. |
208 | + |
209 | +The following is a full list of current tip repos (may not be up-to-date): |
210 | + |
211 | + openstack-origin-git: include-file://keystone-master.yaml |
212 | + |
213 | + keystone-master.yaml |
214 | + repositories: |
215 | + - {name: requirements, |
216 | + repository: 'git://github.com/openstack/requirements', |
217 | + branch: master} |
218 | + - {name: oslo-concurrency, |
219 | + repository: 'git://github.com/openstack/oslo.concurrency', |
220 | + branch: master} |
221 | + - {name: oslo-config, |
222 | + repository: 'git://github.com/openstack/oslo.config', |
223 | + branch: master} |
224 | + - {name: oslo-db, |
225 | + repository: 'git://github.com/openstack/oslo.db', |
226 | + branch: master} |
227 | + - {name: oslo-i18n, |
228 | + repository: 'git://github.com/openstack/oslo.i18n', |
229 | + branch: master} |
230 | + - {name: oslo-serialization, |
231 | + repository: 'git://github.com/openstack/oslo.serialization', |
232 | + branch: master} |
233 | + - {name: oslo-utils, |
234 | + repository: 'git://github.com/openstack/oslo.utils', |
235 | + branch: master} |
236 | + - {name: pbr, |
237 | + repository: 'git://github.com/openstack-dev/pbr', |
238 | + branch: master} |
239 | + - {name: python-keystoneclient, |
240 | + repository: 'git://github.com/openstack/python-keystoneclient', |
241 | + branch: master} |
242 | + - {name: sqlalchemy-migrate, |
243 | + repository: 'git://github.com/stackforge/sqlalchemy-migrate', |
244 | + branch: master} |
245 | + - {name: keystonemiddleware, |
246 | + repository: 'git://github.com/openstack/keystonemiddleware', |
247 | + branch: master} |
248 | + - {name: keystone, |
249 | + repository: 'git://github.com/openstack/keystone', |
250 | + branch: master} |
251 | |
252 | === renamed file 'README.md' => 'README.md.moved' |
253 | === added directory 'actions' |
254 | === renamed directory 'actions' => 'actions.moved' |
255 | === added file 'actions.yaml' |
256 | --- actions.yaml 1970-01-01 00:00:00 +0000 |
257 | +++ actions.yaml 2016-03-30 08:19:58 +0000 |
258 | @@ -0,0 +1,17 @@ |
259 | +git-reinstall: |
260 | + description: Reinstall keystone from the openstack-origin-git repositories. |
261 | +pause: |
262 | + description: | |
263 | + Pause keystone services. |
264 | + If the keystone deployment is clustered using the hacluster charm, the |
265 | + corresponding hacluster unit on the node must first be paused as well. |
266 | + Not doing so may lead to an interruption of service. |
267 | +resume: |
268 | + description: | |
269 | + Resume keystone services. |
270 | + If the keystone deployment is clustered using the hacluster charm, the |
271 | + corresponding hacluster unit on the node must be resumed as well. |
272 | +openstack-upgrade: |
273 | + description: | |
274 | + Perform openstack upgrades. Config option action-managed-upgrade must be |
275 | + set to True. |
276 | |
277 | === renamed file 'actions.yaml' => 'actions.yaml.moved' |
278 | === added file 'actions/__init__.py' |
279 | === added file 'actions/actions.py' |
280 | --- actions/actions.py 1970-01-01 00:00:00 +0000 |
281 | +++ actions/actions.py 2016-03-30 08:19:58 +0000 |
282 | @@ -0,0 +1,61 @@ |
283 | +#!/usr/bin/python |
284 | + |
285 | +import sys |
286 | +import os |
287 | + |
288 | +from charmhelpers.core.host import service_pause, service_resume |
289 | +from charmhelpers.core.hookenv import action_fail |
290 | +from charmhelpers.core.unitdata import HookData, kv |
291 | + |
292 | +from hooks.keystone_utils import services, assess_status |
293 | +from hooks.keystone_hooks import CONFIGS |
294 | + |
295 | + |
296 | +def pause(args): |
297 | + """Pause all the Keystone services. |
298 | + |
299 | + @raises Exception if any services fail to stop |
300 | + """ |
301 | + for service in services(): |
302 | + stopped = service_pause(service) |
303 | + if not stopped: |
304 | + raise Exception("{} didn't stop cleanly.".format(service)) |
305 | + with HookData()(): |
306 | + kv().set('unit-paused', True) |
307 | + assess_status(CONFIGS) |
308 | + |
309 | + |
310 | +def resume(args): |
311 | + """Resume all the Keystone services. |
312 | + |
313 | + @raises Exception if any services fail to start |
314 | + """ |
315 | + for service in services(): |
316 | + started = service_resume(service) |
317 | + if not started: |
318 | + raise Exception("{} didn't start cleanly.".format(service)) |
319 | + with HookData()(): |
320 | + kv().set('unit-paused', False) |
321 | + assess_status(CONFIGS) |
322 | + |
323 | + |
324 | +# A dictionary of all the defined actions to callables (which take |
325 | +# parsed arguments). |
326 | +ACTIONS = {"pause": pause, "resume": resume} |
327 | + |
328 | + |
329 | +def main(args): |
330 | + action_name = os.path.basename(args[0]) |
331 | + try: |
332 | + action = ACTIONS[action_name] |
333 | + except KeyError: |
334 | + return "Action %s undefined" % action_name |
335 | + else: |
336 | + try: |
337 | + action(args) |
338 | + except Exception as e: |
339 | + action_fail(str(e)) |
340 | + |
341 | + |
342 | +if __name__ == "__main__": |
343 | + sys.exit(main(sys.argv)) |
344 | |
345 | === added symlink 'actions/charmhelpers' |
346 | === target is u'../charmhelpers' |
347 | === added symlink 'actions/git-reinstall' |
348 | === target is u'git_reinstall.py' |
349 | === added file 'actions/git_reinstall.py' |
350 | --- actions/git_reinstall.py 1970-01-01 00:00:00 +0000 |
351 | +++ actions/git_reinstall.py 2016-03-30 08:19:58 +0000 |
352 | @@ -0,0 +1,43 @@ |
353 | +#!/usr/bin/python |
354 | + |
355 | +import traceback |
356 | + |
357 | +from charmhelpers.contrib.openstack.utils import ( |
358 | + git_install_requested, |
359 | +) |
360 | + |
361 | +from charmhelpers.core.hookenv import ( |
362 | + action_set, |
363 | + action_fail, |
364 | + config, |
365 | +) |
366 | + |
367 | +from hooks.keystone_utils import ( |
368 | + git_install, |
369 | +) |
370 | + |
371 | +from hooks.keystone_hooks import ( |
372 | + config_changed, |
373 | +) |
374 | + |
375 | + |
376 | +def git_reinstall(): |
377 | + """Reinstall from source and restart services. |
378 | + |
379 | + If the openstack-origin-git config option was used to install openstack |
380 | + from source git repositories, then this action can be used to reinstall |
381 | + from updated git repositories, followed by a restart of services.""" |
382 | + if not git_install_requested(): |
383 | + action_fail('openstack-origin-git is not configured') |
384 | + return |
385 | + |
386 | + try: |
387 | + git_install(config('openstack-origin-git')) |
388 | + config_changed() |
389 | + except: |
390 | + action_set({'traceback': traceback.format_exc()}) |
391 | + action_fail('git-reinstall resulted in an unexpected error') |
392 | + |
393 | + |
394 | +if __name__ == '__main__': |
395 | + git_reinstall() |
396 | |
397 | === added symlink 'actions/hooks' |
398 | === target is u'../hooks' |
399 | === added symlink 'actions/openstack-upgrade' |
400 | === target is u'openstack_upgrade.py' |
401 | === added file 'actions/openstack_upgrade.py' |
402 | --- actions/openstack_upgrade.py 1970-01-01 00:00:00 +0000 |
403 | +++ actions/openstack_upgrade.py 2016-03-30 08:19:58 +0000 |
404 | @@ -0,0 +1,37 @@ |
405 | +#!/usr/bin/python |
406 | +import os |
407 | +import sys |
408 | + |
409 | +sys.path.append('hooks/') |
410 | + |
411 | +from charmhelpers.contrib.openstack.utils import ( |
412 | + do_action_openstack_upgrade, |
413 | +) |
414 | + |
415 | +from keystone_hooks import ( |
416 | + CONFIGS, |
417 | +) |
418 | + |
419 | +from keystone_utils import ( |
420 | + do_openstack_upgrade, |
421 | +) |
422 | + |
423 | + |
424 | +def openstack_upgrade(): |
425 | + """Perform action-managed OpenStack upgrade. |
426 | + |
427 | + Upgrades packages to the configured openstack-origin version and sets |
428 | + the corresponding action status as a result. |
429 | + |
430 | + If the charm was installed from source we cannot upgrade it. |
431 | + For backwards compatibility a config flag (action-managed-upgrade) must |
432 | + be set for this code to run, otherwise a full service level upgrade will |
433 | + fire on config-changed.""" |
434 | + |
435 | + if (do_action_openstack_upgrade('keystone', |
436 | + do_openstack_upgrade, |
437 | + CONFIGS)): |
438 | + os.execl('./hooks/config-changed-postupgrade', '') |
439 | + |
440 | +if __name__ == '__main__': |
441 | + openstack_upgrade() |
442 | |
443 | === added symlink 'actions/pause' |
444 | === target is u'actions.py' |
445 | === added symlink 'actions/resume' |
446 | === target is u'actions.py' |
447 | === added file 'charm-helpers-hooks.yaml' |
448 | --- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000 |
449 | +++ charm-helpers-hooks.yaml 2016-03-30 08:19:58 +0000 |
450 | @@ -0,0 +1,18 @@ |
451 | +branch: lp:charm-helpers |
452 | +destination: charmhelpers |
453 | +include: |
454 | + - core |
455 | + - cli |
456 | + - fetch |
457 | + - contrib.openstack|inc=* |
458 | + - contrib.storage |
459 | + - contrib.hahelpers: |
460 | + - apache |
461 | + - cluster |
462 | + - contrib.python |
463 | + - contrib.unison |
464 | + - payload |
465 | + - contrib.peerstorage |
466 | + - contrib.network.ip |
467 | + - contrib.python.packages |
468 | + - contrib.charmsupport |
469 | |
470 | === renamed file 'charm-helpers-hooks.yaml' => 'charm-helpers-hooks.yaml.moved' |
471 | === added file 'charm-helpers-tests.yaml' |
472 | --- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000 |
473 | +++ charm-helpers-tests.yaml 2016-03-30 08:19:58 +0000 |
474 | @@ -0,0 +1,5 @@ |
475 | +branch: lp:charm-helpers |
476 | +destination: tests/charmhelpers |
477 | +include: |
478 | + - contrib.amulet |
479 | + - contrib.openstack.amulet |
480 | |
481 | === renamed file 'charm-helpers-tests.yaml' => 'charm-helpers-tests.yaml.moved' |
482 | === added directory 'charmhelpers' |
483 | === renamed directory 'charmhelpers' => 'charmhelpers.moved' |
484 | === added file 'charmhelpers/__init__.py' |
485 | --- charmhelpers/__init__.py 1970-01-01 00:00:00 +0000 |
486 | +++ charmhelpers/__init__.py 2016-03-30 08:19:58 +0000 |
487 | @@ -0,0 +1,38 @@ |
488 | +# Copyright 2014-2015 Canonical Limited. |
489 | +# |
490 | +# This file is part of charm-helpers. |
491 | +# |
492 | +# charm-helpers is free software: you can redistribute it and/or modify |
493 | +# it under the terms of the GNU Lesser General Public License version 3 as |
494 | +# published by the Free Software Foundation. |
495 | +# |
496 | +# charm-helpers is distributed in the hope that it will be useful, |
497 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
498 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
499 | +# GNU Lesser General Public License for more details. |
500 | +# |
501 | +# You should have received a copy of the GNU Lesser General Public License |
502 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
503 | + |
504 | +# Bootstrap charm-helpers, installing its dependencies if necessary using |
505 | +# only standard libraries. |
506 | +import subprocess |
507 | +import sys |
508 | + |
509 | +try: |
510 | + import six # flake8: noqa |
511 | +except ImportError: |
512 | + if sys.version_info.major == 2: |
513 | + subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) |
514 | + else: |
515 | + subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) |
516 | + import six # flake8: noqa |
517 | + |
518 | +try: |
519 | + import yaml # flake8: noqa |
520 | +except ImportError: |
521 | + if sys.version_info.major == 2: |
522 | + subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) |
523 | + else: |
524 | + subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) |
525 | + import yaml # flake8: noqa |
526 | |
527 | === added directory 'charmhelpers/cli' |
528 | === added file 'charmhelpers/cli/__init__.py' |
529 | --- charmhelpers/cli/__init__.py 1970-01-01 00:00:00 +0000 |
530 | +++ charmhelpers/cli/__init__.py 2016-03-30 08:19:58 +0000 |
531 | @@ -0,0 +1,191 @@ |
532 | +# Copyright 2014-2015 Canonical Limited. |
533 | +# |
534 | +# This file is part of charm-helpers. |
535 | +# |
536 | +# charm-helpers is free software: you can redistribute it and/or modify |
537 | +# it under the terms of the GNU Lesser General Public License version 3 as |
538 | +# published by the Free Software Foundation. |
539 | +# |
540 | +# charm-helpers is distributed in the hope that it will be useful, |
541 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
542 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
543 | +# GNU Lesser General Public License for more details. |
544 | +# |
545 | +# You should have received a copy of the GNU Lesser General Public License |
546 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
547 | + |
548 | +import inspect |
549 | +import argparse |
550 | +import sys |
551 | + |
552 | +from six.moves import zip |
553 | + |
554 | +import charmhelpers.core.unitdata |
555 | + |
556 | + |
557 | +class OutputFormatter(object): |
558 | + def __init__(self, outfile=sys.stdout): |
559 | + self.formats = ( |
560 | + "raw", |
561 | + "json", |
562 | + "py", |
563 | + "yaml", |
564 | + "csv", |
565 | + "tab", |
566 | + ) |
567 | + self.outfile = outfile |
568 | + |
569 | + def add_arguments(self, argument_parser): |
570 | + formatgroup = argument_parser.add_mutually_exclusive_group() |
571 | + choices = self.supported_formats |
572 | + formatgroup.add_argument("--format", metavar='FMT', |
573 | + help="Select output format for returned data, " |
574 | + "where FMT is one of: {}".format(choices), |
575 | + choices=choices, default='raw') |
576 | + for fmt in self.formats: |
577 | + fmtfunc = getattr(self, fmt) |
578 | + formatgroup.add_argument("-{}".format(fmt[0]), |
579 | + "--{}".format(fmt), action='store_const', |
580 | + const=fmt, dest='format', |
581 | + help=fmtfunc.__doc__) |
582 | + |
583 | + @property |
584 | + def supported_formats(self): |
585 | + return self.formats |
586 | + |
587 | + def raw(self, output): |
588 | + """Output data as raw string (default)""" |
589 | + if isinstance(output, (list, tuple)): |
590 | + output = '\n'.join(map(str, output)) |
591 | + self.outfile.write(str(output)) |
592 | + |
593 | + def py(self, output): |
594 | + """Output data as a nicely-formatted python data structure""" |
595 | + import pprint |
596 | + pprint.pprint(output, stream=self.outfile) |
597 | + |
598 | + def json(self, output): |
599 | + """Output data in JSON format""" |
600 | + import json |
601 | + json.dump(output, self.outfile) |
602 | + |
603 | + def yaml(self, output): |
604 | + """Output data in YAML format""" |
605 | + import yaml |
606 | + yaml.safe_dump(output, self.outfile) |
607 | + |
608 | + def csv(self, output): |
609 | + """Output data as excel-compatible CSV""" |
610 | + import csv |
611 | + csvwriter = csv.writer(self.outfile) |
612 | + csvwriter.writerows(output) |
613 | + |
614 | + def tab(self, output): |
615 | + """Output data in excel-compatible tab-delimited format""" |
616 | + import csv |
617 | + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) |
618 | + csvwriter.writerows(output) |
619 | + |
620 | + def format_output(self, output, fmt='raw'): |
621 | + fmtfunc = getattr(self, fmt) |
622 | + fmtfunc(output) |
623 | + |
624 | + |
625 | +class CommandLine(object): |
626 | + argument_parser = None |
627 | + subparsers = None |
628 | + formatter = None |
629 | + exit_code = 0 |
630 | + |
631 | + def __init__(self): |
632 | + if not self.argument_parser: |
633 | + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') |
634 | + if not self.formatter: |
635 | + self.formatter = OutputFormatter() |
636 | + self.formatter.add_arguments(self.argument_parser) |
637 | + if not self.subparsers: |
638 | + self.subparsers = self.argument_parser.add_subparsers(help='Commands') |
639 | + |
640 | + def subcommand(self, command_name=None): |
641 | + """ |
642 | + Decorate a function as a subcommand. Use its arguments as the |
643 | + command-line arguments""" |
644 | + def wrapper(decorated): |
645 | + cmd_name = command_name or decorated.__name__ |
646 | + subparser = self.subparsers.add_parser(cmd_name, |
647 | + description=decorated.__doc__) |
648 | + for args, kwargs in describe_arguments(decorated): |
649 | + subparser.add_argument(*args, **kwargs) |
650 | + subparser.set_defaults(func=decorated) |
651 | + return decorated |
652 | + return wrapper |
653 | + |
654 | + def test_command(self, decorated): |
655 | + """ |
656 | + Subcommand is a boolean test function, so bool return values should be |
657 | + converted to a 0/1 exit code. |
658 | + """ |
659 | + decorated._cli_test_command = True |
660 | + return decorated |
661 | + |
662 | + def no_output(self, decorated): |
663 | + """ |
664 | + Subcommand is not expected to return a value, so don't print a spurious None. |
665 | + """ |
666 | + decorated._cli_no_output = True |
667 | + return decorated |
668 | + |
669 | + def subcommand_builder(self, command_name, description=None): |
670 | + """ |
671 | + Decorate a function that builds a subcommand. Builders should accept a |
672 | + single argument (the subparser instance) and return the function to be |
673 | + run as the command.""" |
674 | + def wrapper(decorated): |
675 | + subparser = self.subparsers.add_parser(command_name) |
676 | + func = decorated(subparser) |
677 | + subparser.set_defaults(func=func) |
678 | + subparser.description = description or func.__doc__ |
679 | + return wrapper |
680 | + |
681 | + def run(self): |
682 | + "Run cli, processing arguments and executing subcommands." |
683 | + arguments = self.argument_parser.parse_args() |
684 | + argspec = inspect.getargspec(arguments.func) |
685 | + vargs = [] |
686 | + for arg in argspec.args: |
687 | + vargs.append(getattr(arguments, arg)) |
688 | + if argspec.varargs: |
689 | + vargs.extend(getattr(arguments, argspec.varargs)) |
690 | + output = arguments.func(*vargs) |
691 | + if getattr(arguments.func, '_cli_test_command', False): |
692 | + self.exit_code = 0 if output else 1 |
693 | + output = '' |
694 | + if getattr(arguments.func, '_cli_no_output', False): |
695 | + output = '' |
696 | + self.formatter.format_output(output, arguments.format) |
697 | + if charmhelpers.core.unitdata._KV: |
698 | + charmhelpers.core.unitdata._KV.flush() |
699 | + |
700 | + |
701 | +cmdline = CommandLine() |
702 | + |
703 | + |
704 | +def describe_arguments(func): |
705 | + """ |
706 | + Analyze a function's signature and return a data structure suitable for |
707 | + passing in as arguments to an argparse parser's add_argument() method.""" |
708 | + |
709 | + argspec = inspect.getargspec(func) |
710 | + # we should probably raise an exception somewhere if func includes **kwargs |
711 | + if argspec.defaults: |
712 | + positional_args = argspec.args[:-len(argspec.defaults)] |
713 | + keyword_names = argspec.args[-len(argspec.defaults):] |
714 | + for arg, default in zip(keyword_names, argspec.defaults): |
715 | + yield ('--{}'.format(arg),), {'default': default} |
716 | + else: |
717 | + positional_args = argspec.args |
718 | + |
719 | + for arg in positional_args: |
720 | + yield (arg,), {} |
721 | + if argspec.varargs: |
722 | + yield (argspec.varargs,), {'nargs': '*'} |
723 | |
724 | === added file 'charmhelpers/cli/benchmark.py' |
725 | --- charmhelpers/cli/benchmark.py 1970-01-01 00:00:00 +0000 |
726 | +++ charmhelpers/cli/benchmark.py 2016-03-30 08:19:58 +0000 |
727 | @@ -0,0 +1,36 @@ |
728 | +# Copyright 2014-2015 Canonical Limited. |
729 | +# |
730 | +# This file is part of charm-helpers. |
731 | +# |
732 | +# charm-helpers is free software: you can redistribute it and/or modify |
733 | +# it under the terms of the GNU Lesser General Public License version 3 as |
734 | +# published by the Free Software Foundation. |
735 | +# |
736 | +# charm-helpers is distributed in the hope that it will be useful, |
737 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
738 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
739 | +# GNU Lesser General Public License for more details. |
740 | +# |
741 | +# You should have received a copy of the GNU Lesser General Public License |
742 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
743 | + |
744 | +from . import cmdline |
745 | +from charmhelpers.contrib.benchmark import Benchmark |
746 | + |
747 | + |
748 | +@cmdline.subcommand(command_name='benchmark-start') |
749 | +def start(): |
750 | + Benchmark.start() |
751 | + |
752 | + |
753 | +@cmdline.subcommand(command_name='benchmark-finish') |
754 | +def finish(): |
755 | + Benchmark.finish() |
756 | + |
757 | + |
758 | +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") |
759 | +def service(subparser): |
760 | + subparser.add_argument("value", help="The composite score.") |
761 | + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") |
762 | + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") |
763 | + return Benchmark.set_composite_score |
764 | |
765 | === added file 'charmhelpers/cli/commands.py' |
766 | --- charmhelpers/cli/commands.py 1970-01-01 00:00:00 +0000 |
767 | +++ charmhelpers/cli/commands.py 2016-03-30 08:19:58 +0000 |
768 | @@ -0,0 +1,32 @@ |
769 | +# Copyright 2014-2015 Canonical Limited. |
770 | +# |
771 | +# This file is part of charm-helpers. |
772 | +# |
773 | +# charm-helpers is free software: you can redistribute it and/or modify |
774 | +# it under the terms of the GNU Lesser General Public License version 3 as |
775 | +# published by the Free Software Foundation. |
776 | +# |
777 | +# charm-helpers is distributed in the hope that it will be useful, |
778 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
779 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
780 | +# GNU Lesser General Public License for more details. |
781 | +# |
782 | +# You should have received a copy of the GNU Lesser General Public License |
783 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
784 | + |
785 | +""" |
786 | +This module loads sub-modules into the python runtime so they can be |
787 | +discovered via the inspect module. In order to prevent flake8 from (rightfully) |
788 | +telling us these are unused modules, throw a ' # noqa' at the end of each import |
789 | +so that the warning is suppressed. |
790 | +""" |
791 | + |
792 | +from . import CommandLine # noqa |
793 | + |
794 | +""" |
795 | +Import the sub-modules which have decorated subcommands to register with chlp. |
796 | +""" |
797 | +from . import host # noqa |
798 | +from . import benchmark # noqa |
799 | +from . import unitdata # noqa |
800 | +from . import hookenv # noqa |
801 | |
802 | === added file 'charmhelpers/cli/hookenv.py' |
803 | --- charmhelpers/cli/hookenv.py 1970-01-01 00:00:00 +0000 |
804 | +++ charmhelpers/cli/hookenv.py 2016-03-30 08:19:58 +0000 |
805 | @@ -0,0 +1,23 @@ |
806 | +# Copyright 2014-2015 Canonical Limited. |
807 | +# |
808 | +# This file is part of charm-helpers. |
809 | +# |
810 | +# charm-helpers is free software: you can redistribute it and/or modify |
811 | +# it under the terms of the GNU Lesser General Public License version 3 as |
812 | +# published by the Free Software Foundation. |
813 | +# |
814 | +# charm-helpers is distributed in the hope that it will be useful, |
815 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
816 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
817 | +# GNU Lesser General Public License for more details. |
818 | +# |
819 | +# You should have received a copy of the GNU Lesser General Public License |
820 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
821 | + |
822 | +from . import cmdline |
823 | +from charmhelpers.core import hookenv |
824 | + |
825 | + |
826 | +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) |
827 | +cmdline.subcommand('service-name')(hookenv.service_name) |
828 | +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) |
829 | |
830 | === added file 'charmhelpers/cli/host.py' |
831 | --- charmhelpers/cli/host.py 1970-01-01 00:00:00 +0000 |
832 | +++ charmhelpers/cli/host.py 2016-03-30 08:19:58 +0000 |
833 | @@ -0,0 +1,31 @@ |
834 | +# Copyright 2014-2015 Canonical Limited. |
835 | +# |
836 | +# This file is part of charm-helpers. |
837 | +# |
838 | +# charm-helpers is free software: you can redistribute it and/or modify |
839 | +# it under the terms of the GNU Lesser General Public License version 3 as |
840 | +# published by the Free Software Foundation. |
841 | +# |
842 | +# charm-helpers is distributed in the hope that it will be useful, |
843 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
844 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
845 | +# GNU Lesser General Public License for more details. |
846 | +# |
847 | +# You should have received a copy of the GNU Lesser General Public License |
848 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
849 | + |
850 | +from . import cmdline |
851 | +from charmhelpers.core import host |
852 | + |
853 | + |
854 | +@cmdline.subcommand() |
855 | +def mounts(): |
856 | + "List mounts" |
857 | + return host.mounts() |
858 | + |
859 | + |
860 | +@cmdline.subcommand_builder('service', description="Control system services") |
861 | +def service(subparser): |
862 | + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") |
863 | + subparser.add_argument("service_name", help="Name of the service to control") |
864 | + return host.service |
865 | |
866 | === added file 'charmhelpers/cli/unitdata.py' |
867 | --- charmhelpers/cli/unitdata.py 1970-01-01 00:00:00 +0000 |
868 | +++ charmhelpers/cli/unitdata.py 2016-03-30 08:19:58 +0000 |
869 | @@ -0,0 +1,39 @@ |
870 | +# Copyright 2014-2015 Canonical Limited. |
871 | +# |
872 | +# This file is part of charm-helpers. |
873 | +# |
874 | +# charm-helpers is free software: you can redistribute it and/or modify |
875 | +# it under the terms of the GNU Lesser General Public License version 3 as |
876 | +# published by the Free Software Foundation. |
877 | +# |
878 | +# charm-helpers is distributed in the hope that it will be useful, |
879 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
880 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
881 | +# GNU Lesser General Public License for more details. |
882 | +# |
883 | +# You should have received a copy of the GNU Lesser General Public License |
884 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
885 | + |
886 | +from . import cmdline |
887 | +from charmhelpers.core import unitdata |
888 | + |
889 | + |
890 | +@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") |
891 | +def unitdata_cmd(subparser): |
892 | + nested = subparser.add_subparsers() |
893 | + get_cmd = nested.add_parser('get', help='Retrieve data') |
894 | + get_cmd.add_argument('key', help='Key to retrieve the value of') |
895 | + get_cmd.set_defaults(action='get', value=None) |
896 | + set_cmd = nested.add_parser('set', help='Store data') |
897 | + set_cmd.add_argument('key', help='Key to set') |
898 | + set_cmd.add_argument('value', help='Value to store') |
899 | + set_cmd.set_defaults(action='set') |
900 | + |
901 | + def _unitdata_cmd(action, key, value): |
902 | + if action == 'get': |
903 | + return unitdata.kv().get(key) |
904 | + elif action == 'set': |
905 | + unitdata.kv().set(key, value) |
906 | + unitdata.kv().flush() |
907 | + return '' |
908 | + return _unitdata_cmd |
909 | |
910 | === added directory 'charmhelpers/contrib' |
911 | === added file 'charmhelpers/contrib/__init__.py' |
912 | --- charmhelpers/contrib/__init__.py 1970-01-01 00:00:00 +0000 |
913 | +++ charmhelpers/contrib/__init__.py 2016-03-30 08:19:58 +0000 |
914 | @@ -0,0 +1,15 @@ |
915 | +# Copyright 2014-2015 Canonical Limited. |
916 | +# |
917 | +# This file is part of charm-helpers. |
918 | +# |
919 | +# charm-helpers is free software: you can redistribute it and/or modify |
920 | +# it under the terms of the GNU Lesser General Public License version 3 as |
921 | +# published by the Free Software Foundation. |
922 | +# |
923 | +# charm-helpers is distributed in the hope that it will be useful, |
924 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
925 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
926 | +# GNU Lesser General Public License for more details. |
927 | +# |
928 | +# You should have received a copy of the GNU Lesser General Public License |
929 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
930 | |
931 | === added directory 'charmhelpers/contrib/charmsupport' |
932 | === added file 'charmhelpers/contrib/charmsupport/__init__.py' |
933 | --- charmhelpers/contrib/charmsupport/__init__.py 1970-01-01 00:00:00 +0000 |
934 | +++ charmhelpers/contrib/charmsupport/__init__.py 2016-03-30 08:19:58 +0000 |
935 | @@ -0,0 +1,15 @@ |
936 | +# Copyright 2014-2015 Canonical Limited. |
937 | +# |
938 | +# This file is part of charm-helpers. |
939 | +# |
940 | +# charm-helpers is free software: you can redistribute it and/or modify |
941 | +# it under the terms of the GNU Lesser General Public License version 3 as |
942 | +# published by the Free Software Foundation. |
943 | +# |
944 | +# charm-helpers is distributed in the hope that it will be useful, |
945 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
946 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
947 | +# GNU Lesser General Public License for more details. |
948 | +# |
949 | +# You should have received a copy of the GNU Lesser General Public License |
950 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
951 | |
952 | === added file 'charmhelpers/contrib/charmsupport/nrpe.py' |
953 | --- charmhelpers/contrib/charmsupport/nrpe.py 1970-01-01 00:00:00 +0000 |
954 | +++ charmhelpers/contrib/charmsupport/nrpe.py 2016-03-30 08:19:58 +0000 |
955 | @@ -0,0 +1,398 @@ |
956 | +# Copyright 2014-2015 Canonical Limited. |
957 | +# |
958 | +# This file is part of charm-helpers. |
959 | +# |
960 | +# charm-helpers is free software: you can redistribute it and/or modify |
961 | +# it under the terms of the GNU Lesser General Public License version 3 as |
962 | +# published by the Free Software Foundation. |
963 | +# |
964 | +# charm-helpers is distributed in the hope that it will be useful, |
965 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
966 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
967 | +# GNU Lesser General Public License for more details. |
968 | +# |
969 | +# You should have received a copy of the GNU Lesser General Public License |
970 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
971 | + |
972 | +"""Compatibility with the nrpe-external-master charm""" |
973 | +# Copyright 2012 Canonical Ltd. |
974 | +# |
975 | +# Authors: |
976 | +# Matthew Wedgwood <matthew.wedgwood@canonical.com> |
977 | + |
978 | +import subprocess |
979 | +import pwd |
980 | +import grp |
981 | +import os |
982 | +import glob |
983 | +import shutil |
984 | +import re |
985 | +import shlex |
986 | +import yaml |
987 | + |
988 | +from charmhelpers.core.hookenv import ( |
989 | + config, |
990 | + local_unit, |
991 | + log, |
992 | + relation_ids, |
993 | + relation_set, |
994 | + relations_of_type, |
995 | +) |
996 | + |
997 | +from charmhelpers.core.host import service |
998 | + |
999 | +# This module adds compatibility with the nrpe-external-master and plain nrpe |
1000 | +# subordinate charms. To use it in your charm: |
1001 | +# |
1002 | +# 1. Update metadata.yaml |
1003 | +# |
1004 | +# provides: |
1005 | +# (...) |
1006 | +# nrpe-external-master: |
1007 | +# interface: nrpe-external-master |
1008 | +# scope: container |
1009 | +# |
1010 | +# and/or |
1011 | +# |
1012 | +# provides: |
1013 | +# (...) |
1014 | +# local-monitors: |
1015 | +# interface: local-monitors |
1016 | +# scope: container |
1017 | + |
1018 | +# |
1019 | +# 2. Add the following to config.yaml |
1020 | +# |
1021 | +# nagios_context: |
1022 | +# default: "juju" |
1023 | +# type: string |
1024 | +# description: | |
1025 | +# Used by the nrpe subordinate charms. |
1026 | +# A string that will be prepended to instance name to set the host name |
1027 | +# in nagios. So for instance the hostname would be something like: |
1028 | +# juju-myservice-0 |
1029 | +# If you're running multiple environments with the same services in them |
1030 | +# this allows you to differentiate between them. |
1031 | +# nagios_servicegroups: |
1032 | +# default: "" |
1033 | +# type: string |
1034 | +# description: | |
1035 | +# A comma-separated list of nagios servicegroups. |
1036 | +# If left empty, the nagios_context will be used as the servicegroup |
1037 | +# |
1038 | +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master |
1039 | +# |
1040 | +# 4. Update your hooks.py with something like this: |
1041 | +# |
1042 | +# from charmsupport.nrpe import NRPE |
1043 | +# (...) |
1044 | +# def update_nrpe_config(): |
1045 | +# nrpe_compat = NRPE() |
1046 | +# nrpe_compat.add_check( |
1047 | +# shortname = "myservice", |
1048 | +# description = "Check MyService", |
1049 | +# check_cmd = "check_http -w 2 -c 10 http://localhost" |
1050 | +# ) |
1051 | +# nrpe_compat.add_check( |
1052 | +# "myservice_other", |
1053 | +# "Check for widget failures", |
1054 | +# check_cmd = "/srv/myapp/scripts/widget_check" |
1055 | +# ) |
1056 | +# nrpe_compat.write() |
1057 | +# |
1058 | +# def config_changed(): |
1059 | +# (...) |
1060 | +# update_nrpe_config() |
1061 | +# |
1062 | +# def nrpe_external_master_relation_changed(): |
1063 | +# update_nrpe_config() |
1064 | +# |
1065 | +# def local_monitors_relation_changed(): |
1066 | +# update_nrpe_config() |
1067 | +# |
1068 | +# 5. ln -s hooks.py nrpe-external-master-relation-changed |
1069 | +# ln -s hooks.py local-monitors-relation-changed |
1070 | + |
1071 | + |
1072 | +class CheckException(Exception): |
1073 | + pass |
1074 | + |
1075 | + |
1076 | +class Check(object): |
1077 | + shortname_re = '[A-Za-z0-9-_]+$' |
1078 | + service_template = (""" |
1079 | +#--------------------------------------------------- |
1080 | +# This file is Juju managed |
1081 | +#--------------------------------------------------- |
1082 | +define service {{ |
1083 | + use active-service |
1084 | + host_name {nagios_hostname} |
1085 | + service_description {nagios_hostname}[{shortname}] """ |
1086 | + """{description} |
1087 | + check_command check_nrpe!{command} |
1088 | + servicegroups {nagios_servicegroup} |
1089 | +}} |
1090 | +""") |
1091 | + |
1092 | + def __init__(self, shortname, description, check_cmd): |
1093 | + super(Check, self).__init__() |
1094 | + # XXX: could be better to calculate this from the service name |
1095 | + if not re.match(self.shortname_re, shortname): |
1096 | + raise CheckException("shortname must match {}".format( |
1097 | + Check.shortname_re)) |
1098 | + self.shortname = shortname |
1099 | + self.command = "check_{}".format(shortname) |
1100 | + # Note: a set of invalid characters is defined by the |
1101 | + # Nagios server config |
1102 | + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= |
1103 | + self.description = description |
1104 | + self.check_cmd = self._locate_cmd(check_cmd) |
1105 | + |
1106 | + def _get_check_filename(self): |
1107 | + return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command)) |
1108 | + |
1109 | + def _get_service_filename(self, hostname): |
1110 | + return os.path.join(NRPE.nagios_exportdir, |
1111 | + 'service__{}_{}.cfg'.format(hostname, self.command)) |
1112 | + |
1113 | + def _locate_cmd(self, check_cmd): |
1114 | + search_path = ( |
1115 | + '/usr/lib/nagios/plugins', |
1116 | + '/usr/local/lib/nagios/plugins', |
1117 | + ) |
1118 | + parts = shlex.split(check_cmd) |
1119 | + for path in search_path: |
1120 | + if os.path.exists(os.path.join(path, parts[0])): |
1121 | + command = os.path.join(path, parts[0]) |
1122 | + if len(parts) > 1: |
1123 | + command += " " + " ".join(parts[1:]) |
1124 | + return command |
1125 | + log('Check command not found: {}'.format(parts[0])) |
1126 | + return '' |
1127 | + |
1128 | + def _remove_service_files(self): |
1129 | + if not os.path.exists(NRPE.nagios_exportdir): |
1130 | + return |
1131 | + for f in os.listdir(NRPE.nagios_exportdir): |
1132 | + if f.endswith('_{}.cfg'.format(self.command)): |
1133 | + os.remove(os.path.join(NRPE.nagios_exportdir, f)) |
1134 | + |
1135 | + def remove(self, hostname): |
1136 | + nrpe_check_file = self._get_check_filename() |
1137 | + if os.path.exists(nrpe_check_file): |
1138 | + os.remove(nrpe_check_file) |
1139 | + self._remove_service_files() |
1140 | + |
1141 | + def write(self, nagios_context, hostname, nagios_servicegroups): |
1142 | + nrpe_check_file = self._get_check_filename() |
1143 | + with open(nrpe_check_file, 'w') as nrpe_check_config: |
1144 | + nrpe_check_config.write("# check {}\n".format(self.shortname)) |
1145 | + nrpe_check_config.write("command[{}]={}\n".format( |
1146 | + self.command, self.check_cmd)) |
1147 | + |
1148 | + if not os.path.exists(NRPE.nagios_exportdir): |
1149 | + log('Not writing service config as {} is not accessible'.format( |
1150 | + NRPE.nagios_exportdir)) |
1151 | + else: |
1152 | + self.write_service_config(nagios_context, hostname, |
1153 | + nagios_servicegroups) |
1154 | + |
1155 | + def write_service_config(self, nagios_context, hostname, |
1156 | + nagios_servicegroups): |
1157 | + self._remove_service_files() |
1158 | + |
1159 | + templ_vars = { |
1160 | + 'nagios_hostname': hostname, |
1161 | + 'nagios_servicegroup': nagios_servicegroups, |
1162 | + 'description': self.description, |
1163 | + 'shortname': self.shortname, |
1164 | + 'command': self.command, |
1165 | + } |
1166 | + nrpe_service_text = Check.service_template.format(**templ_vars) |
1167 | + nrpe_service_file = self._get_service_filename(hostname) |
1168 | + with open(nrpe_service_file, 'w') as nrpe_service_config: |
1169 | + nrpe_service_config.write(str(nrpe_service_text)) |
1170 | + |
1171 | + def run(self): |
1172 | + subprocess.call(self.check_cmd) |
1173 | + |
1174 | + |
1175 | +class NRPE(object): |
1176 | + nagios_logdir = '/var/log/nagios' |
1177 | + nagios_exportdir = '/var/lib/nagios/export' |
1178 | + nrpe_confdir = '/etc/nagios/nrpe.d' |
1179 | + |
1180 | + def __init__(self, hostname=None): |
1181 | + super(NRPE, self).__init__() |
1182 | + self.config = config() |
1183 | + self.nagios_context = self.config['nagios_context'] |
1184 | + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: |
1185 | + self.nagios_servicegroups = self.config['nagios_servicegroups'] |
1186 | + else: |
1187 | + self.nagios_servicegroups = self.nagios_context |
1188 | + self.unit_name = local_unit().replace('/', '-') |
1189 | + if hostname: |
1190 | + self.hostname = hostname |
1191 | + else: |
1192 | + nagios_hostname = get_nagios_hostname() |
1193 | + if nagios_hostname: |
1194 | + self.hostname = nagios_hostname |
1195 | + else: |
1196 | + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) |
1197 | + self.checks = [] |
1198 | + |
1199 | + def add_check(self, *args, **kwargs): |
1200 | + self.checks.append(Check(*args, **kwargs)) |
1201 | + |
1202 | + def remove_check(self, *args, **kwargs): |
1203 | + if kwargs.get('shortname') is None: |
1204 | + raise ValueError('shortname of check must be specified') |
1205 | + |
1206 | + # Use sensible defaults if they're not specified - these are not |
1207 | + # actually used during removal, but they're required for constructing |
1208 | + # the Check object; check_disk is chosen because it's part of the |
1209 | + # nagios-plugins-basic package. |
1210 | + if kwargs.get('check_cmd') is None: |
1211 | + kwargs['check_cmd'] = 'check_disk' |
1212 | + if kwargs.get('description') is None: |
1213 | + kwargs['description'] = '' |
1214 | + |
1215 | + check = Check(*args, **kwargs) |
1216 | + check.remove(self.hostname) |
1217 | + |
1218 | + def write(self): |
1219 | + try: |
1220 | + nagios_uid = pwd.getpwnam('nagios').pw_uid |
1221 | + nagios_gid = grp.getgrnam('nagios').gr_gid |
1222 | + except: |
1223 | + log("Nagios user not set up, nrpe checks not updated") |
1224 | + return |
1225 | + |
1226 | + if not os.path.exists(NRPE.nagios_logdir): |
1227 | + os.mkdir(NRPE.nagios_logdir) |
1228 | + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) |
1229 | + |
1230 | + nrpe_monitors = {} |
1231 | + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} |
1232 | + for nrpecheck in self.checks: |
1233 | + nrpecheck.write(self.nagios_context, self.hostname, |
1234 | + self.nagios_servicegroups) |
1235 | + nrpe_monitors[nrpecheck.shortname] = { |
1236 | + "command": nrpecheck.command, |
1237 | + } |
1238 | + |
1239 | + service('restart', 'nagios-nrpe-server') |
1240 | + |
1241 | + monitor_ids = relation_ids("local-monitors") + \ |
1242 | + relation_ids("nrpe-external-master") |
1243 | + for rid in monitor_ids: |
1244 | + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) |
1245 | + |
1246 | + |
1247 | +def get_nagios_hostcontext(relation_name='nrpe-external-master'): |
1248 | + """ |
1249 | + Query relation with nrpe subordinate, return the nagios_host_context |
1250 | + |
1251 | + :param str relation_name: Name of relation nrpe sub joined to |
1252 | + """ |
1253 | + for rel in relations_of_type(relation_name): |
1254 | + if 'nagios_host_context' in rel: |
1255 | + return rel['nagios_host_context'] |
1256 | + |
1257 | + |
1258 | +def get_nagios_hostname(relation_name='nrpe-external-master'): |
1259 | + """ |
1260 | + Query relation with nrpe subordinate, return the nagios_hostname |
1261 | + |
1262 | + :param str relation_name: Name of relation nrpe sub joined to |
1263 | + """ |
1264 | + for rel in relations_of_type(relation_name): |
1265 | + if 'nagios_hostname' in rel: |
1266 | + return rel['nagios_hostname'] |
1267 | + |
1268 | + |
1269 | +def get_nagios_unit_name(relation_name='nrpe-external-master'): |
1270 | + """ |
1271 | + Return the nagios unit name prepended with host_context if needed |
1272 | + |
1273 | + :param str relation_name: Name of relation nrpe sub joined to |
1274 | + """ |
1275 | + host_context = get_nagios_hostcontext(relation_name) |
1276 | + if host_context: |
1277 | + unit = "%s:%s" % (host_context, local_unit()) |
1278 | + else: |
1279 | + unit = local_unit() |
1280 | + return unit |
1281 | + |
1282 | + |
1283 | +def add_init_service_checks(nrpe, services, unit_name): |
1284 | + """ |
1285 | + Add checks for each service in list |
1286 | + |
1287 | + :param NRPE nrpe: NRPE object to add check to |
1288 | + :param list services: List of services to check |
1289 | + :param str unit_name: Unit name to use in check description |
1290 | + """ |
1291 | + for svc in services: |
1292 | + upstart_init = '/etc/init/%s.conf' % svc |
1293 | + sysv_init = '/etc/init.d/%s' % svc |
1294 | + if os.path.exists(upstart_init): |
1295 | + # Don't add a check for these services from neutron-gateway |
1296 | + if svc not in ['ext-port', 'os-charm-phy-nic-mtu']: |
1297 | + nrpe.add_check( |
1298 | + shortname=svc, |
1299 | + description='process check {%s}' % unit_name, |
1300 | + check_cmd='check_upstart_job %s' % svc |
1301 | + ) |
1302 | + elif os.path.exists(sysv_init): |
1303 | + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc |
1304 | + cron_file = ('*/5 * * * * root ' |
1305 | + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' |
1306 | + '-s /etc/init.d/%s status > ' |
1307 | + '/var/lib/nagios/service-check-%s.txt\n' % (svc, |
1308 | + svc) |
1309 | + ) |
1310 | + f = open(cronpath, 'w') |
1311 | + f.write(cron_file) |
1312 | + f.close() |
1313 | + nrpe.add_check( |
1314 | + shortname=svc, |
1315 | + description='process check {%s}' % unit_name, |
1316 | + check_cmd='check_status_file.py -f ' |
1317 | + '/var/lib/nagios/service-check-%s.txt' % svc, |
1318 | + ) |
1319 | + |
1320 | + |
1321 | +def copy_nrpe_checks(): |
1322 | + """ |
1323 | + Copy the nrpe checks into place |
1324 | + |
1325 | + """ |
1326 | + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' |
1327 | + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', |
1328 | + 'charmhelpers', 'contrib', 'openstack', |
1329 | + 'files') |
1330 | + |
1331 | + if not os.path.exists(NAGIOS_PLUGINS): |
1332 | + os.makedirs(NAGIOS_PLUGINS) |
1333 | + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): |
1334 | + if os.path.isfile(fname): |
1335 | + shutil.copy2(fname, |
1336 | + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) |
1337 | + |
1338 | + |
1339 | +def add_haproxy_checks(nrpe, unit_name): |
1340 | + """ |
1341 | + Add nagios checks for HAProxy server status and queue depth
1342 | + |
1343 | + :param NRPE nrpe: NRPE object to add check to |
1344 | + :param str unit_name: Unit name to use in check description |
1345 | + """ |
1346 | + nrpe.add_check( |
1347 | + shortname='haproxy_servers', |
1348 | + description='Check HAProxy {%s}' % unit_name, |
1349 | + check_cmd='check_haproxy.sh') |
1350 | + nrpe.add_check( |
1351 | + shortname='haproxy_queue', |
1352 | + description='Check HAProxy queue depth {%s}' % unit_name, |
1353 | + check_cmd='check_haproxy_queue_depth.sh') |
1354 | |
1355 | === added file 'charmhelpers/contrib/charmsupport/volumes.py' |
1356 | --- charmhelpers/contrib/charmsupport/volumes.py 1970-01-01 00:00:00 +0000 |
1357 | +++ charmhelpers/contrib/charmsupport/volumes.py 2016-03-30 08:19:58 +0000 |
1358 | @@ -0,0 +1,175 @@ |
1359 | +# Copyright 2014-2015 Canonical Limited. |
1360 | +# |
1361 | +# This file is part of charm-helpers. |
1362 | +# |
1363 | +# charm-helpers is free software: you can redistribute it and/or modify |
1364 | +# it under the terms of the GNU Lesser General Public License version 3 as |
1365 | +# published by the Free Software Foundation. |
1366 | +# |
1367 | +# charm-helpers is distributed in the hope that it will be useful, |
1368 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1369 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1370 | +# GNU Lesser General Public License for more details. |
1371 | +# |
1372 | +# You should have received a copy of the GNU Lesser General Public License |
1373 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1374 | + |
1375 | +''' |
1376 | +Functions for managing volumes in juju units. One volume is supported per unit. |
1377 | +Subordinates may have their own storage, provided it is on its own partition. |
1378 | + |
1379 | +Configuration stanzas:: |
1380 | + |
1381 | + volume-ephemeral: |
1382 | + type: boolean |
1383 | + default: true |
1384 | + description: > |
1385 | + If false, a volume is mounted as specified in "volume-map"
1386 | + If true, ephemeral storage will be used, meaning that log data |
1387 | + will only exist as long as the machine. YOU HAVE BEEN WARNED. |
1388 | + volume-map: |
1389 | + type: string |
1390 | + default: {} |
1391 | + description: > |
1392 | + YAML map of units to device names, e.g: |
1393 | + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" |
1394 | + Service units will raise a configure-error if volume-ephemeral |
1395 | + is 'false' and no volume-map value is set. Use 'juju set' to set a
1396 | + value and 'juju resolved' to complete configuration. |
1397 | + |
1398 | +Usage:: |
1399 | + |
1400 | + from charmsupport.volumes import configure_volume, VolumeConfigurationError |
1401 | + from charmsupport.hookenv import log, ERROR |
1402 | + def pre_mount_hook():
1403 | + stop_service('myservice') |
1404 | + def post_mount_hook(): |
1405 | + start_service('myservice') |
1406 | + |
1407 | + if __name__ == '__main__': |
1408 | + try: |
1409 | + configure_volume(before_change=pre_mount_hook, |
1410 | + after_change=post_mount_hook) |
1411 | + except VolumeConfigurationError: |
1412 | + log('Storage could not be configured', ERROR) |
1413 | + |
1414 | +''' |
1415 | + |
1416 | +# XXX: Known limitations |
1417 | +# - fstab is neither consulted nor updated |
1418 | + |
1419 | +import os |
1420 | +from charmhelpers.core import hookenv |
1421 | +from charmhelpers.core import host |
1422 | +import yaml |
1423 | + |
1424 | + |
1425 | +MOUNT_BASE = '/srv/juju/volumes' |
1426 | + |
1427 | + |
1428 | +class VolumeConfigurationError(Exception): |
1429 | + '''Volume configuration data is missing or invalid''' |
1430 | + pass |
1431 | + |
1432 | + |
1433 | +def get_config(): |
1434 | + '''Gather and sanity-check volume configuration data''' |
1435 | + volume_config = {} |
1436 | + config = hookenv.config() |
1437 | + |
1438 | + errors = False |
1439 | + |
1440 | + if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'): |
1441 | + volume_config['ephemeral'] = True |
1442 | + else: |
1443 | + volume_config['ephemeral'] = False |
1444 | + |
1445 | + try: |
1446 | + volume_map = yaml.safe_load(config.get('volume-map', '{}')) |
1447 | + except yaml.YAMLError as e: |
1448 | + hookenv.log("Error parsing YAML volume-map: {}".format(e), |
1449 | + hookenv.ERROR) |
1450 | + errors = True |
1451 | + if volume_map is None: |
1452 | + # probably an empty string |
1453 | + volume_map = {} |
1454 | + elif not isinstance(volume_map, dict): |
1455 | + hookenv.log("Volume-map should be a dictionary, not {}".format( |
1456 | + type(volume_map))) |
1457 | + errors = True |
1458 | + |
1459 | + volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME']) |
1460 | + if volume_config['device'] and volume_config['ephemeral']: |
1461 | + # asked for ephemeral storage but also defined a volume ID |
1462 | + hookenv.log('A volume is defined for this unit, but ephemeral ' |
1463 | + 'storage was requested', hookenv.ERROR) |
1464 | + errors = True |
1465 | + elif not volume_config['device'] and not volume_config['ephemeral']: |
1466 | + # asked for permanent storage but did not define volume ID |
1467 | + hookenv.log('Ephemeral storage was requested, but there is no volume ' |
1468 | + 'defined for this unit.', hookenv.ERROR) |
1469 | + errors = True |
1470 | + |
1471 | + unit_mount_name = hookenv.local_unit().replace('/', '-') |
1472 | + volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name) |
1473 | + |
1474 | + if errors: |
1475 | + return None |
1476 | + return volume_config |
1477 | + |
1478 | + |
1479 | +def mount_volume(config): |
1480 | + if os.path.exists(config['mountpoint']): |
1481 | + if not os.path.isdir(config['mountpoint']): |
1482 | + hookenv.log('Not a directory: {}'.format(config['mountpoint'])) |
1483 | + raise VolumeConfigurationError() |
1484 | + else: |
1485 | + host.mkdir(config['mountpoint']) |
1486 | + if os.path.ismount(config['mountpoint']): |
1487 | + unmount_volume(config) |
1488 | + if not host.mount(config['device'], config['mountpoint'], persist=True): |
1489 | + raise VolumeConfigurationError() |
1490 | + |
1491 | + |
1492 | +def unmount_volume(config): |
1493 | + if os.path.ismount(config['mountpoint']): |
1494 | + if not host.umount(config['mountpoint'], persist=True): |
1495 | + raise VolumeConfigurationError() |
1496 | + |
1497 | + |
1498 | +def managed_mounts(): |
1499 | + '''List of all mounted managed volumes''' |
1500 | + return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts()) |
1501 | + |
1502 | + |
1503 | +def configure_volume(before_change=lambda: None, after_change=lambda: None): |
1504 | + '''Set up storage (or don't) according to the charm's volume configuration. |
1505 | + Returns the mount point or "ephemeral". before_change and after_change |
1506 | + are optional functions to be called if the volume configuration changes. |
1507 | + ''' |
1508 | + |
1509 | + config = get_config() |
1510 | + if not config: |
1511 | + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) |
1512 | + raise VolumeConfigurationError() |
1513 | + |
1514 | + if config['ephemeral']: |
1515 | + if os.path.ismount(config['mountpoint']): |
1516 | + before_change() |
1517 | + unmount_volume(config) |
1518 | + after_change() |
1519 | + return 'ephemeral' |
1520 | + else: |
1521 | + # persistent storage |
1522 | + if os.path.ismount(config['mountpoint']): |
1523 | + mounts = dict(managed_mounts()) |
1524 | + if mounts.get(config['mountpoint']) != config['device']: |
1525 | + before_change() |
1526 | + unmount_volume(config) |
1527 | + mount_volume(config) |
1528 | + after_change() |
1529 | + else: |
1530 | + before_change() |
1531 | + mount_volume(config) |
1532 | + after_change() |
1533 | + return config['mountpoint'] |
1534 | |
1535 | === added directory 'charmhelpers/contrib/hahelpers' |
1536 | === added file 'charmhelpers/contrib/hahelpers/__init__.py' |
1537 | --- charmhelpers/contrib/hahelpers/__init__.py 1970-01-01 00:00:00 +0000 |
1538 | +++ charmhelpers/contrib/hahelpers/__init__.py 2016-03-30 08:19:58 +0000 |
1539 | @@ -0,0 +1,15 @@ |
1540 | +# Copyright 2014-2015 Canonical Limited. |
1541 | +# |
1542 | +# This file is part of charm-helpers. |
1543 | +# |
1544 | +# charm-helpers is free software: you can redistribute it and/or modify |
1545 | +# it under the terms of the GNU Lesser General Public License version 3 as |
1546 | +# published by the Free Software Foundation. |
1547 | +# |
1548 | +# charm-helpers is distributed in the hope that it will be useful, |
1549 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1550 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1551 | +# GNU Lesser General Public License for more details. |
1552 | +# |
1553 | +# You should have received a copy of the GNU Lesser General Public License |
1554 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1555 | |
1556 | === added file 'charmhelpers/contrib/hahelpers/apache.py' |
1557 | --- charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000 |
1558 | +++ charmhelpers/contrib/hahelpers/apache.py 2016-03-30 08:19:58 +0000 |
1559 | @@ -0,0 +1,82 @@ |
1560 | +# Copyright 2014-2015 Canonical Limited. |
1561 | +# |
1562 | +# This file is part of charm-helpers. |
1563 | +# |
1564 | +# charm-helpers is free software: you can redistribute it and/or modify |
1565 | +# it under the terms of the GNU Lesser General Public License version 3 as |
1566 | +# published by the Free Software Foundation. |
1567 | +# |
1568 | +# charm-helpers is distributed in the hope that it will be useful, |
1569 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1570 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1571 | +# GNU Lesser General Public License for more details. |
1572 | +# |
1573 | +# You should have received a copy of the GNU Lesser General Public License |
1574 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1575 | + |
1576 | +# |
1577 | +# Copyright 2012 Canonical Ltd. |
1578 | +# |
1579 | +# This file is sourced from lp:openstack-charm-helpers |
1580 | +# |
1581 | +# Authors: |
1582 | +# James Page <james.page@ubuntu.com> |
1583 | +# Adam Gandelman <adamg@ubuntu.com> |
1584 | +# |
1585 | + |
1586 | +import subprocess |
1587 | + |
1588 | +from charmhelpers.core.hookenv import ( |
1589 | + config as config_get, |
1590 | + relation_get, |
1591 | + relation_ids, |
1592 | + related_units as relation_list, |
1593 | + log, |
1594 | + INFO, |
1595 | +) |
1596 | + |
1597 | + |
1598 | +def get_cert(cn=None): |
1599 | + # TODO: deal with multiple https endpoints via charm config |
1600 | + cert = config_get('ssl_cert') |
1601 | + key = config_get('ssl_key') |
1602 | + if not (cert and key): |
1603 | + log("Inspecting identity-service relations for SSL certificate.", |
1604 | + level=INFO) |
1605 | + cert = key = None |
1606 | + if cn: |
1607 | + ssl_cert_attr = 'ssl_cert_{}'.format(cn) |
1608 | + ssl_key_attr = 'ssl_key_{}'.format(cn) |
1609 | + else: |
1610 | + ssl_cert_attr = 'ssl_cert' |
1611 | + ssl_key_attr = 'ssl_key' |
1612 | + for r_id in relation_ids('identity-service'): |
1613 | + for unit in relation_list(r_id): |
1614 | + if not cert: |
1615 | + cert = relation_get(ssl_cert_attr, |
1616 | + rid=r_id, unit=unit) |
1617 | + if not key: |
1618 | + key = relation_get(ssl_key_attr, |
1619 | + rid=r_id, unit=unit) |
1620 | + return (cert, key) |
1621 | + |
1622 | + |
1623 | +def get_ca_cert(): |
1624 | + ca_cert = config_get('ssl_ca') |
1625 | + if ca_cert is None: |
1626 | + log("Inspecting identity-service relations for CA SSL certificate.", |
1627 | + level=INFO) |
1628 | + for r_id in relation_ids('identity-service'): |
1629 | + for unit in relation_list(r_id): |
1630 | + if ca_cert is None: |
1631 | + ca_cert = relation_get('ca_cert', |
1632 | + rid=r_id, unit=unit) |
1633 | + return ca_cert |
1634 | + |
1635 | + |
1636 | +def install_ca_cert(ca_cert): |
1637 | + if ca_cert: |
1638 | + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', |
1639 | + 'w') as crt: |
1640 | + crt.write(ca_cert) |
1641 | + subprocess.check_call(['update-ca-certificates', '--fresh']) |
1642 | |
1643 | === added file 'charmhelpers/contrib/hahelpers/cluster.py' |
1644 | --- charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000 |
1645 | +++ charmhelpers/contrib/hahelpers/cluster.py 2016-03-30 08:19:58 +0000 |
1646 | @@ -0,0 +1,316 @@ |
1647 | +# Copyright 2014-2015 Canonical Limited. |
1648 | +# |
1649 | +# This file is part of charm-helpers. |
1650 | +# |
1651 | +# charm-helpers is free software: you can redistribute it and/or modify |
1652 | +# it under the terms of the GNU Lesser General Public License version 3 as |
1653 | +# published by the Free Software Foundation. |
1654 | +# |
1655 | +# charm-helpers is distributed in the hope that it will be useful, |
1656 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1657 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1658 | +# GNU Lesser General Public License for more details. |
1659 | +# |
1660 | +# You should have received a copy of the GNU Lesser General Public License |
1661 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1662 | + |
1663 | +# |
1664 | +# Copyright 2012 Canonical Ltd. |
1665 | +# |
1666 | +# Authors: |
1667 | +# James Page <james.page@ubuntu.com> |
1668 | +# Adam Gandelman <adamg@ubuntu.com> |
1669 | +# |
1670 | + |
1671 | +""" |
1672 | +Helpers for clustering and determining "cluster leadership" and other |
1673 | +clustering-related helpers. |
1674 | +""" |
1675 | + |
1676 | +import subprocess |
1677 | +import os |
1678 | + |
1679 | +from socket import gethostname as get_unit_hostname |
1680 | + |
1681 | +import six |
1682 | + |
1683 | +from charmhelpers.core.hookenv import ( |
1684 | + log, |
1685 | + relation_ids, |
1686 | + related_units as relation_list, |
1687 | + relation_get, |
1688 | + config as config_get, |
1689 | + INFO, |
1690 | + ERROR, |
1691 | + WARNING, |
1692 | + unit_get, |
1693 | + is_leader as juju_is_leader |
1694 | +) |
1695 | +from charmhelpers.core.decorators import ( |
1696 | + retry_on_exception, |
1697 | +) |
1698 | +from charmhelpers.core.strutils import ( |
1699 | + bool_from_string, |
1700 | +) |
1701 | + |
1702 | +DC_RESOURCE_NAME = 'DC' |
1703 | + |
1704 | + |
1705 | +class HAIncompleteConfig(Exception): |
1706 | + pass |
1707 | + |
1708 | + |
1709 | +class CRMResourceNotFound(Exception): |
1710 | + pass |
1711 | + |
1712 | + |
1713 | +class CRMDCNotFound(Exception): |
1714 | + pass |
1715 | + |
1716 | + |
1717 | +def is_elected_leader(resource): |
1718 | + """ |
1719 | + Returns True if the charm executing this is the elected cluster leader. |
1720 | + |
1721 | + It relies on two mechanisms to determine leadership: |
1722 | + 1. If juju is sufficiently new and leadership election is supported, |
1723 | + the is_leader command will be used. |
1724 | + 2. If the charm is part of a corosync cluster, call corosync to |
1725 | + determine leadership. |
1726 | + 3. If the charm is not part of a corosync cluster, the leader is |
1727 | + determined as being "the alive unit with the lowest unit number". In
1728 | + other words, the oldest surviving unit. |
1729 | + """ |
1730 | + try: |
1731 | + return juju_is_leader() |
1732 | + except NotImplementedError: |
1733 | + log('Juju leadership election feature not enabled' |
1734 | + ', using fallback support', |
1735 | + level=WARNING) |
1736 | + |
1737 | + if is_clustered(): |
1738 | + if not is_crm_leader(resource): |
1739 | + log('Deferring action to CRM leader.', level=INFO) |
1740 | + return False |
1741 | + else: |
1742 | + peers = peer_units() |
1743 | + if peers and not oldest_peer(peers): |
1744 | + log('Deferring action to oldest service unit.', level=INFO) |
1745 | + return False |
1746 | + return True |
1747 | + |
1748 | + |
1749 | +def is_clustered(): |
1750 | + for r_id in (relation_ids('ha') or []): |
1751 | + for unit in (relation_list(r_id) or []): |
1752 | + clustered = relation_get('clustered', |
1753 | + rid=r_id, |
1754 | + unit=unit) |
1755 | + if clustered: |
1756 | + return True |
1757 | + return False |
1758 | + |
1759 | + |
1760 | +def is_crm_dc(): |
1761 | + """ |
1762 | + Determine leadership by querying the pacemaker Designated Controller |
1763 | + """ |
1764 | + cmd = ['crm', 'status'] |
1765 | + try: |
1766 | + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
1767 | + if not isinstance(status, six.text_type): |
1768 | + status = six.text_type(status, "utf-8") |
1769 | + except subprocess.CalledProcessError as ex: |
1770 | + raise CRMDCNotFound(str(ex)) |
1771 | + |
1772 | + current_dc = '' |
1773 | + for line in status.split('\n'): |
1774 | + if line.startswith('Current DC'): |
1775 | + # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum |
1776 | + current_dc = line.split(':')[1].split()[0] |
1777 | + if current_dc == get_unit_hostname(): |
1778 | + return True |
1779 | + elif current_dc == 'NONE': |
1780 | + raise CRMDCNotFound('Current DC: NONE') |
1781 | + |
1782 | + return False |
1783 | + |
1784 | + |
1785 | +@retry_on_exception(5, base_delay=2, |
1786 | + exc_type=(CRMResourceNotFound, CRMDCNotFound)) |
1787 | +def is_crm_leader(resource, retry=False): |
1788 | + """ |
1789 | + Returns True if the charm calling this is the elected corosync leader, |
1790 | + as returned by calling the external "crm" command. |
1791 | + |
1792 | + We allow this operation to be retried to avoid the possibility of getting a |
1793 | + false negative. See LP #1396246 for more info. |
1794 | + """ |
1795 | + if resource == DC_RESOURCE_NAME: |
1796 | + return is_crm_dc() |
1797 | + cmd = ['crm', 'resource', 'show', resource] |
1798 | + try: |
1799 | + status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) |
1800 | + if not isinstance(status, six.text_type): |
1801 | + status = six.text_type(status, "utf-8") |
1802 | + except subprocess.CalledProcessError: |
1803 | + status = None |
1804 | + |
1805 | + if status and get_unit_hostname() in status: |
1806 | + return True |
1807 | + |
1808 | + if status and "resource %s is NOT running" % (resource) in status: |
1809 | + raise CRMResourceNotFound("CRM resource %s not found" % (resource)) |
1810 | + |
1811 | + return False |
1812 | + |
1813 | + |
1814 | +def is_leader(resource): |
1815 | + log("is_leader is deprecated. Please consider using is_crm_leader " |
1816 | + "instead.", level=WARNING) |
1817 | + return is_crm_leader(resource) |
1818 | + |
1819 | + |
1820 | +def peer_units(peer_relation="cluster"): |
1821 | + peers = [] |
1822 | + for r_id in (relation_ids(peer_relation) or []): |
1823 | + for unit in (relation_list(r_id) or []): |
1824 | + peers.append(unit) |
1825 | + return peers |
1826 | + |
1827 | + |
1828 | +def peer_ips(peer_relation='cluster', addr_key='private-address'): |
1829 | + '''Return a dict of peers and their private-address''' |
1830 | + peers = {} |
1831 | + for r_id in relation_ids(peer_relation): |
1832 | + for unit in relation_list(r_id): |
1833 | + peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) |
1834 | + return peers |
1835 | + |
1836 | + |
1837 | +def oldest_peer(peers): |
1838 | + """Determines who the oldest peer is by comparing unit numbers.""" |
1839 | + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) |
1840 | + for peer in peers: |
1841 | + remote_unit_no = int(peer.split('/')[1]) |
1842 | + if remote_unit_no < local_unit_no: |
1843 | + return False |
1844 | + return True |
1845 | + |
1846 | + |
1847 | +def eligible_leader(resource): |
1848 | + log("eligible_leader is deprecated. Please consider using " |
1849 | + "is_elected_leader instead.", level=WARNING) |
1850 | + return is_elected_leader(resource) |
1851 | + |
1852 | + |
1853 | +def https(): |
1854 | + ''' |
1855 | + Determines whether enough data has been provided in configuration |
1856 | + or relation data to configure HTTPS.
1857 | +
1858 | + returns: boolean |
1859 | + ''' |
1860 | + use_https = config_get('use-https') |
1861 | + if use_https and bool_from_string(use_https): |
1862 | + return True |
1863 | + if config_get('ssl_cert') and config_get('ssl_key'): |
1864 | + return True |
1865 | + for r_id in relation_ids('identity-service'): |
1866 | + for unit in relation_list(r_id): |
1867 | + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN |
1868 | + rel_state = [ |
1869 | + relation_get('https_keystone', rid=r_id, unit=unit), |
1870 | + relation_get('ca_cert', rid=r_id, unit=unit), |
1871 | + ] |
1872 | + # NOTE: works around (LP: #1203241) |
1873 | + if (None not in rel_state) and ('' not in rel_state): |
1874 | + return True |
1875 | + return False |
1876 | + |
1877 | + |
1878 | +def determine_api_port(public_port, singlenode_mode=False): |
1879 | + ''' |
1880 | + Determine correct API server listening port based on |
1881 | + existence of HTTPS reverse proxy and/or haproxy. |
1882 | + |
1883 | + public_port: int: standard public port for given service |
1884 | + |
1885 | + singlenode_mode: boolean: Shuffle ports when only a single unit is present |
1886 | + |
1887 | + returns: int: the correct listening port for the API service |
1888 | + ''' |
1889 | + i = 0 |
1890 | + if singlenode_mode: |
1891 | + i += 1 |
1892 | + elif len(peer_units()) > 0 or is_clustered(): |
1893 | + i += 1 |
1894 | + if https(): |
1895 | + i += 1 |
1896 | + return public_port - (i * 10) |
1897 | + |
1898 | + |
1899 | +def determine_apache_port(public_port, singlenode_mode=False): |
1900 | + ''' |
1901 | + Description: Determine correct apache listening port based on public IP + |
1902 | + state of the cluster. |
1903 | + |
1904 | + public_port: int: standard public port for given service |
1905 | + |
1906 | + singlenode_mode: boolean: Shuffle ports when only a single unit is present |
1907 | + |
1908 | + returns: int: the correct listening port for the HAProxy service |
1909 | + ''' |
1910 | + i = 0 |
1911 | + if singlenode_mode: |
1912 | + i += 1 |
1913 | + elif len(peer_units()) > 0 or is_clustered(): |
1914 | + i += 1 |
1915 | + return public_port - (i * 10) |
1916 | + |
1917 | + |
1918 | +def get_hacluster_config(exclude_keys=None): |
1919 | + ''' |
1920 | + Obtains all relevant configuration from charm configuration required |
1921 | + for initiating a relation to hacluster: |
1922 | + |
1923 | + ha-bindiface, ha-mcastport, vip |
1924 | + |
1925 | + param: exclude_keys: list of setting key(s) to be excluded. |
1926 | + returns: dict: A dict containing settings keyed by setting name. |
1927 | + raises: HAIncompleteConfig if settings are missing. |
1928 | + ''' |
1929 | + settings = ['ha-bindiface', 'ha-mcastport', 'vip'] |
1930 | + conf = {} |
1931 | + for setting in settings: |
1932 | + if exclude_keys and setting in exclude_keys: |
1933 | + continue |
1934 | + |
1935 | + conf[setting] = config_get(setting) |
1936 | + missing = [] |
1937 | + [missing.append(s) for s, v in six.iteritems(conf) if v is None] |
1938 | + if missing: |
1939 | + log('Insufficient config data to configure hacluster.', level=ERROR) |
1940 | + raise HAIncompleteConfig |
1941 | + return conf |
1942 | + |
1943 | + |
1944 | +def canonical_url(configs, vip_setting='vip'): |
1945 | + ''' |
1946 | + Returns the correct HTTP URL to this host given the state of HTTPS |
1947 | + configuration and hacluster. |
1948 | + |
1949 | + :configs : OSTemplateRenderer: A config templating object to inspect for
1950 | + a complete https context. |
1951 | + |
1952 | + :vip_setting: str: Setting in charm config that specifies |
1953 | + VIP address. |
1954 | + ''' |
1955 | + scheme = 'http' |
1956 | + if 'https' in configs.complete_contexts(): |
1957 | + scheme = 'https' |
1958 | + if is_clustered(): |
1959 | + addr = config_get(vip_setting) |
1960 | + else: |
1961 | + addr = unit_get('private-address') |
1962 | + return '%s://%s' % (scheme, addr) |
1963 | |
1964 | === added directory 'charmhelpers/contrib/network' |
1965 | === added file 'charmhelpers/contrib/network/__init__.py' |
1966 | --- charmhelpers/contrib/network/__init__.py 1970-01-01 00:00:00 +0000 |
1967 | +++ charmhelpers/contrib/network/__init__.py 2016-03-30 08:19:58 +0000 |
1968 | @@ -0,0 +1,15 @@ |
1969 | +# Copyright 2014-2015 Canonical Limited. |
1970 | +# |
1971 | +# This file is part of charm-helpers. |
1972 | +# |
1973 | +# charm-helpers is free software: you can redistribute it and/or modify |
1974 | +# it under the terms of the GNU Lesser General Public License version 3 as |
1975 | +# published by the Free Software Foundation. |
1976 | +# |
1977 | +# charm-helpers is distributed in the hope that it will be useful, |
1978 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1979 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
1980 | +# GNU Lesser General Public License for more details. |
1981 | +# |
1982 | +# You should have received a copy of the GNU Lesser General Public License |
1983 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1984 | |
1985 | === added file 'charmhelpers/contrib/network/ip.py' |
1986 | --- charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 |
1987 | +++ charmhelpers/contrib/network/ip.py 2016-03-30 08:19:58 +0000 |
1988 | @@ -0,0 +1,458 @@ |
1989 | +# Copyright 2014-2015 Canonical Limited. |
1990 | +# |
1991 | +# This file is part of charm-helpers. |
1992 | +# |
1993 | +# charm-helpers is free software: you can redistribute it and/or modify |
1994 | +# it under the terms of the GNU Lesser General Public License version 3 as |
1995 | +# published by the Free Software Foundation. |
1996 | +# |
1997 | +# charm-helpers is distributed in the hope that it will be useful, |
1998 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
1999 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2000 | +# GNU Lesser General Public License for more details. |
2001 | +# |
2002 | +# You should have received a copy of the GNU Lesser General Public License |
2003 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2004 | + |
2005 | +import glob |
2006 | +import re |
2007 | +import subprocess |
2008 | +import six |
2009 | +import socket |
2010 | + |
2011 | +from functools import partial |
2012 | + |
2013 | +from charmhelpers.core.hookenv import unit_get |
2014 | +from charmhelpers.fetch import apt_install, apt_update |
2015 | +from charmhelpers.core.hookenv import ( |
2016 | + log, |
2017 | + WARNING, |
2018 | +) |
2019 | + |
2020 | +try: |
2021 | + import netifaces |
2022 | +except ImportError: |
2023 | + apt_update(fatal=True) |
2024 | + apt_install('python-netifaces', fatal=True) |
2025 | + import netifaces |
2026 | + |
2027 | +try: |
2028 | + import netaddr |
2029 | +except ImportError: |
2030 | + apt_update(fatal=True) |
2031 | + apt_install('python-netaddr', fatal=True) |
2032 | + import netaddr |
2033 | + |
2034 | + |
2035 | +def _validate_cidr(network): |
2036 | + try: |
2037 | + netaddr.IPNetwork(network) |
2038 | + except (netaddr.core.AddrFormatError, ValueError): |
2039 | + raise ValueError("Network (%s) is not in CIDR presentation format" % |
2040 | + network) |
2041 | + |
2042 | + |
2043 | +def no_ip_found_error_out(network): |
2044 | + errmsg = ("No IP address found in network(s): %s" % network) |
2045 | + raise ValueError(errmsg) |
2046 | + |
2047 | + |
2048 | +def get_address_in_network(network, fallback=None, fatal=False): |
2049 | + """Get an IPv4 or IPv6 address within the network from the host. |
2050 | + |
2051 | + :param network (str): CIDR presentation format. For example, |
2052 | + '192.168.1.0/24'. Supports multiple networks as a space-delimited list. |
2053 | + :param fallback (str): If no address is found, return fallback. |
2054 | + :param fatal (boolean): If no address is found, fallback is not |
2055 | + set and fatal is True then exit(1). |
2056 | + """ |
2057 | + if network is None: |
2058 | + if fallback is not None: |
2059 | + return fallback |
2060 | + |
2061 | + if fatal: |
2062 | + no_ip_found_error_out(network) |
2063 | + else: |
2064 | + return None |
2065 | + |
2066 | + networks = network.split() or [network] |
2067 | + for network in networks: |
2068 | + _validate_cidr(network) |
2069 | + network = netaddr.IPNetwork(network) |
2070 | + for iface in netifaces.interfaces(): |
2071 | + addresses = netifaces.ifaddresses(iface) |
2072 | + if network.version == 4 and netifaces.AF_INET in addresses: |
2073 | + addr = addresses[netifaces.AF_INET][0]['addr'] |
2074 | + netmask = addresses[netifaces.AF_INET][0]['netmask'] |
2075 | + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
2076 | + if cidr in network: |
2077 | + return str(cidr.ip) |
2078 | + |
2079 | + if network.version == 6 and netifaces.AF_INET6 in addresses: |
2080 | + for addr in addresses[netifaces.AF_INET6]: |
2081 | + if not addr['addr'].startswith('fe80'): |
2082 | + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
2083 | + addr['netmask'])) |
2084 | + if cidr in network: |
2085 | + return str(cidr.ip) |
2086 | + |
2087 | + if fallback is not None: |
2088 | + return fallback |
2089 | + |
2090 | + if fatal: |
2091 | + no_ip_found_error_out(network) |
2092 | + |
2093 | + return None |
2094 | + |
2095 | + |
2096 | +def is_ipv6(address): |
2097 | + """Determine whether provided address is IPv6 or not.""" |
2098 | + try: |
2099 | + address = netaddr.IPAddress(address) |
2100 | + except netaddr.AddrFormatError: |
2101 | + # probably a hostname - so not an address at all! |
2102 | + return False |
2103 | + |
2104 | + return address.version == 6 |
2105 | + |
2106 | + |
2107 | +def is_address_in_network(network, address): |
2108 | + """ |
2109 | + Determine whether the provided address is within a network range. |
2110 | + |
2111 | + :param network (str): CIDR presentation format. For example, |
2112 | + '192.168.1.0/24'. |
2113 | + :param address: An individual IPv4 or IPv6 address without a net |
2114 | + mask or subnet prefix. For example, '192.168.1.1'. |
2115 | + :returns boolean: Flag indicating whether address is in network. |
2116 | + """ |
2117 | + try: |
2118 | + network = netaddr.IPNetwork(network) |
2119 | + except (netaddr.core.AddrFormatError, ValueError): |
2120 | + raise ValueError("Network (%s) is not in CIDR presentation format" % |
2121 | + network) |
2122 | + |
2123 | + try: |
2124 | + address = netaddr.IPAddress(address) |
2125 | + except (netaddr.core.AddrFormatError, ValueError): |
2126 | + raise ValueError("Address (%s) is not in correct presentation format" % |
2127 | + address) |
2128 | + |
2129 | + if address in network: |
2130 | + return True |
2131 | + else: |
2132 | + return False |
2133 | + |
2134 | + |
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        # IPv4: only the first configured address on the interface is
        # considered.
        if address.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        # IPv6: every non link-local (fe80...) address is considered.
        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            # Derive the prefix length from the CIDR rather
                            # than the raw netifaces 'netmask' entry, which
                            # for IPv6 may itself carry a prefix suffix.
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None
2174 | + |
2175 | + |
# Convenience specialisations of _get_for_address().
get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')
2180 | + |
2181 | + |
def format_ipv6_addr(address):
    """Wrap an IPv6 address in square brackets, e.g. '[::1]'.

    Most configuration files require IPv6 addresses to be written this
    way. Returns None when *address* is not IPv6.
    """
    return "[%s]" % address if is_ipv6(address) else None
2192 | + |
2193 | + |
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any."""
    # Accept device paths such as '/dev/eth0' and reduce to the nic name.
    if '/' in iface:
        iface = iface.split('/')[-1]

    exc_list = exc_list or []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        # Include aliases such as 'eth0:1' alongside the base interface.
        ifaces = sorted(candidate for candidate in interfaces
                        if candidate == iface or
                        candidate.split(':')[0] == iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
    elif iface in interfaces:
        ifaces = [iface]
    else:
        if fatal:
            raise Exception("Interface '%s' not found " % (iface))
        return []

    addresses = []
    for netiface in ifaces:
        entries = netifaces.ifaddresses(netiface).get(inet_num, [])
        for entry in entries:
            if 'addr' in entry and entry['addr'] not in exc_list:
                addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
2243 | + |
2244 | + |
# AF_INET (IPv4) specialisation of get_iface_addr().
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
2246 | + |
2247 | + |
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured.

    :param addr: IPv4 or IPv6 address to search for.
    :returns str: name of the interface carrying *addr*.
    :raises Exception: if no interface has the address configured.
    """
    # Pattern stripping the '%<iface>' zone suffix carried by link-local
    # addresses. Compiled once here instead of once per address inside the
    # triple-nested loop below (it is loop-invariant).
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # link local
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)

                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
2268 | + |
2269 | + |
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private
    address.

    :param f: function accepting an 'iface' keyword argument.
    :returns: wrapped function with the original's metadata preserved.
    """
    # Local import: the module-level functools import only brings in
    # 'partial'.
    from functools import wraps

    @wraps(f)  # preserve f's __name__/__doc__ for introspection and logs
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer
2282 | + |
2283 | + |
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.

    :param iface: interface to inspect (injected by @sniff_iface if None).
    :param inc_aliases: also inspect interface aliases (eth0:1 etc.).
    :param fatal: raise instead of returning [] when nothing is found.
    :param exc_list: addresses to exclude from the result.
    :param dynamic_only: only accept 'scope global dynamic' addresses.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        # Split link-local addresses (fe80::...%iface) from global ones.
        global_addrs = []
        for addr in addresses:
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            # NOTE(review): if dynamic_only is True and no
                            # link-local address was seen above, eui_64_mac
                            # is unbound here and this raises NameError -
                            # confirm whether that path can occur in
                            # production.
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
2342 | + |
2343 | + |
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    # Each match is <vnic_dir>/<bridge-name>/bridge; keep the name part.
    pattern = "%s/*/bridge" % vnic_dir
    return [match.replace(vnic_dir, '').split('/')[1]
            for match in glob.glob(pattern)]
2348 | + |
2349 | + |
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    # Members appear as entries under <vnic_dir>/<bridge>/brif/.
    pattern = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [entry.split('/')[-1] for entry in glob.glob(pattern)]
2354 | + |
2355 | + |
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
2363 | + |
2364 | + |
def is_ip(address):
    """
    Returns True if address is a valid IP address.

    Only IPv4 is tested (socket.inet_aton); IPv6 strings return False.
    """
    try:
        socket.inet_aton(address)
    except socket.error:
        # Not a parseable IPv4 address.
        return False
    return True
2375 | + |
2376 | + |
def ns_query(address):
    """Perform a DNS lookup of *address*.

    :param address: a dns.name.Name (reverse/PTR lookup) or a string
        (forward/A lookup); any other type returns None.
    :returns str: the first answer as a string, or None.
    """
    try:
        import dns.resolver
    except ImportError:
        # Lazily install the resolver library on first use.
        apt_install('python-dnspython')
        import dns.resolver

    # NOTE(review): dns.name is referenced without an explicit import; this
    # relies on 'import dns.resolver' pulling it in as a side effect -
    # confirm.
    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    # NOTE(review): dns.resolver.query() raises (e.g. NXDOMAIN, NoAnswer) on
    # failure rather than returning an empty answer set; callers see the
    # exception, not None - confirm that is intended.
    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None
2395 | + |
2396 | + |
def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.

    :param hostname: hostname or IP address.
    :param fallback: value returned when resolution fails.
    :returns str: resolved IP address, or *fallback* on failure.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
2414 | + |
2415 | + |
def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.

    :param address: IP address or hostname.
    :param fqdn: if True return the full name with any trailing '.'
        stripped; if False return only the short (first-label) hostname.
    :returns str: hostname, or None when reverse resolution fails.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install("python-dnspython")
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            try:
                result = socket.gethostbyaddr(address)[0]
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
2447 | |
2448 | === added directory 'charmhelpers/contrib/openstack' |
2449 | === added file 'charmhelpers/contrib/openstack/__init__.py' |
2450 | --- charmhelpers/contrib/openstack/__init__.py 1970-01-01 00:00:00 +0000 |
2451 | +++ charmhelpers/contrib/openstack/__init__.py 2016-03-30 08:19:58 +0000 |
2452 | @@ -0,0 +1,15 @@ |
2453 | +# Copyright 2014-2015 Canonical Limited. |
2454 | +# |
2455 | +# This file is part of charm-helpers. |
2456 | +# |
2457 | +# charm-helpers is free software: you can redistribute it and/or modify |
2458 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2459 | +# published by the Free Software Foundation. |
2460 | +# |
2461 | +# charm-helpers is distributed in the hope that it will be useful, |
2462 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2463 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2464 | +# GNU Lesser General Public License for more details. |
2465 | +# |
2466 | +# You should have received a copy of the GNU Lesser General Public License |
2467 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2468 | |
2469 | === added file 'charmhelpers/contrib/openstack/alternatives.py' |
2470 | --- charmhelpers/contrib/openstack/alternatives.py 1970-01-01 00:00:00 +0000 |
2471 | +++ charmhelpers/contrib/openstack/alternatives.py 2016-03-30 08:19:58 +0000 |
2472 | @@ -0,0 +1,33 @@ |
2473 | +# Copyright 2014-2015 Canonical Limited. |
2474 | +# |
2475 | +# This file is part of charm-helpers. |
2476 | +# |
2477 | +# charm-helpers is free software: you can redistribute it and/or modify |
2478 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2479 | +# published by the Free Software Foundation. |
2480 | +# |
2481 | +# charm-helpers is distributed in the hope that it will be useful, |
2482 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2483 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2484 | +# GNU Lesser General Public License for more details. |
2485 | +# |
2486 | +# You should have received a copy of the GNU Lesser General Public License |
2487 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2488 | + |
2489 | +''' Helper for managing alternatives for file conflict resolution ''' |
2490 | + |
2491 | +import subprocess |
2492 | +import shutil |
2493 | +import os |
2494 | + |
2495 | + |
def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration '''
    if os.path.exists(target) and not os.path.islink(target):
        # Preserve any real file/directory at the target path before the
        # alternatives system replaces it with a symlink.
        shutil.move(target, '{}.bak'.format(target))
    subprocess.check_call([
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority),
    ])
2506 | |
2507 | === added directory 'charmhelpers/contrib/openstack/amulet' |
2508 | === added file 'charmhelpers/contrib/openstack/amulet/__init__.py' |
2509 | --- charmhelpers/contrib/openstack/amulet/__init__.py 1970-01-01 00:00:00 +0000 |
2510 | +++ charmhelpers/contrib/openstack/amulet/__init__.py 2016-03-30 08:19:58 +0000 |
2511 | @@ -0,0 +1,15 @@ |
2512 | +# Copyright 2014-2015 Canonical Limited. |
2513 | +# |
2514 | +# This file is part of charm-helpers. |
2515 | +# |
2516 | +# charm-helpers is free software: you can redistribute it and/or modify |
2517 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2518 | +# published by the Free Software Foundation. |
2519 | +# |
2520 | +# charm-helpers is distributed in the hope that it will be useful, |
2521 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2522 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2523 | +# GNU Lesser General Public License for more details. |
2524 | +# |
2525 | +# You should have received a copy of the GNU Lesser General Public License |
2526 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2527 | |
2528 | === added file 'charmhelpers/contrib/openstack/amulet/deployment.py' |
2529 | --- charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 |
2530 | +++ charmhelpers/contrib/openstack/amulet/deployment.py 2016-03-30 08:19:58 +0000 |
2531 | @@ -0,0 +1,301 @@ |
2532 | +# Copyright 2014-2015 Canonical Limited. |
2533 | +# |
2534 | +# This file is part of charm-helpers. |
2535 | +# |
2536 | +# charm-helpers is free software: you can redistribute it and/or modify |
2537 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2538 | +# published by the Free Software Foundation. |
2539 | +# |
2540 | +# charm-helpers is distributed in the hope that it will be useful, |
2541 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2542 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2543 | +# GNU Lesser General Public License for more details. |
2544 | +# |
2545 | +# You should have received a copy of the GNU Lesser General Public License |
2546 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2547 | + |
2548 | +import logging |
2549 | +import re |
2550 | +import sys |
2551 | +import six |
2552 | +from collections import OrderedDict |
2553 | +from charmhelpers.contrib.amulet.deployment import ( |
2554 | + AmuletDeployment |
2555 | +) |
2556 | + |
2557 | +DEBUG = logging.DEBUG |
2558 | +ERROR = logging.ERROR |
2559 | + |
2560 | + |
class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None,
                 stable=True, log_level=DEBUG):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy (e.g. 'trusty').
        :param openstack: openstack-origin value (e.g. 'cloud:trusty-kilo').
        :param source: 'source' config value for non-OpenStack charms.
        :param stable: deploy from stable charm branches when True.
        :param log_level: level for the stdout deployment logger.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.log = self.get_logger(level=log_level)
        self.log.info('OpenStackAmuletDeployment: init')
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout.

        NOTE(review): logging.getLogger returns the same logger per name, and
        this adds a new StreamHandler on every call - repeated calls appear
        to duplicate output; confirm callers only invoke this once.
        """
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresponding
        stable or next branches for the other_services."""

        self.log.info('OpenStackAmuletDeployment: determine branch locations')

        # Charms outside the lp:~openstack-charmers namespace
        base_charms = ['mysql', 'mongodb', 'nrpe']

        # Force these charms to current series even when using an older series.
        # ie. Use trusty/nrpe even when series is precise, as the P charm
        # does not possess the necessary external master config and hooks.
        force_series_current = ['nrpe']

        if self.series in ['precise', 'trusty']:
            base_series = self.series
        else:
            base_series = self.current_next

        for svc in other_services:
            # NOTE(review): once a force_series_current charm is seen,
            # base_series stays at current_next for all later charms in the
            # list too - confirm that carry-over is intentional.
            if svc['name'] in force_series_current:
                base_series = self.current_next
            # If a location has been explicitly set, use it
            if svc.get('location'):
                continue
            if self.stable:
                temp = 'lp:charms/{}/{}'
                svc['location'] = temp.format(base_series,
                                              svc['name'])
            else:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}/{}'
                    svc['location'] = temp.format(base_series,
                                                  svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])

        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source.

        :param this_service: dict describing the charm under test.
        :param other_services: list of dicts for the supporting charms.
        """
        self.log.info('OpenStackAmuletDeployment: adding services')

        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        # Note: appends this_service into other_services in place.
        services = other_services
        services.append(this_service)

        # Charms which should use the source config option
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']

        # Charms which can not use openstack-origin, ie. many subordinates
        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + no_origin:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in no_origin:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services.

        :param configs: mapping of service name -> config dict.
        """
        self.log.info('OpenStackAmuletDeployment: configure services')
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _auto_wait_for_status(self, message=None, exclude_services=None,
                              include_only=None, timeout=1800):
        """Wait for all units to have a specific extended status, except
        for any defined as excluded.  Unless specified via message, any
        status containing any case of 'ready' will be considered a match.

        Examples of message usage:

        Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
            message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)

        Wait for all units to reach this status (exact match):
            message = re.compile('^Unit is ready and clustered$')

        Wait for all units to reach any one of these (exact match):
            message = re.compile('Unit is ready|OK|Ready')

        Wait for at least one unit to reach this status (exact match):
            message = {'ready'}

        See Amulet's sentry.wait_for_messages() for message usage detail.
        https://github.com/juju/amulet/blob/master/amulet/sentry.py

        :param message: Expected status match
        :param exclude_services: List of juju service names to ignore,
            not to be used in conjunction with include_only.
        :param include_only: List of juju service names to exclusively check,
            not to be used in conjunction with exclude_services.
        :param timeout: Maximum time in seconds to wait for status match
        :returns: None.  Raises if timeout is hit.
        """
        self.log.info('Waiting for extended status on units...')

        all_services = self.d.services.keys()

        if exclude_services and include_only:
            raise ValueError('exclude_services can not be used '
                             'with include_only')

        if message:
            # NOTE(review): re._pattern_type is private re-module API
            # (removed in Python 3.7) - confirm target runtimes.
            if isinstance(message, re._pattern_type):
                match = message.pattern
            else:
                match = message

            self.log.debug('Custom extended status wait match: '
                           '{}'.format(match))
        else:
            self.log.debug('Default extended status wait match: contains '
                           'READY (case-insensitive)')
            message = re.compile('.*ready.*', re.IGNORECASE)

        if exclude_services:
            self.log.debug('Excluding services from extended status match: '
                           '{}'.format(exclude_services))
        else:
            exclude_services = []

        if include_only:
            services = include_only
        else:
            services = list(set(all_services) - set(exclude_services))

        self.log.debug('Waiting up to {}s for extended status on services: '
                       '{}'.format(timeout, services))
        service_messages = {service: message for service in services}
        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
        self.log.info('OK')

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
         self.wily_liberty, self.trusty_mitaka,
         self.xenial_mitaka) = range(14)

        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo,
            ('wily', None): self.wily_liberty,
            ('xenial', None): self.xenial_mitaka}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
            ('wily', 'liberty'),
            ('xenial', 'mitaka'),
        ])
        if self.openstack:
            # e.g. 'cloud:trusty-kilo/updates' -> 'kilo'
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools in a ceph + cinder + glance
        test scenario, based on OpenStack release and whether ceph radosgw
        is flagged as present or not."""

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            pools = [
                'rbd',
                'cinder',
                'glance'
            ]
        else:
            # Juno or earlier
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
2833 | |
2834 | === added file 'charmhelpers/contrib/openstack/amulet/utils.py' |
2835 | --- charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 |
2836 | +++ charmhelpers/contrib/openstack/amulet/utils.py 2016-03-30 08:19:58 +0000 |
2837 | @@ -0,0 +1,985 @@ |
2838 | +# Copyright 2014-2015 Canonical Limited. |
2839 | +# |
2840 | +# This file is part of charm-helpers. |
2841 | +# |
2842 | +# charm-helpers is free software: you can redistribute it and/or modify |
2843 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2844 | +# published by the Free Software Foundation. |
2845 | +# |
2846 | +# charm-helpers is distributed in the hope that it will be useful, |
2847 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2848 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2849 | +# GNU Lesser General Public License for more details. |
2850 | +# |
2851 | +# You should have received a copy of the GNU Lesser General Public License |
2852 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2853 | + |
2854 | +import amulet |
2855 | +import json |
2856 | +import logging |
2857 | +import os |
2858 | +import re |
2859 | +import six |
2860 | +import time |
2861 | +import urllib |
2862 | + |
2863 | +import cinderclient.v1.client as cinder_client |
2864 | +import glanceclient.v1.client as glance_client |
2865 | +import heatclient.v1.client as heat_client |
2866 | +import keystoneclient.v2_0 as keystone_client |
2867 | +import novaclient.v1_1.client as nova_client |
2868 | +import pika |
2869 | +import swiftclient |
2870 | + |
2871 | +from charmhelpers.contrib.amulet.utils import ( |
2872 | + AmuletUtils |
2873 | +) |
2874 | + |
2875 | +DEBUG = logging.DEBUG |
2876 | +ERROR = logging.ERROR |
2877 | + |
2878 | + |
2879 | +class OpenStackAmuletUtils(AmuletUtils): |
2880 | + """OpenStack amulet utilities. |
2881 | + |
2882 | + This class inherits from AmuletUtils and has additional support |
2883 | + that is specifically for use by OpenStack charm tests. |
2884 | + """ |
2885 | + |
    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment.

        :param log_level: logging level passed through to AmuletUtils.
        """
        super(OpenStackAmuletUtils, self).__init__(log_level)
2889 | + |
2890 | + def validate_endpoint_data(self, endpoints, admin_port, internal_port, |
2891 | + public_port, expected): |
2892 | + """Validate endpoint data. |
2893 | + |
2894 | + Validate actual endpoint data vs expected endpoint data. The ports |
2895 | + are used to find the matching endpoint. |
2896 | + """ |
2897 | + self.log.debug('Validating endpoint data...') |
2898 | + self.log.debug('actual: {}'.format(repr(endpoints))) |
2899 | + found = False |
2900 | + for ep in endpoints: |
2901 | + self.log.debug('endpoint: {}'.format(repr(ep))) |
2902 | + if (admin_port in ep.adminurl and |
2903 | + internal_port in ep.internalurl and |
2904 | + public_port in ep.publicurl): |
2905 | + found = True |
2906 | + actual = {'id': ep.id, |
2907 | + 'region': ep.region, |
2908 | + 'adminurl': ep.adminurl, |
2909 | + 'internalurl': ep.internalurl, |
2910 | + 'publicurl': ep.publicurl, |
2911 | + 'service_id': ep.service_id} |
2912 | + ret = self._validate_dict_data(expected, actual) |
2913 | + if ret: |
2914 | + return 'unexpected endpoint data - {}'.format(ret) |
2915 | + |
2916 | + if not found: |
2917 | + return 'endpoint not found' |
2918 | + |
    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.
        """
        self.log.debug('Validating service catalog endpoint data...')
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in six.iteritems(expected):
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        # NOTE(review): 'ret' is unbound if 'expected' is empty (NameError);
        # on success this returns the last (falsy) validation result.
        return ret
2935 | + |
def validate_tenant_data(self, expected, actual):
    """Validate tenant data.

    Validate a list of actual tenant data vs list of expected tenant
    data.

    :param expected: list of dicts of expected tenant attributes
    :param actual: list of tenant objects as returned by keystone
    :returns: None if successful, otherwise an error message string
    """
    self.log.debug('Validating tenant data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # Initialize up front: 'ret' was previously unbound (NameError)
    # when 'expected' was empty.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            a = {'enabled': act.enabled, 'description': act.description,
                 'name': act.name, 'id': act.id}
            if e['name'] == a['name']:
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected tenant data - {}".format(ret)
        if not found:
            return "tenant {} does not exist".format(e['name'])
    return ret
2957 | + |
def validate_role_data(self, expected, actual):
    """Validate role data.

    Validate a list of actual role data vs a list of expected role
    data.

    :param expected: list of dicts of expected role attributes
    :param actual: list of role objects as returned by keystone
    :returns: None if successful, otherwise an error message string
    """
    self.log.debug('Validating role data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # Initialize up front: 'ret' was previously unbound (NameError)
    # when 'expected' was empty.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            a = {'name': act.name, 'id': act.id}
            if e['name'] == a['name']:
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected role data - {}".format(ret)
        if not found:
            return "role {} does not exist".format(e['name'])
    return ret
2978 | + |
def validate_user_data(self, expected, actual):
    """Validate user data.

    Validate a list of actual user data vs a list of expected user
    data.

    :param expected: list of dicts of expected user attributes
    :param actual: list of user objects as returned by keystone
    :returns: None if successful, otherwise an error message string
    """
    self.log.debug('Validating user data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    # Initialize up front: 'ret' was previously unbound (NameError)
    # when 'expected' was empty.
    ret = None
    for e in expected:
        found = False
        for act in actual:
            a = {'enabled': act.enabled, 'name': act.name,
                 'email': act.email, 'tenantId': act.tenantId,
                 'id': act.id}
            if e['name'] == a['name']:
                found = True
                ret = self._validate_dict_data(e, a)
                if ret:
                    return "unexpected user data - {}".format(ret)
        if not found:
            return "user {} does not exist".format(e['name'])
    return ret
3001 | + |
def validate_flavor_data(self, expected, actual):
    """Validate flavor data.

    Compare the names of the actual flavors against the expected
    flavor name list.
    """
    self.log.debug('Validating flavor data...')
    self.log.debug('actual: {}'.format(repr(actual)))
    flavor_names = [flavor.name for flavor in actual]
    return self._validate_list_data(expected, flavor_names)
3011 | + |
def tenant_exists(self, keystone, tenant):
    """Return True if the named tenant is present in keystone."""
    self.log.debug('Checking if tenant exists ({})...'.format(tenant))
    existing_names = [t.name for t in keystone.tenants.list()]
    return tenant in existing_names
3016 | + |
def authenticate_cinder_admin(self, keystone_sentry, username,
                              password, tenant):
    """Authenticates admin user with cinder."""
    # NOTE(beisner): cinder python client doesn't accept tokens.
    relation_data = keystone_sentry.relation('shared-db',
                                             'mysql:shared-db')
    service_ip = relation_data['private-address']
    ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
    return cinder_client.Client(username, password, tenant, ept)
3026 | + |
def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                tenant):
    """Authenticates admin user with the keystone admin endpoint."""
    self.log.debug('Authenticating keystone admin...')
    relation_data = keystone_sentry.relation('shared-db',
                                             'mysql:shared-db')
    service_ip = relation_data['private-address']
    ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
    return keystone_client.Client(username=user, password=password,
                                  tenant_name=tenant, auth_url=ep)
3037 | + |
def authenticate_keystone_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with the keystone public endpoint."""
    self.log.debug('Authenticating keystone user ({})...'.format(user))
    public_ep = keystone.service_catalog.url_for(
        service_type='identity', endpoint_type='publicURL')
    return keystone_client.Client(username=user, password=password,
                                  tenant_name=tenant, auth_url=public_ep)
3045 | + |
def authenticate_glance_admin(self, keystone):
    """Authenticates admin user with glance."""
    self.log.debug('Authenticating glance admin...')
    admin_ep = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='adminURL')
    return glance_client.Client(admin_ep, token=keystone.auth_token)
3052 | + |
def authenticate_heat_admin(self, keystone):
    """Authenticates the admin user with heat."""
    self.log.debug('Authenticating heat admin...')
    public_ep = keystone.service_catalog.url_for(
        service_type='orchestration', endpoint_type='publicURL')
    return heat_client.Client(endpoint=public_ep,
                              token=keystone.auth_token)
3059 | + |
def authenticate_nova_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with nova-api."""
    self.log.debug('Authenticating nova user ({})...'.format(user))
    public_ep = keystone.service_catalog.url_for(
        service_type='identity', endpoint_type='publicURL')
    return nova_client.Client(username=user, api_key=password,
                              project_id=tenant, auth_url=public_ep)
3067 | + |
def authenticate_swift_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with the swift api."""
    self.log.debug('Authenticating swift user ({})...'.format(user))
    public_ep = keystone.service_catalog.url_for(
        service_type='identity', endpoint_type='publicURL')
    return swiftclient.Connection(authurl=public_ep,
                                  user=user,
                                  key=password,
                                  tenant_name=tenant,
                                  auth_version='2.0')
3078 | + |
def create_cirros_image(self, glance, image_name):
    """Download the latest cirros image and upload it to glance,
    validate and return a resource pointer.

    :param glance: pointer to authenticated glance connection
    :param image_name: display name for new image
    :returns: glance image pointer
    """
    self.log.debug('Creating glance cirros image '
                   '({})...'.format(image_name))

    # Download cirros image, honouring an optional http proxy
    http_proxy = os.getenv('AMULET_HTTP_PROXY')
    self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
    if http_proxy:
        proxies = {'http': http_proxy}
        opener = urllib.FancyURLopener(proxies)
    else:
        opener = urllib.FancyURLopener()

    f = opener.open('http://download.cirros-cloud.net/version/released')
    version = f.read().strip()
    cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
    local_path = os.path.join('tests', cirros_img)

    if not os.path.exists(local_path):
        cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                              version, cirros_img)
        opener.retrieve(cirros_url, local_path)
    f.close()

    # Create glance image.  Open in binary mode: the image is raw
    # qcow2 data and text mode would corrupt it on some platforms.
    with open(local_path, 'rb') as f:
        image = glance.images.create(name=image_name, is_public=True,
                                     disk_format='qcow2',
                                     container_format='bare', data=f)

    # Wait for image to reach active status
    img_id = image.id
    ret = self.resource_reaches_status(glance.images, img_id,
                                       expected_stat='active',
                                       msg='Image status wait')
    if not ret:
        msg = 'Glance image failed to reach expected state.'
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Re-validate new image
    self.log.debug('Validating image attributes...')
    val_img_name = glance.images.get(img_id).name
    val_img_stat = glance.images.get(img_id).status
    val_img_pub = glance.images.get(img_id).is_public
    val_img_cfmt = glance.images.get(img_id).container_format
    val_img_dfmt = glance.images.get(img_id).disk_format
    msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
                'container fmt:{} disk fmt:{}'.format(
                    val_img_name, val_img_pub, img_id,
                    val_img_stat, val_img_cfmt, val_img_dfmt))

    if val_img_name == image_name and val_img_stat == 'active' \
            and val_img_pub is True and val_img_cfmt == 'bare' \
            and val_img_dfmt == 'qcow2':
        self.log.debug(msg_attr)
    else:
        # Fixed copy/paste error: message previously said 'Volume'.
        msg = ('Image validation failed, {}'.format(msg_attr))
        amulet.raise_status(amulet.FAIL, msg=msg)

    return image
3146 | + |
def delete_image(self, glance, image):
    """Delete the specified glance image (deprecated).

    Kept for backwards compatibility; new code should call
    delete_resource directly.
    """
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'delete_resource instead of delete_image.')
    self.log.debug('Deleting glance image ({})...'.format(image))
    return self.delete_resource(glance.images, image, msg='glance image')
3155 | + |
def create_instance(self, nova, image_name, instance_name, flavor):
    """Boot a nova instance and wait (up to ~3 minutes) for it to
    reach ACTIVE status.

    :returns: instance pointer, or None if it never went ACTIVE
    """
    self.log.debug('Creating instance '
                   '({}|{}|{})'.format(instance_name, image_name, flavor))
    image = nova.images.find(name=image_name)
    flavor = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=image,
                                   flavor=flavor)

    # Poll every 3 seconds, at most 59 times, for ACTIVE status.
    attempt = 1
    status = instance.status
    while status != 'ACTIVE' and attempt < 60:
        time.sleep(3)
        instance = nova.servers.get(instance.id)
        status = instance.status
        self.log.debug('instance status: {}'.format(status))
        attempt += 1

    if status == 'ACTIVE':
        return instance
    self.log.error('instance creation timed out')
    return None
3179 | + |
def delete_instance(self, nova, instance):
    """Delete the specified nova instance (deprecated).

    Kept for backwards compatibility; new code should call
    delete_resource directly.
    """
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'delete_resource instead of delete_instance.')
    self.log.debug('Deleting instance ({})...'.format(instance))
    return self.delete_resource(nova.servers, instance,
                                msg='nova instance')
3189 | + |
def create_or_get_keypair(self, nova, keypair_name="testkey"):
    """Create a new keypair, or return pointer if it already exists.

    :param nova: authenticated nova client
    :param keypair_name: name of the keypair to fetch or create
    :returns: nova keypair pointer
    """
    try:
        _keypair = nova.keypairs.get(keypair_name)
        self.log.debug('Keypair ({}) already exists, '
                       'using it.'.format(keypair_name))
        return _keypair
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; only ordinary errors mean "not found".
        self.log.debug('Keypair ({}) does not exist, '
                       'creating it.'.format(keypair_name))

    _keypair = nova.keypairs.create(name=keypair_name)
    return _keypair
3203 | + |
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                         img_id=None, src_vol_id=None, snap_id=None):
    """Create cinder volume, optionally from a glance image, OR
    optionally as a clone of an existing volume, OR optionally
    from a snapshot. Wait for the new volume status to reach
    the expected status, validate and return a resource pointer.

    :param cinder: authenticated cinder client connection
    :param vol_name: cinder volume display name
    :param vol_size: size in gigabytes
    :param img_id: optional glance image id
    :param src_vol_id: optional source volume id to clone
    :param snap_id: optional snapshot id to use
    :returns: cinder volume pointer
    """
    # Handle parameter input and avoid impossible combinations:
    # at most one of img_id / src_vol_id / snap_id may be supplied.
    # NOTE: 'bootable' is kept as the string 'true'/'false' to match
    # the string form used by the cinder API attribute compared below.
    if img_id and not src_vol_id and not snap_id:
        # Create volume from image
        self.log.debug('Creating cinder volume from glance image...')
        bootable = 'true'
    elif src_vol_id and not img_id and not snap_id:
        # Clone an existing volume; inherit its bootable flag
        self.log.debug('Cloning cinder volume...')
        bootable = cinder.volumes.get(src_vol_id).bootable
    elif snap_id and not src_vol_id and not img_id:
        # Create volume from snapshot; size and bootable flag come
        # from the snapshot's source volume, overriding vol_size.
        self.log.debug('Creating cinder volume from snapshot...')
        snap = cinder.volume_snapshots.find(id=snap_id)
        vol_size = snap.size
        snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
        bootable = cinder.volumes.get(snap_vol_id).bootable
    elif not img_id and not src_vol_id and not snap_id:
        # Create volume
        self.log.debug('Creating cinder volume...')
        bootable = 'false'
    else:
        # Impossible combination of parameters
        msg = ('Invalid method use - name:{} size:{} img_id:{} '
               'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
                                                 img_id, src_vol_id,
                                                 snap_id))
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Create new volume
    try:
        vol_new = cinder.volumes.create(display_name=vol_name,
                                        imageRef=img_id,
                                        size=vol_size,
                                        source_volid=src_vol_id,
                                        snapshot_id=snap_id)
        vol_id = vol_new.id
    except Exception as e:
        msg = 'Failed to create volume: {}'.format(e)
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Wait for volume to reach available status
    ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                       expected_stat="available",
                                       msg="Volume status wait")
    if not ret:
        msg = 'Cinder volume failed to reach expected state.'
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Re-validate new volume against the requested attributes
    self.log.debug('Validating volume attributes...')
    val_vol_name = cinder.volumes.get(vol_id).display_name
    val_vol_boot = cinder.volumes.get(vol_id).bootable
    val_vol_stat = cinder.volumes.get(vol_id).status
    val_vol_size = cinder.volumes.get(vol_id).size
    msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                '{} size:{}'.format(val_vol_name, vol_id,
                                    val_vol_stat, val_vol_boot,
                                    val_vol_size))

    if val_vol_boot == bootable and val_vol_stat == 'available' \
            and val_vol_name == vol_name and val_vol_size == vol_size:
        self.log.debug(msg_attr)
    else:
        msg = ('Volume validation failed, {}'.format(msg_attr))
        amulet.raise_status(amulet.FAIL, msg=msg)

    return vol_new
3285 | + |
def delete_resource(self, resource, resource_id,
                    msg="resource", max_wait=120):
    """Delete one openstack resource, such as one instance, keypair,
    image, volume, stack, etc., and confirm deletion within max wait
    time by polling the resource list length.

    :param resource: pointer to os resource type, ex:glance_client.images
    :param resource_id: unique name or id for the openstack resource
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if successful, otherwise False
    """
    self.log.debug('Deleting OpenStack resource '
                   '{} ({})'.format(resource_id, msg))
    num_before = len(list(resource.list()))
    resource.delete(resource_id)

    # Poll every 4 seconds until the list shrinks by one or we time out
    attempts = 0
    num_after = len(list(resource.list()))
    while num_after != (num_before - 1) and attempts < (max_wait / 4):
        self.log.debug('{} delete check: '
                       '{} [{}:{}] {}'.format(msg, attempts,
                                              num_before,
                                              num_after,
                                              resource_id))
        time.sleep(4)
        num_after = len(list(resource.list()))
        attempts += 1

    self.log.debug('{}: expected, actual count = {}, '
                   '{}'.format(msg, num_before - 1, num_after))

    if num_after != (num_before - 1):
        self.log.error('{} delete timed out'.format(msg))
        return False
    return True
3322 | + |
def resource_reaches_status(self, resource, resource_id,
                            expected_stat='available',
                            msg='resource', max_wait=120):
    """Wait for an openstack resource's status to reach an expected
    status within a specified time.  Useful to confirm that nova
    instances, cinder vols, snapshots, glance images, heat stacks
    and other resources eventually reach the expected status.

    :param resource: pointer to os resource type, ex: heat_client.stacks
    :param resource_id: unique id for the openstack resource
    :param expected_stat: status to expect resource to reach
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if successful, False if status is not reached
    """
    # Poll every 4 seconds until the status matches or we time out
    attempts = 0
    current_stat = resource.get(resource_id).status
    while current_stat != expected_stat and attempts < (max_wait / 4):
        self.log.debug('{} status check: '
                       '{} [{}:{}] {}'.format(msg, attempts,
                                              current_stat,
                                              expected_stat,
                                              resource_id))
        time.sleep(4)
        current_stat = resource.get(resource_id).status
        attempts += 1

    self.log.debug('{}: expected, actual status = {}, '
                   '{}'.format(msg, current_stat, expected_stat))

    if current_stat == expected_stat:
        return True
    self.log.debug('{} never reached expected status: '
                   '{}'.format(resource_id, expected_stat))
    return False
3360 | + |
def get_ceph_osd_id_cmd(self, index):
    """Produce a shell command that will return a ceph-osd id."""
    # awk rows are 1-based, hence index + 1
    template = ("`initctl list | grep 'ceph-osd ' | "
                "awk 'NR=={} {{ print $2 }}' | "
                "grep -o '[0-9]*'`")
    return template.format(index + 1)
3366 | + |
def get_ceph_pools(self, sentry_unit):
    """Return a dict of ceph pools from a single ceph unit, with
    pool name as keys, pool id as vals."""
    cmd = 'sudo ceph osd lspools'
    output, code = sentry_unit.run(cmd)
    if code != 0:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           cmd, code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
    pools = {}
    for entry in str(output).split(','):
        fields = entry.split(' ')
        if len(fields) == 2:
            pools[fields[1]] = int(fields[0])

    self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
                                            pools))
    return pools
3390 | + |
def get_ceph_df(self, sentry_unit):
    """Return dict of ceph df json output, including ceph pool state.

    :param sentry_unit: Pointer to amulet sentry instance (juju unit)
    :returns: Dict of ceph df output
    """
    output, code = sentry_unit.run('sudo ceph df --format=json')
    if code != 0:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           'sudo ceph df --format=json', code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)
    return json.loads(output)
3405 | + |
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
    """Take a sample of attributes of a ceph pool, returning ceph
    pool name, object count and disk space used for the specified
    pool ID number.

    :param sentry_unit: Pointer to amulet sentry instance (juju unit)
    :param pool_id: Ceph pool ID
    :returns: List of pool name, object count, kb disk space used
    """
    df = self.get_ceph_df(sentry_unit)
    pool = df['pools'][pool_id]
    pool_name = pool['name']
    obj_count = pool['stats']['objects']
    kb_used = pool['stats']['kb_used']
    self.log.debug('Ceph {} pool (ID {}): {} objects, '
                   '{} kb used'.format(pool_name, pool_id,
                                       obj_count, kb_used))
    return pool_name, obj_count, kb_used
3423 | + |
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
    """Validate ceph pool samples taken over time, such as pool
    object counts or pool kb used, before adding, after adding, and
    after deleting items which affect those pool attributes.  The
    2nd element is expected to be greater than the 1st; 3rd is
    expected to be less than the 2nd.

    :param samples: List containing 3 data samples
    :param sample_type: String for logging and usage context
    :returns: None if successful, Failure message otherwise
    """
    grew = samples[1] > samples[0]
    shrank = samples[2] < samples[1]
    if grew and shrank:
        self.log.debug('Ceph {} samples (OK): '
                       '{}'.format(sample_type, samples))
        return None
    return ('Ceph {} samples ({}) '
            'unexpected.'.format(sample_type, samples))
3444 | + |
3445 | + # rabbitmq/amqp specific helpers: |
3446 | + |
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
    """Wait for rmq units' extended status to show cluster readiness,
    after an optional initial sleep period.  The initial sleep is
    likely necessary following a config change, as the status
    message may not instantly update to non-ready."""
    if init_sleep:
        time.sleep(init_sleep)

    ready_pattern = re.compile('^Unit is ready and clustered$')
    deployment._auto_wait_for_status(message=ready_pattern,
                                     timeout=timeout,
                                     include_only=['rabbitmq-server'])
3460 | + |
def add_rmq_test_user(self, sentry_units,
                      username="testuser1", password="changeme"):
    """Add a test user via the first rmq juju unit, check connection as
    the new user against all sentry units.

    :param sentry_units: list of sentry unit pointers
    :param username: amqp user name, default to testuser1
    :param password: amqp user password
    :returns: None if successful.  Raise on error.
    """
    self.log.debug('Adding rmq user ({})...'.format(username))

    # Skip creation when the user is already present
    output, _ = self.run_cmd_unit(sentry_units[0],
                                  'rabbitmqctl list_users')
    if username in output:
        self.log.warning('User ({}) already exists, returning '
                         'gracefully.'.format(username))
        return

    perms = '".*" ".*" ".*"'
    setup_cmds = ('rabbitmqctl add_user {} {}'.format(username, password),
                  'rabbitmqctl set_permissions {} {}'.format(username,
                                                             perms))

    # Add user via first unit
    for cmd in setup_cmds:
        output, _ = self.run_cmd_unit(sentry_units[0], cmd)

    # Check connection against every sentry unit
    self.log.debug('Checking user connect against units...')
    for sentry_unit in sentry_units:
        conn = self.connect_amqp_by_unit(sentry_unit, ssl=False,
                                         username=username,
                                         password=password)
        conn.close()
3496 | + |
def delete_rmq_test_user(self, sentry_units, username="testuser1"):
    """Delete a rabbitmq user via the first rmq juju unit.

    :param sentry_units: list of sentry unit pointers
    :param username: amqp user name, default to testuser1
    :returns: None if successful or no such user.
    """
    self.log.debug('Deleting rmq user ({})...'.format(username))

    # Nothing to do when the user is absent
    output, _ = self.run_cmd_unit(sentry_units[0],
                                  'rabbitmqctl list_users')
    if username not in output:
        self.log.warning('User ({}) does not exist, returning '
                         'gracefully.'.format(username))
        return

    # Delete the user via the first unit
    output, _ = self.run_cmd_unit(
        sentry_units[0], 'rabbitmqctl delete_user {}'.format(username))
3519 | + |
def get_rmq_cluster_status(self, sentry_unit):
    """Execute rabbitmq cluster status command on a unit and return
    the full output.

    :param sentry_unit: sentry unit
    :returns: String containing console output of cluster status command
    """
    output, _ = self.run_cmd_unit(sentry_unit,
                                  'rabbitmqctl cluster_status')
    self.log.debug('{} cluster_status:\n{}'.format(
        sentry_unit.info['unit_name'], output))
    return str(output)
3532 | + |
def get_rmq_cluster_running_nodes(self, sentry_unit):
    """Parse rabbitmqctl cluster_status output string, return list of
    running rabbitmq cluster nodes.

    :param sentry_unit: sentry unit
    :returns: List containing node names of running nodes
    """
    # NOTE(beisner): rabbitmqctl cluster_status output is not
    # json-parsable, do string chop foo, then json.loads that.
    str_stat = self.get_rmq_cluster_status(sentry_unit)
    if 'running_nodes' not in str_stat:
        return []
    start = str_stat.find("{running_nodes,") + 15
    end = str_stat.find("]},", start) + 1
    node_list_json = str_stat[start:end].replace("'", '"')
    return json.loads(node_list_json)
3551 | + |
def validate_rmq_cluster_running_nodes(self, sentry_units):
    """Check that all rmq unit hostnames are represented in the
    cluster_status output of all units.

    :param sentry_units: list of sentry unit pointers (all rmq units)
    :returns: None if successful, otherwise return error message
    """
    host_names = self.get_unit_hostnames(sentry_units)
    errors = []

    # Every unit's cluster_status must list every unit as a running node
    for query_unit in sentry_units:
        query_unit_name = query_unit.info['unit_name']
        running_nodes = self.get_rmq_cluster_running_nodes(query_unit)

        for validate_unit in sentry_units:
            expected_node = 'rabbit@{}'.format(
                host_names[validate_unit.info['unit_name']])
            if expected_node not in running_nodes:
                errors.append('Cluster member check failed on {}: {} not '
                              'in {}\n'.format(query_unit_name,
                                               expected_node,
                                               running_nodes))
    if errors:
        return ''.join(errors)
3581 | + |
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
    """Check a single juju rmq unit for ssl and port in the config file."""
    host = sentry_unit.info['public-address']
    unit_name = sentry_unit.info['unit_name']

    conf_file = '/etc/rabbitmq/rabbitmq.config'
    conf_contents = str(self.file_contents_safe(sentry_unit,
                                                conf_file, max_wait=16))
    has_ssl = 'ssl' in conf_contents
    has_port = str(port) in conf_contents

    # Guard-clause form of the original branch ladder; the original
    # final 'else' (unknown condition) was unreachable and is dropped.
    if not has_ssl:
        self.log.debug('SSL not enabled @{}:{} '
                       '({})'.format(host, port, unit_name))
        return False
    if port and not has_port:
        self.log.debug('SSL is enabled @{} but not on port {} '
                       '({})'.format(host, port, unit_name))
        return False
    self.log.debug('SSL is enabled @{}:{} '
                   '({})'.format(host, port, unit_name))
    return True
3616 | + |
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
    """Check that ssl is enabled on rmq juju sentry units.

    :param sentry_units: list of all rmq sentry units
    :param port: optional ssl port override to validate
    :returns: None if successful, otherwise return error message
    """
    for unit in sentry_units:
        if not self.rmq_ssl_is_enabled_on_unit(unit, port=port):
            return ('Unexpected condition: ssl is disabled on unit '
                    '({})'.format(unit.info['unit_name']))
    return None
3629 | + |
def validate_rmq_ssl_disabled_units(self, sentry_units):
    """Check that ssl is disabled on the listed rmq juju sentry units.

    :param sentry_units: list of all rmq sentry units
    :returns: None if successful, otherwise return error message
    """
    for unit in sentry_units:
        if self.rmq_ssl_is_enabled_on_unit(unit):
            return ('Unexpected condition: ssl is enabled on unit '
                    '({})'.format(unit.info['unit_name']))
    return None
3641 | + |
def configure_rmq_ssl_on(self, sentry_units, deployment,
                         port=None, max_wait=60):
    """Turn ssl charm config option on, with optional non-default
    ssl port specification.  Confirm that it is enabled on every
    unit.

    :param sentry_units: list of sentry units
    :param deployment: amulet deployment object pointer
    :param port: amqp port, use defaults if None
    :param max_wait: maximum time to wait in seconds to confirm
    :returns: None if successful.  Raise on error.
    """
    self.log.debug('Setting ssl charm config option: on')

    # Enable RMQ SSL, optionally overriding the port
    config = {'ssl': 'on'}
    if port:
        config['ssl_port'] = port
    deployment.d.configure('rabbitmq-server', config)

    # Wait for unit status
    self.rmq_wait_for_cluster(deployment)

    # Confirm, re-checking every 4 seconds until success or timeout
    attempts = 0
    ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
    while ret and attempts < (max_wait / 4):
        time.sleep(4)
        self.log.debug('Attempt {}: {}'.format(attempts, ret))
        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
        attempts += 1

    if ret:
        amulet.raise_status(amulet.FAIL, ret)
3677 | + |
3678 | + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): |
3679 | + """Turn ssl charm config option off, confirm that it is disabled |
3680 | + on every unit. |
3681 | + |
3682 | + :param sentry_units: list of sentry units |
3683 | + :param deployment: amulet deployment object pointer |
3684 | + :param max_wait: maximum time to wait in seconds to confirm |
3685 | + :returns: None if successful. Raise on error. |
3686 | + """ |
3687 | + self.log.debug('Setting ssl charm config option: off') |
3688 | + |
3689 | + # Disable RMQ SSL |
3690 | + config = {'ssl': 'off'} |
3691 | + deployment.d.configure('rabbitmq-server', config) |
3692 | + |
3693 | + # Wait for unit status |
3694 | + self.rmq_wait_for_cluster(deployment) |
3695 | + |
3696 | + # Confirm |
3697 | + tries = 0 |
3698 | + ret = self.validate_rmq_ssl_disabled_units(sentry_units) |
3699 | + while ret and tries < (max_wait / 4): |
3700 | + time.sleep(4) |
3701 | + self.log.debug('Attempt {}: {}'.format(tries, ret)) |
3702 | + ret = self.validate_rmq_ssl_disabled_units(sentry_units) |
3703 | + tries += 1 |
3704 | + |
3705 | + if ret: |
3706 | + amulet.raise_status(amulet.FAIL, ret) |
3707 | + |
3708 | + def connect_amqp_by_unit(self, sentry_unit, ssl=False, |
3709 | + port=None, fatal=True, |
3710 | + username="testuser1", password="changeme"): |
3711 | + """Establish and return a pika amqp connection to the rabbitmq service |
3712 | + running on a rmq juju unit. |
3713 | + |
3714 | + :param sentry_unit: sentry unit pointer |
3715 | + :param ssl: boolean, default to False |
3716 | + :param port: amqp port, use defaults if None |
3717 | + :param fatal: boolean, default to True (raises on connect error) |
3718 | + :param username: amqp user name, default to testuser1 |
3719 | + :param password: amqp user password |
3720 | + :returns: pika amqp connection pointer or None if failed and non-fatal |
3721 | + """ |
3722 | + host = sentry_unit.info['public-address'] |
3723 | + unit_name = sentry_unit.info['unit_name'] |
3724 | + |
3725 | + # Default port logic if port is not specified |
3726 | + if ssl and not port: |
3727 | + port = 5671 |
3728 | + elif not ssl and not port: |
3729 | + port = 5672 |
3730 | + |
3731 | + self.log.debug('Connecting to amqp on {}:{} ({}) as ' |
3732 | + '{}...'.format(host, port, unit_name, username)) |
3733 | + |
3734 | + try: |
3735 | + credentials = pika.PlainCredentials(username, password) |
3736 | + parameters = pika.ConnectionParameters(host=host, port=port, |
3737 | + credentials=credentials, |
3738 | + ssl=ssl, |
3739 | + connection_attempts=3, |
3740 | + retry_delay=5, |
3741 | + socket_timeout=1) |
3742 | + connection = pika.BlockingConnection(parameters) |
3743 | + assert connection.server_properties['product'] == 'RabbitMQ' |
3744 | + self.log.debug('Connect OK') |
3745 | + return connection |
3746 | + except Exception as e: |
3747 | + msg = ('amqp connection failed to {}:{} as ' |
3748 | + '{} ({})'.format(host, port, username, str(e))) |
3749 | + if fatal: |
3750 | + amulet.raise_status(amulet.FAIL, msg) |
3751 | + else: |
3752 | + self.log.warn(msg) |
3753 | + return None |
3754 | + |
3755 | + def publish_amqp_message_by_unit(self, sentry_unit, message, |
3756 | + queue="test", ssl=False, |
3757 | + username="testuser1", |
3758 | + password="changeme", |
3759 | + port=None): |
3760 | + """Publish an amqp message to a rmq juju unit. |
3761 | + |
3762 | + :param sentry_unit: sentry unit pointer |
3763 | + :param message: amqp message string |
3764 | + :param queue: message queue, default to test |
3765 | + :param username: amqp user name, default to testuser1 |
3766 | + :param password: amqp user password |
3767 | + :param ssl: boolean, default to False |
3768 | + :param port: amqp port, use defaults if None |
3769 | + :returns: None. Raises exception if publish failed. |
3770 | + """ |
3771 | + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, |
3772 | + message)) |
3773 | + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, |
3774 | + port=port, |
3775 | + username=username, |
3776 | + password=password) |
3777 | + |
3778 | + # NOTE(beisner): extra debug here re: pika hang potential: |
3779 | + # https://github.com/pika/pika/issues/297 |
3780 | + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw |
3781 | + self.log.debug('Defining channel...') |
3782 | + channel = connection.channel() |
3783 | + self.log.debug('Declaring queue...') |
3784 | + channel.queue_declare(queue=queue, auto_delete=False, durable=True) |
3785 | + self.log.debug('Publishing message...') |
3786 | + channel.basic_publish(exchange='', routing_key=queue, body=message) |
3787 | + self.log.debug('Closing channel...') |
3788 | + channel.close() |
3789 | + self.log.debug('Closing connection...') |
3790 | + connection.close() |
3791 | + |
3792 | + def get_amqp_message_by_unit(self, sentry_unit, queue="test", |
3793 | + username="testuser1", |
3794 | + password="changeme", |
3795 | + ssl=False, port=None): |
3796 | + """Get an amqp message from a rmq juju unit. |
3797 | + |
3798 | + :param sentry_unit: sentry unit pointer |
3799 | + :param queue: message queue, default to test |
3800 | + :param username: amqp user name, default to testuser1 |
3801 | + :param password: amqp user password |
3802 | + :param ssl: boolean, default to False |
3803 | + :param port: amqp port, use defaults if None |
3804 | + :returns: amqp message body as string. Raise if get fails. |
3805 | + """ |
3806 | + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, |
3807 | + port=port, |
3808 | + username=username, |
3809 | + password=password) |
3810 | + channel = connection.channel() |
3811 | + method_frame, _, body = channel.basic_get(queue) |
3812 | + |
3813 | + if method_frame: |
3814 | + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, |
3815 | + body)) |
3816 | + channel.basic_ack(method_frame.delivery_tag) |
3817 | + channel.close() |
3818 | + connection.close() |
3819 | + return body |
3820 | + else: |
3821 | + msg = 'No message retrieved.' |
3822 | + amulet.raise_status(amulet.FAIL, msg) |
3823 | |
3824 | === added file 'charmhelpers/contrib/openstack/context.py' |
3825 | --- charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000 |
3826 | +++ charmhelpers/contrib/openstack/context.py 2016-03-30 08:19:58 +0000 |
3827 | @@ -0,0 +1,1473 @@ |
3828 | +# Copyright 2014-2015 Canonical Limited. |
3829 | +# |
3830 | +# This file is part of charm-helpers. |
3831 | +# |
3832 | +# charm-helpers is free software: you can redistribute it and/or modify |
3833 | +# it under the terms of the GNU Lesser General Public License version 3 as |
3834 | +# published by the Free Software Foundation. |
3835 | +# |
3836 | +# charm-helpers is distributed in the hope that it will be useful, |
3837 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
3838 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
3839 | +# GNU Lesser General Public License for more details. |
3840 | +# |
3841 | +# You should have received a copy of the GNU Lesser General Public License |
3842 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
3843 | + |
3844 | +import glob |
3845 | +import json |
3846 | +import os |
3847 | +import re |
3848 | +import time |
3849 | +from base64 import b64decode |
3850 | +from subprocess import check_call |
3851 | + |
3852 | +import six |
3853 | +import yaml |
3854 | + |
3855 | +from charmhelpers.fetch import ( |
3856 | + apt_install, |
3857 | + filter_installed_packages, |
3858 | +) |
3859 | +from charmhelpers.core.hookenv import ( |
3860 | + config, |
3861 | + is_relation_made, |
3862 | + local_unit, |
3863 | + log, |
3864 | + relation_get, |
3865 | + relation_ids, |
3866 | + related_units, |
3867 | + relation_set, |
3868 | + unit_get, |
3869 | + unit_private_ip, |
3870 | + charm_name, |
3871 | + DEBUG, |
3872 | + INFO, |
3873 | + WARNING, |
3874 | + ERROR, |
3875 | +) |
3876 | + |
3877 | +from charmhelpers.core.sysctl import create as sysctl_create |
3878 | +from charmhelpers.core.strutils import bool_from_string |
3879 | + |
3880 | +from charmhelpers.core.host import ( |
3881 | + get_bond_master, |
3882 | + is_phy_iface, |
3883 | + list_nics, |
3884 | + get_nic_hwaddr, |
3885 | + mkdir, |
3886 | + write_file, |
3887 | + pwgen, |
3888 | +) |
3889 | +from charmhelpers.contrib.hahelpers.cluster import ( |
3890 | + determine_apache_port, |
3891 | + determine_api_port, |
3892 | + https, |
3893 | + is_clustered, |
3894 | +) |
3895 | +from charmhelpers.contrib.hahelpers.apache import ( |
3896 | + get_cert, |
3897 | + get_ca_cert, |
3898 | + install_ca_cert, |
3899 | +) |
3900 | +from charmhelpers.contrib.openstack.neutron import ( |
3901 | + neutron_plugin_attribute, |
3902 | + parse_data_port_mappings, |
3903 | +) |
3904 | +from charmhelpers.contrib.openstack.ip import ( |
3905 | + resolve_address, |
3906 | + INTERNAL, |
3907 | +) |
3908 | +from charmhelpers.contrib.network.ip import ( |
3909 | + get_address_in_network, |
3910 | + get_ipv4_addr, |
3911 | + get_ipv6_addr, |
3912 | + get_netmask_for_address, |
3913 | + format_ipv6_addr, |
3914 | + is_address_in_network, |
3915 | + is_bridge_member, |
3916 | +) |
3917 | +from charmhelpers.contrib.openstack.utils import get_host_ip |
3918 | +from charmhelpers.core.unitdata import kv |
3919 | + |
3920 | +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
3921 | +ADDRESS_TYPES = ['admin', 'internal', 'public'] |
3922 | + |
3923 | + |
class OSContextError(Exception):
    """Raised when a context generator cannot build its context, e.g.
    required charm config options are missing or malformed."""
    pass
3926 | + |
3927 | + |
def ensure_packages(packages):
    """Install any of the given packages that are not yet present,
    without upgrading those already installed."""
    missing = filter_installed_packages(packages)
    if missing:
        apt_install(missing, fatal=True)
3933 | + |
3934 | + |
def context_complete(ctxt):
    """Return True if every value in ctxt is non-empty.

    Logs the keys of any missing (None or empty-string) values and
    returns False when at least one is found.
    """
    incomplete = [key for key, value in six.iteritems(ctxt)
                  if value is None or value == '']
    if incomplete:
        log('Missing required data: %s' % ' '.join(incomplete), level=INFO)
        return False
    return True
3946 | + |
3947 | + |
def config_flags_parser(config_flags):
    """Parse a config-flags string into a dict.

    Supported input forms:

    1. Simple 'key1=value1, key2=value2' pairs, possibly several in one
       string.

    2. The same form where a value is itself a comma-delimited list, e.g.
       'key1=value1, key2=value3,value4,value5' yields
       {'key1': 'value1', 'key2': 'value3,value4,value5'}.

    3. A string containing a colon (:) before any equals sign (=) is
       treated as inline yaml and handed to yaml.safe_load, allowing
       arbitrarily nested key/value structures.

    :param config_flags: raw flags string from charm config
    :returns: dict of parsed flags
    :raises OSContextError: when the string is not in key=value form
    """
    # A colon appearing before any '=' indicates inline yaml assignment.
    colon = config_flags.find(':')
    equals = config_flags.find('=')
    if colon > 0 and (colon < equals or equals < 0):
        return yaml.safe_load(config_flags)

    if config_flags.find('==') >= 0:
        log("config_flags is not in expected format (key=value)", level=ERROR)
        raise OSContextError

    # Characters stripped from the edges of each parsed key/value.
    post_strippers = ' ,'
    # Drop any leading/trailing '=' or ' ' then split on '=' so each
    # token (except the first/last) holds "value,nextkey".
    tokens = config_flags.strip(' =').split('=')
    limit = len(tokens)
    flags = {}
    for idx in range(limit - 1):
        token = tokens[idx]
        nxt = tokens[idx + 1]
        # The value ends at the last comma of the next token, unless this
        # is the final pair or the next token has no comma at all.
        comma = nxt.rfind(',')
        if (idx == limit - 2) or (comma < 0):
            value = nxt
        else:
            value = nxt[:comma]

        if idx == 0:
            key = token
        else:
            # Subsequent tokens must embed the key after their last comma.
            sep = token.rfind(',')
            if sep < 0:
                log("Invalid config value(s) at index %s" % (idx), level=ERROR)
                raise OSContextError
            key = token[sep + 1:]

        flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

    return flags
4023 | + |
4024 | + |
class OSContextGenerator(object):
    """Base class for all context generators.

    Subclasses implement __call__() to return a template context dict.
    The related/complete/missing_data flags are status bookkeeping that
    context_complete() and get_related() maintain for consumers.
    """
    # Relation interface names this generator inspects.
    interfaces = []
    # True once any interface in self.interfaces has a relation id.
    related = False
    # True once context_complete() last found no missing values.
    complete = False
    # Keys that were missing on the last context_complete() call.
    # NOTE: class-level default; context_complete() rebinds a fresh list
    # on the instance, so it is not shared in practice.
    missing_data = []

    def __call__(self):
        raise NotImplementedError

    def context_complete(self, ctxt):
        """Check for missing data for the required context data.
        Set self.missing_data if it exists and return False.
        Set self.complete if no missing data and return True.

        :param ctxt: candidate context dict
        :returns: bool, True when every value in ctxt is non-empty
        """
        # Fresh start
        self.complete = False
        self.missing_data = []
        for k, v in six.iteritems(ctxt):
            if v is None or v == '':
                if k not in self.missing_data:
                    self.missing_data.append(k)

        if self.missing_data:
            self.complete = False
            log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
        else:
            self.complete = True
        return self.complete

    def get_related(self):
        """Check if any of the context interfaces have relation ids.
        Set self.related and return True if one of the interfaces
        has relation ids.

        :returns: bool, whether any interface has at least one relation
        """
        # Fresh start
        self.related = False
        try:
            for interface in self.interfaces:
                if relation_ids(interface):
                    self.related = True
            return self.related
        except AttributeError as e:
            # NOTE(review): the level is passed positionally as the
            # string 'INFO' rather than the INFO constant; it works since
            # the constants are strings, but is inconsistent with the
            # rest of the file.
            log("{} {}"
                "".format(self, e), 'INFO')
        return self.related
4071 | + |
4072 | + |
class SharedDBContext(OSContextGenerator):
    """Context generator for the shared-db (mysql) relation."""
    interfaces = ['shared-db']

    def __init__(self,
                 database=None, user=None, relation_prefix=None, ssl_dir=None):
        """Allows inspecting relation for settings prefixed with
        relation_prefix. This is useful for parsing access for multiple
        databases returned via the shared-db interface (eg, nova_password,
        quantum_password)

        :param database: database name (falls back to config 'database')
        :param user: database user (falls back to config 'database-user')
        :param relation_prefix: optional prefix for relation setting keys
        :param ssl_dir: directory for client ssl material, or None
        """
        self.relation_prefix = relation_prefix
        self.database = database
        self.user = user
        self.ssl_dir = ssl_dir
        self.rel_name = self.interfaces[0]

    def __call__(self):
        """Build the shared-db context dict, or {} if data is incomplete.

        :returns: context dict, {} when incomplete, or None when hook
                  execution is deferred pending access-network setup
        :raises OSContextError: if database name/user cannot be resolved
        """
        self.database = self.database or config('database')
        self.user = self.user or config('database-user')
        if None in [self.database, self.user]:
            log("Could not generate shared_db context. Missing required charm "
                "config options. (database name and user)", level=ERROR)
            raise OSContextError

        ctxt = {}

        # NOTE(jamespage) if mysql charm provides a network upon which
        # access to the database should be made, reconfigure relation
        # with the service units local address and defer execution
        access_network = relation_get('access-network')
        if access_network is not None:
            if self.relation_prefix is not None:
                hostname_key = "{}_hostname".format(self.relation_prefix)
            else:
                hostname_key = "hostname"
            access_hostname = get_address_in_network(access_network,
                                                     unit_get('private-address'))
            set_hostname = relation_get(attribute=hostname_key,
                                        unit=local_unit())
            if set_hostname != access_hostname:
                relation_set(relation_settings={hostname_key: access_hostname})
                return None  # Defer any further hook execution for now....

        password_setting = 'password'
        if self.relation_prefix:
            password_setting = self.relation_prefix + '_password'

        # Return the first complete set of credentials found on any
        # related unit.
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                host = rdata.get('db_host')
                host = format_ipv6_addr(host) or host
                ctxt = {
                    'database_host': host,
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': rdata.get(password_setting),
                    'database_type': 'mysql'
                }
                if self.context_complete(ctxt):
                    # Augment with client ssl material if provided.
                    db_ssl(rdata, ctxt, self.ssl_dir)
                    return ctxt
        return {}
4137 | + |
4138 | + |
class PostgresqlDBContext(OSContextGenerator):
    """Context generator for the pgsql-db relation."""
    interfaces = ['pgsql-db']

    def __init__(self, database=None):
        self.database = database

    def __call__(self):
        """Return the first complete postgresql context found on any
        related unit, or {} when none is complete.

        :raises OSContextError: when no database name can be resolved
        """
        self.database = self.database or config('database')
        if self.database is None:
            log('Could not generate postgresql_db context. Missing required '
                'charm config options. (database name)', level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.interfaces[0]):
            self.related = True
            for unit in related_units(rid):
                ctxt = {
                    'database_host': relation_get('host', rid=rid, unit=unit),
                    'database': self.database,
                    'database_user': relation_get('user', rid=rid, unit=unit),
                    'database_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'database_type': 'postgresql',
                }
                if self.context_complete(ctxt):
                    return ctxt

        return {}
4168 | + |
4169 | + |
def db_ssl(rdata, ctxt, ssl_dir):
    """Write database client ssl material from relation data into ssl_dir
    and record the resulting file paths in ctxt.

    :param rdata: relation data dict, may carry base64-encoded 'ssl_ca',
                  'ssl_cert' and 'ssl_key' entries
    :param ctxt: context dict to augment with database_ssl_* paths
    :param ssl_dir: directory to write material into; if falsy, ssl data
                    in rdata is logged and ignored
    :returns: ctxt (also mutated in place)
    """
    if 'ssl_ca' in rdata and ssl_dir:
        ca_path = os.path.join(ssl_dir, 'db-client.ca')
        # NOTE(review): b64decode returns bytes on python 3 while the
        # file is opened in text mode -- presumably this ran under
        # python 2; confirm before reuse.
        with open(ca_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_ca']))

        ctxt['database_ssl_ca'] = ca_path
    elif 'ssl_ca' in rdata:
        log("Charm not setup for ssl support but ssl ca found", level=INFO)
        return ctxt

    if 'ssl_cert' in rdata:
        cert_path = os.path.join(
            ssl_dir, 'db-client.cert')
        if not os.path.exists(cert_path):
            # First write of the cert: allow time for its validity window
            # to open before clients use it.
            log("Waiting 1m for ssl client cert validity", level=INFO)
            time.sleep(60)

        with open(cert_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_cert']))

        ctxt['database_ssl_cert'] = cert_path
        key_path = os.path.join(ssl_dir, 'db-client.key')
        # NOTE(review): assumes 'ssl_key' always accompanies 'ssl_cert';
        # a KeyError is raised otherwise -- confirm with the db charm.
        with open(key_path, 'w') as fh:
            fh.write(b64decode(rdata['ssl_key']))

        ctxt['database_ssl_key'] = key_path

    return ctxt
4199 | + |
4200 | + |
class IdentityServiceContext(OSContextGenerator):
    """Context generator for the identity-service (keystone) relation."""

    def __init__(self, service=None, service_user=None, rel_name='identity-service'):
        """
        :param service: service name, used to create a pki signing cache
                        dir when given together with service_user
        :param service_user: unix user owning the signing cache dir
        :param rel_name: relation name to inspect (default
                         'identity-service')
        """
        self.service = service
        self.service_user = service_user
        self.rel_name = rel_name
        self.interfaces = [self.rel_name]

    def __call__(self):
        """Return the first complete identity context found on any
        related unit, or {} when none is complete."""
        log('Generating template context for ' + self.rel_name, level=DEBUG)
        ctxt = {}

        if self.service and self.service_user:
            # This is required for pki token signing if we don't want /tmp to
            # be used.
            cachedir = '/var/cache/%s' % (self.service)
            if not os.path.isdir(cachedir):
                log("Creating service cache dir %s" % (cachedir), level=DEBUG)
                mkdir(path=cachedir, owner=self.service_user,
                      group=self.service_user, perms=0o700)

            ctxt['signing_dir'] = cachedir

        for rid in relation_ids(self.rel_name):
            self.related = True
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                serv_host = rdata.get('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host
                # Protocols default to http when the relation omits them.
                svc_protocol = rdata.get('service_protocol') or 'http'
                auth_protocol = rdata.get('auth_protocol') or 'http'
                ctxt.update({'service_port': rdata.get('service_port'),
                             'service_host': serv_host,
                             'auth_host': auth_host,
                             'auth_port': rdata.get('auth_port'),
                             'admin_tenant_name': rdata.get('service_tenant'),
                             'admin_user': rdata.get('service_username'),
                             'admin_password': rdata.get('service_password'),
                             'service_protocol': svc_protocol,
                             'auth_protocol': auth_protocol})

                if self.context_complete(ctxt):
                    # NOTE(jamespage) this is required for >= icehouse
                    # so a missing value just indicates keystone needs
                    # upgrading
                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                    return ctxt

        return {}
4252 | + |
4253 | + |
class AMQPContext(OSContextGenerator):
    """Context generator for the amqp (rabbitmq) relation."""

    def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
        """
        :param ssl_dir: directory to write the rabbit client CA into,
                        or None if the charm does not support ssl
        :param rel_name: relation name to inspect (default 'amqp')
        :param relation_prefix: optional prefix for the rabbit-user /
                                rabbit-vhost config option names
        """
        self.ssl_dir = ssl_dir
        self.rel_name = rel_name
        self.relation_prefix = relation_prefix
        self.interfaces = [rel_name]

    def __call__(self):
        """Build the amqp context dict, or {} when incomplete.

        :raises OSContextError: if the rabbit user/vhost config options
                                are missing
        """
        log('Generating template context for amqp', level=DEBUG)
        conf = config()
        if self.relation_prefix:
            user_setting = '%s-rabbit-user' % (self.relation_prefix)
            vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
        else:
            user_setting = 'rabbit-user'
            vhost_setting = 'rabbit-vhost'

        try:
            username = conf[user_setting]
            vhost = conf[vhost_setting]
        except KeyError as e:
            # NOTE(review): message says 'shared_db' but this is the amqp
            # context -- looks like a copy/paste slip in the original.
            log('Could not generate shared_db context. Missing required charm '
                'config options: %s.' % e, level=ERROR)
            raise OSContextError

        ctxt = {}
        for rid in relation_ids(self.rel_name):
            ha_vip_only = False
            self.related = True
            for unit in related_units(rid):
                # Prefer the cluster VIP over the unit's own address when
                # the broker reports itself clustered.
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host

                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
                                                      unit=unit),
                    'rabbitmq_virtual_host': vhost,
                })

                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
                if ssl_port:
                    ctxt['rabbit_ssl_port'] = ssl_port

                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
                if ssl_ca:
                    ctxt['rabbit_ssl_ca'] = ssl_ca

                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
                    ctxt['rabbitmq_ha_queues'] = True

                ha_vip_only = relation_get('ha-vip-only',
                                           rid=rid, unit=unit) is not None

                if self.context_complete(ctxt):
                    if 'rabbit_ssl_ca' in ctxt:
                        if not self.ssl_dir:
                            log("Charm not setup for ssl support but ssl ca "
                                "found", level=INFO)
                            break

                        # Persist the CA to disk and swap the context value
                        # from the b64 payload to the file path.
                        ca_path = os.path.join(
                            self.ssl_dir, 'rabbit-client-ca.pem')
                        with open(ca_path, 'w') as fh:
                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
                        ctxt['rabbit_ssl_ca'] = ca_path

                    # Sufficient information found = break out!
                    break

            # Used for active/active rabbitmq >= grizzly
            if (('clustered' not in ctxt or ha_vip_only) and
                    len(related_units(rid)) > 1):
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)

                ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))

        oslo_messaging_flags = conf.get('oslo-messaging-flags', None)
        if oslo_messaging_flags:
            ctxt['oslo_messaging_flags'] = config_flags_parser(
                oslo_messaging_flags)

        # self.complete was set by the last context_complete() call above.
        if not self.complete:
            return {}

        return ctxt
4352 | + |
4353 | + |
class CephContext(OSContextGenerator):
    """Generates context for /etc/ceph/ceph.conf templates."""
    interfaces = ['ceph']

    def __call__(self):
        """Build the ceph context dict (auth, key, mon hosts), or {}
        when no ceph relation exists or data is incomplete."""
        if not relation_ids('ceph'):
            return {}

        log('Generating template context for ceph', level=DEBUG)
        mon_hosts = []
        ctxt = {
            'use_syslog': str(config('use-syslog')).lower()
        }
        for rid in relation_ids('ceph'):
            for unit in related_units(rid):
                # auth/key are taken from the first unit that provides them.
                if not ctxt.get('auth'):
                    ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
                if not ctxt.get('key'):
                    ctxt['key'] = relation_get('key', rid=rid, unit=unit)
                # Prefer the advertised ceph public address over the
                # unit's juju private address.
                ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                             unit=unit)
                unit_priv_addr = relation_get('private-address', rid=rid,
                                              unit=unit)
                ceph_addr = ceph_pub_addr or unit_priv_addr
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))

        # ceph.conf lives here; create the directory even before the
        # context is known to be complete.
        if not os.path.isdir('/etc/ceph'):
            os.mkdir('/etc/ceph')

        if not self.context_complete(ctxt):
            return {}

        ensure_packages(['ceph-common'])
        return ctxt
4391 | + |
4392 | + |
class HAProxyContext(OSContextGenerator):
    """Provides half a context for the haproxy template, which describes
    all peers to be included in the cluster. Each charm needs to include
    its own context generator that describes the port mapping.
    """
    interfaces = ['cluster']

    def __init__(self, singlenode_mode=False):
        # When True, haproxy is configured even with no cluster peers.
        self.singlenode_mode = singlenode_mode

    def __call__(self):
        """Build the haproxy frontend/backend map and settings, or {}
        when this unit has no peers (and singlenode_mode is off).

        Side effects: persists a generated stats password in the unit kv
        store, and writes ENABLED=1 to /etc/default/haproxy once enough
        backends exist.
        """
        if not relation_ids('cluster') and not self.singlenode_mode:
            return {}

        if config('prefer-ipv6'):
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = get_host_ip(unit_get('private-address'))

        # Unit names become haproxy backend ids ('/' is not allowed).
        l_unit = local_unit().replace('/', '-')
        cluster_hosts = {}

        # NOTE(jamespage): build out map of configured network endpoints
        # and associated backends
        for addr_type in ADDRESS_TYPES:
            cfg_opt = 'os-{}-network'.format(addr_type)
            laddr = get_address_in_network(config(cfg_opt))
            if laddr:
                netmask = get_netmask_for_address(laddr)
                cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
                                                                  netmask),
                                        'backends': {l_unit: laddr}}
                for rid in relation_ids('cluster'):
                    for unit in related_units(rid):
                        _laddr = relation_get('{}-address'.format(addr_type),
                                              rid=rid, unit=unit)
                        if _laddr:
                            _unit = unit.replace('/', '-')
                            cluster_hosts[laddr]['backends'][_unit] = _laddr

        # NOTE(jamespage) add backend based on private address - this
        # will either be the only backend or the fallback if no acls
        # match in the frontend
        cluster_hosts[addr] = {}
        netmask = get_netmask_for_address(addr)
        cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
                               'backends': {l_unit: addr}}
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                _laddr = relation_get('private-address',
                                      rid=rid, unit=unit)
                if _laddr:
                    _unit = unit.replace('/', '-')
                    cluster_hosts[addr]['backends'][_unit] = _laddr

        ctxt = {
            'frontends': cluster_hosts,
            'default_backend': addr
        }

        # Optional timeout overrides from charm config.
        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')

        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('haproxy-queue-timeout'):
            ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')

        if config('haproxy-connect-timeout'):
            ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')

        if config('prefer-ipv6'):
            ctxt['ipv6'] = True
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'

        ctxt['stat_port'] = '8888'

        # Generate the stats password once and persist it in unitdata so
        # it survives subsequent hook invocations.
        db = kv()
        ctxt['stat_password'] = db.get('stat-password')
        if not ctxt['stat_password']:
            ctxt['stat_password'] = db.set('stat-password',
                                           pwgen(32))
            db.flush()

        for frontend in cluster_hosts:
            if (len(cluster_hosts[frontend]['backends']) > 1 or
                    self.singlenode_mode):
                # Enable haproxy when we have enough peers.
                log('Ensuring haproxy enabled in /etc/default/haproxy.',
                    level=DEBUG)
                with open('/etc/default/haproxy', 'w') as out:
                    out.write('ENABLED=1\n')

                return ctxt

        log('HAProxy context is incomplete, this unit has no peers.',
            level=INFO)
        return {}
4496 | + |
4497 | + |
class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']

    def __call__(self):
        """Resolve the glance API server from the image-service relation.

        Useful in nova and cinder (currently). Returns an empty context
        until a related unit has published 'glance-api-server'.
        """
        log('Generating template context for image-service.', level=DEBUG)
        relation_list = relation_ids('image-service')
        if not relation_list:
            return {}

        # Use the first unit that has published an API server address.
        for relation in relation_list:
            for member in related_units(relation):
                server = relation_get('glance-api-server',
                                      rid=relation, unit=member)
                if server:
                    return {'glance_api_servers': server}

        log("ImageService context is incomplete. Missing required relation "
            "data.", level=INFO)
        return {}
4520 | + |
4521 | + |
class ApacheSSLContext(OSContextGenerator):
    """Generates a context for an apache vhost configuration that configures
    HTTPS reverse proxying for one or many endpoints. Generated context
    looks something like::

        {
            'namespace': 'cinder',
            'private_address': 'iscsi.mycinderhost.com',
            'endpoints': [(8776, 8766), (8777, 8767)]
        }

    The endpoints list consists of a tuples mapping external ports
    to internal ports.
    """
    interfaces = ['https']

    # charms should inherit this context and set external ports
    # and service namespace accordingly.
    external_ports = []
    service_namespace = None

    def enable_modules(self):
        """Enable the apache modules required for SSL reverse proxying."""
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self, cn=None):
        """Write the service SSL certificate and key to disk.

        :param cn: optional canonical name used to suffix the cert/key
                   filenames; when omitted the plain 'cert'/'key' names
                   are used.
        """
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'

        # Relation data carries the cert/key base64-encoded.
        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        """Install the CA certificate, if one is available."""
        ca_cert = get_ca_cert()
        if ca_cert:
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        """Figure out which canonical names clients will access this service.
        """
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        # NOTE: use slicing, not lstrip('ssl_key_');
                        # lstrip() strips *characters* from the set
                        # {s, l, _, k, e, y}, so it would also eat
                        # leading characters of the CN itself (e.g.
                        # 'keystone' -> 'tone').
                        cns.append(k[len('ssl_key_'):])

        return sorted(list(set(cns)))

    def get_network_addresses(self):
        """For each network configured, return corresponding address and vip
        (if available).

        Returns a list of tuples of the form:

            [(address_in_net_a, vip_in_net_a),
             (address_in_net_b, vip_in_net_b),
             ...]

        or, if no vip(s) available:

            [(address_in_net_a, address_in_net_a),
             (address_in_net_b, address_in_net_b),
             ...]
        """
        addresses = []
        if config('vip'):
            vips = config('vip').split()
        else:
            vips = []

        for net_type in ['os-internal-network', 'os-admin-network',
                         'os-public-network']:
            addr = get_address_in_network(config(net_type),
                                          unit_get('private-address'))
            if len(vips) > 1 and is_clustered():
                # Multiple vips: pair each address with the vip that lives
                # in the same network; skip networks with no config.
                if not config(net_type):
                    log("Multiple networks configured but net_type "
                        "is None (%s)." % net_type, level=WARNING)
                    continue

                for vip in vips:
                    if is_address_in_network(config(net_type), vip):
                        addresses.append((addr, vip))
                        break

            elif is_clustered() and config('vip'):
                addresses.append((addr, config('vip')))
            else:
                # Not clustered (or no vip): the unit address stands in
                # for the vip.
                addresses.append((addr, addr))

        return sorted(addresses)

    def __call__(self):
        if isinstance(self.external_ports, six.string_types):
            self.external_ports = [self.external_ports]

        if not self.external_ports or not https():
            return {}

        self.configure_ca()
        self.enable_modules()

        ctxt = {'namespace': self.service_namespace,
                'endpoints': [],
                'ext_ports': []}

        cns = self.canonical_names()
        if cns:
            for cn in cns:
                self.configure_cert(cn)
        else:
            # Expect cert/key provided in config (currently assumed that ca
            # uses ip for cn)
            cn = resolve_address(endpoint_type=INTERNAL)
            self.configure_cert(cn)

        addresses = self.get_network_addresses()
        for address, endpoint in sorted(set(addresses)):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port,
                                                 singlenode_mode=True)
                int_port = determine_api_port(api_port, singlenode_mode=True)
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))

        ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
        return ctxt
4661 | + |
4662 | + |
class NeutronContext(OSContextGenerator):
    """Base context for quantum/neutron network configuration.

    Subclasses supply the plugin, network_manager and
    neutron_security_groups properties; this class maps the selected
    plugin onto the corresponding per-plugin template context.
    """
    interfaces = []

    @property
    def plugin(self):
        # Subclasses return the configured neutron plugin name.
        return None

    @property
    def network_manager(self):
        # Subclasses return 'quantum' or 'neutron'.
        return None

    @property
    def packages(self):
        # Packages required by the selected plugin.
        return neutron_plugin_attribute(self.plugin, 'packages',
                                        self.network_manager)

    @property
    def neutron_security_groups(self):
        # Subclasses return whether security groups are enabled.
        return None

    def _ensure_packages(self):
        """Install any plugin packages that are not already present."""
        for pkgs in self.packages:
            ensure_packages(pkgs)

    def _save_flag_file(self):
        """Record the active plugin name so other services can read it."""
        if self.network_manager == 'quantum':
            _file = '/etc/nova/quantum_plugin.conf'
        else:
            _file = '/etc/nova/neutron_plugin.conf'

        # NOTE: must be text mode ('w'); self.plugin is a str and writing
        # it to a binary-mode ('wb') file raises TypeError on Python 3.
        with open(_file, 'w') as out:
            out.write(self.plugin + '\n')

    def ovs_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'ovs',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return ovs_ctxt

    def nuage_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nuage_ctxt = {'core_plugin': driver,
                      'neutron_plugin': 'vsp',
                      'neutron_security_groups': self.neutron_security_groups,
                      'local_ip': unit_private_ip(),
                      'config': config}

        return nuage_ctxt

    def nvp_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nvp_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'nvp',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}

        return nvp_ctxt

    def n1kv_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        # Named n1kv_config (not 'config') so the charm config() helper
        # remains callable below.
        n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
                                               self.network_manager)
        n1kv_user_config_flags = config('n1kv-config-flags')
        restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
        n1kv_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'n1kv',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': n1kv_config,
                     'vsm_ip': config('n1kv-vsm-ip'),
                     'vsm_username': config('n1kv-vsm-username'),
                     'vsm_password': config('n1kv-vsm-password'),
                     'restrict_policy_profiles': restrict_policy_profiles}

        if n1kv_user_config_flags:
            flags = config_flags_parser(n1kv_user_config_flags)
            n1kv_ctxt['user_config_flags'] = flags

        return n1kv_ctxt

    def calico_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        calico_ctxt = {'core_plugin': driver,
                       'neutron_plugin': 'Calico',
                       'neutron_security_groups': self.neutron_security_groups,
                       'local_ip': unit_private_ip(),
                       'config': config}

        return calico_ctxt

    def neutron_ctxt(self):
        """Common context: manager name plus the neutron server URL."""
        if https():
            proto = 'https'
        else:
            proto = 'http'

        if is_clustered():
            host = config('vip')
        else:
            host = unit_get('private-address')

        ctxt = {'network_manager': self.network_manager,
                'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
        return ctxt

    def pg_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {'core_plugin': driver,
                    'neutron_plugin': 'plumgrid',
                    'neutron_security_groups': self.neutron_security_groups,
                    'local_ip': unit_private_ip(),
                    'config': config}
        return ovs_ctxt

    def midonet_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
                                                  self.network_manager)
        mido_ctxt = {'core_plugin': driver,
                     'neutron_plugin': 'midonet',
                     'neutron_security_groups': self.neutron_security_groups,
                     'local_ip': unit_private_ip(),
                     'config': midonet_config}

        return mido_ctxt

    def __call__(self):
        if self.network_manager not in ['quantum', 'neutron']:
            return {}

        if not self.plugin:
            return {}

        ctxt = self.neutron_ctxt()

        # Merge in the plugin-specific context.
        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin in ['nvp', 'nsx']:
            ctxt.update(self.nvp_ctxt())
        elif self.plugin == 'n1kv':
            ctxt.update(self.n1kv_ctxt())
        elif self.plugin == 'Calico':
            ctxt.update(self.calico_ctxt())
        elif self.plugin == 'vsp':
            ctxt.update(self.nuage_ctxt())
        elif self.plugin == 'plumgrid':
            ctxt.update(self.pg_ctxt())
        elif self.plugin == 'midonet':
            ctxt.update(self.midonet_ctxt())

        alchemy_flags = config('neutron-alchemy-flags')
        if alchemy_flags:
            flags = config_flags_parser(alchemy_flags)
            ctxt['neutron_alchemy_flags'] = flags

        self._save_flag_file()
        return ctxt
4842 | + |
4843 | + |
class NeutronPortContext(OSContextGenerator):

    def resolve_ports(self, ports):
        """Resolve NICs not yet bound to bridge(s)

        If hwaddress provided then returns resolved hwaddress otherwise NIC.
        """
        if not ports:
            return None

        # Build MAC -> NIC and MAC -> assigned-address maps for every
        # physical interface on this host.
        mac_to_nic = {}
        mac_to_addrs = {}
        for iface in list_nics():
            # Ignore virtual interfaces (bond masters will be identified
            # from their slaves).
            if not is_phy_iface(iface):
                continue

            master = get_bond_master(iface)
            if master:
                log("Replacing iface '%s' with bond master '%s'" % (iface,
                                                                    master),
                    level=DEBUG)
                iface = master

            mac = get_nic_hwaddr(iface)
            mac_to_nic[mac] = iface
            mac_to_addrs[mac] = (get_ipv4_addr(iface, fatal=False) +
                                 get_ipv6_addr(iface=iface, fatal=False))

        mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
        resolved = []
        for entry in ports:
            if not re.match(mac_regex, entry):
                # If the passed entry is not a MAC address, assume it's a
                # valid interface, and that the user put it there on purpose
                # (we can trust it to be the real external network).
                resolved.append(entry)
                continue

            # Only accept a MAC that maps to a known NIC which has no IP
            # address assigned yet and is not already part of a bridge.
            if entry in mac_to_nic and not mac_to_addrs[entry]:
                if not is_bridge_member(mac_to_nic[entry]):
                    resolved.append(mac_to_nic[entry])

        # Ensure no duplicates
        return list(set(resolved))
4895 | + |
4896 | + |
class OSConfigFlagContext(OSContextGenerator):
    """Provides support for user-defined config flags.

    Users can define a comma-separated list of key=value pairs
    in the charm configuration and apply them at any point in
    any file by using a template flag.

    Sometimes users might want config flags inserted within a
    specific section so this class allows users to specify the
    template flag name, allowing for multiple template flags
    (sections) within the same context.

    NOTE: the value of config-flags may be a comma-separated list of
          key=value pairs and some Openstack config files support
          comma-separated lists as values.
    """

    def __init__(self, charm_flag='config-flags',
                 template_flag='user_config_flags'):
        """
        :param charm_flag: config flags in charm configuration.
        :param template_flag: insert point for user-defined flags in template
                              file.
        """
        super(OSConfigFlagContext, self).__init__()
        self._charm_flag = charm_flag
        self._template_flag = template_flag

    def __call__(self):
        raw_flags = config(self._charm_flag)
        if raw_flags:
            return {self._template_flag: config_flags_parser(raw_flags)}
        return {}
4932 | + |
4933 | + |
class LibvirtConfigFlagsContext(OSContextGenerator):
    """
    This context provides support for extending
    the libvirt section through user-defined flags.
    """
    def __call__(self):
        flags = config('libvirt-flags')
        if not flags:
            return {}
        return {'libvirt_flags': config_flags_parser(flags)}
4946 | + |
4947 | + |
4948 | +class SubordinateConfigContext(OSContextGenerator): |
4949 | + |
4950 | + """ |
4951 | + Responsible for inspecting relations to subordinates that |
4952 | + may be exporting required config via a json blob. |
4953 | + |
4954 | + The subordinate interface allows subordinates to export their |
4955 | + configuration requirements to the principle for multiple config |
4956 | + files and multiple serivces. Ie, a subordinate that has interfaces |
4957 | + to both glance and nova may export to following yaml blob as json:: |
4958 | + |
4959 | + glance: |
4960 | + /etc/glance/glance-api.conf: |
4961 | + sections: |
4962 | + DEFAULT: |
4963 | + - [key1, value1] |
4964 | + /etc/glance/glance-registry.conf: |
4965 | + MYSECTION: |
4966 | + - [key2, value2] |
4967 | + nova: |
4968 | + /etc/nova/nova.conf: |
4969 | + sections: |
4970 | + DEFAULT: |
4971 | + - [key3, value3] |
4972 | + |
4973 | + |
4974 | + It is then up to the principle charms to subscribe this context to |
4975 | + the service+config file it is interestd in. Configuration data will |
4976 | + be available in the template context, in glance's case, as:: |
4977 | + |
4978 | + ctxt = { |
4979 | + ... other context ... |
4980 | + 'subordinate_configuration': { |
4981 | + 'DEFAULT': { |
4982 | + 'key1': 'value1', |
4983 | + }, |
4984 | + 'MYSECTION': { |
4985 | + 'key2': 'value2', |
4986 | + }, |
4987 | + } |
4988 | + } |
4989 | + """ |
4990 | + |
4991 | + def __init__(self, service, config_file, interface): |
4992 | + """ |
4993 | + :param service : Service name key to query in any subordinate |
4994 | + data found |
4995 | + :param config_file : Service's config file to query sections |
4996 | + :param interface : Subordinate interface to inspect |
4997 | + """ |
4998 | + self.config_file = config_file |
4999 | + if isinstance(service, list): |
5000 | + self.services = service |
Hi Chris
Charm development is no longer undertaken under bzr branches on launchpad; please read:
https://github.com/openstack-charmers/openstack-community/blob/master/README.dev-charms.md
and re-target your change to the git repositories under the OpenStack project.