Merge lp:~1chb1n/charms/trusty/glance/next-amulet-update into lp:~openstack-charmers-archive/charms/trusty/glance/trunk
- Trusty Tahr (14.04)
- next-amulet-update
- Merge into trunk
Proposed by
Ryan Beisner
Status: | Superseded |
---|---|
Proposed branch: | lp:~1chb1n/charms/trusty/glance/next-amulet-update |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/glance/trunk |
Diff against target: |
3606 lines (+1746/-542) (has conflicts) 28 files modified
Makefile (+10/-15) README.md (+77/-0) hooks/charmhelpers/contrib/hahelpers/cluster.py (+46/-2) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+6/-2) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+122/-3) hooks/charmhelpers/contrib/openstack/context.py (+1/-1) hooks/charmhelpers/contrib/openstack/neutron.py (+16/-9) hooks/charmhelpers/contrib/openstack/utils.py (+82/-22) hooks/charmhelpers/contrib/python/packages.py (+30/-5) hooks/charmhelpers/core/hookenv.py (+231/-38) hooks/charmhelpers/core/host.py (+25/-7) hooks/charmhelpers/core/services/base.py (+43/-19) hooks/charmhelpers/fetch/__init__.py (+1/-1) hooks/charmhelpers/fetch/giturl.py (+7/-5) hooks/glance_relations.py (+3/-3) hooks/glance_utils.py (+50/-4) metadata.yaml (+1/-1) tests/00-setup (+5/-1) tests/020-basic-trusty-liberty (+11/-0) tests/021-basic-wily-liberty (+9/-0) tests/README (+9/-0) tests/basic_deployment.py (+288/-326) tests/charmhelpers/contrib/amulet/utils.py (+228/-10) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+41/-5) tests/charmhelpers/contrib/openstack/amulet/utils.py (+358/-51) tests/tests.yaml (+18/-0) unit_tests/test_glance_relations.py (+11/-5) unit_tests/test_glance_utils.py (+17/-7) Text conflict in README.md Text conflict in hooks/charmhelpers/contrib/hahelpers/cluster.py Text conflict in tests/basic_deployment.py |
To merge this branch: | bzr merge lp:~1chb1n/charms/trusty/glance/next-amulet-update |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Corey Bryant | Pending | ||
Review via email: mp+263411@code.launchpad.net |
This proposal has been superseded by a proposal from 2015-06-30.
Commit message
Description of the change
Update amulet tests for Kilo, prep for wily. Sync hooks/charmhelpers; Sync tests/charmhelpers.
To post a comment you must log in.
- 124. By Ryan Beisner
-
update tests
- 125. By Ryan Beisner
-
update tags for consistency with other openstack charms
- 126. By Ryan Beisner
-
update tests for vivid-kilo
Unmerged revisions
- 126. By Ryan Beisner
-
update tests for vivid-kilo
- 125. By Ryan Beisner
-
update tags for consistency with other openstack charms
- 124. By Ryan Beisner
-
update tests
- 123. By Ryan Beisner
-
sync tests/charmhelpers
- 122. By Ryan Beisner
-
sync hooks/charmhelpers
- 121. By Liam Young
-
[corey.bryant, r=gnuoy] charmhelper sync
- 120. By Billy Olsen
-
[corey.bryant, r=billy-olsen] Fix global requirements for git-deploy.
- 119. By Corey Bryant
-
[billy-olsen, r=corey.bryant] Provide support for user-specified public endpoint hostname.
- 118. By James Page
-
Add support for leader-election
- 117. By James Page
-
Fixup glance-api template sections for Kilo release.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'Makefile' | |||
2 | --- Makefile 2015-04-16 21:32:02 +0000 | |||
3 | +++ Makefile 2015-06-30 20:18:04 +0000 | |||
4 | @@ -2,16 +2,18 @@ | |||
5 | 2 | PYTHON := /usr/bin/env python | 2 | PYTHON := /usr/bin/env python |
6 | 3 | 3 | ||
7 | 4 | lint: | 4 | lint: |
12 | 5 | @echo "Running flake8 tests: " | 5 | @flake8 --exclude hooks/charmhelpers,tests/charmhelpers \ |
13 | 6 | @flake8 --exclude hooks/charmhelpers actions hooks unit_tests tests | 6 | actions hooks unit_tests tests |
10 | 7 | @echo "OK" | ||
11 | 8 | @echo "Running charm proof: " | ||
14 | 9 | @charm proof | 7 | @charm proof |
15 | 10 | @echo "OK" | ||
16 | 11 | 8 | ||
18 | 12 | unit_test: | 9 | test: |
19 | 10 | @# Bundletester expects unit tests here. | ||
20 | 13 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests | 11 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests |
21 | 14 | 12 | ||
22 | 13 | functional_test: | ||
23 | 14 | @echo Starting Amulet tests... | ||
24 | 15 | @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 | ||
25 | 16 | |||
26 | 15 | bin/charm_helpers_sync.py: | 17 | bin/charm_helpers_sync.py: |
27 | 16 | @mkdir -p bin | 18 | @mkdir -p bin |
28 | 17 | @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ | 19 | @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ |
29 | @@ -21,15 +23,8 @@ | |||
30 | 21 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml | 23 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml |
31 | 22 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml | 24 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml |
32 | 23 | 25 | ||
41 | 24 | test: | 26 | publish: lint test |
34 | 25 | @echo Starting Amulet tests... | ||
35 | 26 | # /!\ Note: The -v should only be temporary until Amulet sends | ||
36 | 27 | # raise_status() messages to stderr: | ||
37 | 28 | # https://bugs.launchpad.net/amulet/+bug/1320357 | ||
38 | 29 | @juju test -v -p AMULET_HTTP_PROXY,AMULET_OS_VIP --timeout 2700 | ||
39 | 30 | |||
40 | 31 | publish: lint unit_test | ||
42 | 32 | bzr push lp:charms/glance | 27 | bzr push lp:charms/glance |
43 | 33 | bzr push lp:charms/trusty/glance | 28 | bzr push lp:charms/trusty/glance |
44 | 34 | 29 | ||
46 | 35 | all: unit_test lint | 30 | all: test lint |
47 | 36 | 31 | ||
48 | === modified file 'README.md' | |||
49 | --- README.md 2015-04-30 15:23:58 +0000 | |||
50 | +++ README.md 2015-06-30 20:18:04 +0000 | |||
51 | @@ -86,6 +86,7 @@ | |||
52 | 86 | 86 | ||
53 | 87 | The minimum openstack-origin-git config required to deploy from source is: | 87 | The minimum openstack-origin-git config required to deploy from source is: |
54 | 88 | 88 | ||
55 | 89 | <<<<<<< TREE | ||
56 | 89 | openstack-origin-git: include-file://glance-juno.yaml | 90 | openstack-origin-git: include-file://glance-juno.yaml |
57 | 90 | 91 | ||
58 | 91 | glance-juno.yaml | 92 | glance-juno.yaml |
59 | @@ -97,6 +98,18 @@ | |||
60 | 97 | - {name: glance, | 98 | - {name: glance, |
61 | 98 | repository: 'git://github.com/openstack/glance', | 99 | repository: 'git://github.com/openstack/glance', |
62 | 99 | branch: stable/juno} | 100 | branch: stable/juno} |
63 | 101 | ======= | ||
64 | 102 | openstack-origin-git: include-file://glance-juno.yaml | ||
65 | 103 | |||
66 | 104 | glance-juno.yaml | ||
67 | 105 | repositories: | ||
68 | 106 | - {name: requirements, | ||
69 | 107 | repository: 'git://github.com/openstack/requirements', | ||
70 | 108 | branch: stable/juno} | ||
71 | 109 | - {name: glance, | ||
72 | 110 | repository: 'git://github.com/openstack/glance', | ||
73 | 111 | branch: stable/juno} | ||
74 | 112 | >>>>>>> MERGE-SOURCE | ||
75 | 100 | 113 | ||
76 | 101 | Note that there are only two 'name' values the charm knows about: 'requirements' | 114 | Note that there are only two 'name' values the charm knows about: 'requirements' |
77 | 102 | and 'glance'. These repositories must correspond to these 'name' values. | 115 | and 'glance'. These repositories must correspond to these 'name' values. |
78 | @@ -106,6 +119,7 @@ | |||
79 | 106 | 119 | ||
80 | 107 | The following is a full list of current tip repos (may not be up-to-date): | 120 | The following is a full list of current tip repos (may not be up-to-date): |
81 | 108 | 121 | ||
82 | 122 | <<<<<<< TREE | ||
83 | 109 | openstack-origin-git: include-file://glance-master.yaml | 123 | openstack-origin-git: include-file://glance-master.yaml |
84 | 110 | 124 | ||
85 | 111 | glance-master.yaml | 125 | glance-master.yaml |
86 | @@ -168,6 +182,69 @@ | |||
87 | 168 | - {name: glance, | 182 | - {name: glance, |
88 | 169 | repository: 'git://github.com/openstack/glance', | 183 | repository: 'git://github.com/openstack/glance', |
89 | 170 | branch: master} | 184 | branch: master} |
90 | 185 | ======= | ||
91 | 186 | openstack-origin-git: include-file://glance-master.yaml | ||
92 | 187 | |||
93 | 188 | glance-master.yaml | ||
94 | 189 | repositories: | ||
95 | 190 | - {name: requirements, | ||
96 | 191 | repository: 'git://github.com/openstack/requirements', | ||
97 | 192 | branch: master} | ||
98 | 193 | - {name: oslo-concurrency, | ||
99 | 194 | repository: 'git://github.com/openstack/oslo.concurrency', | ||
100 | 195 | branch: master} | ||
101 | 196 | - {name: oslo-config, | ||
102 | 197 | repository: 'git://github.com/openstack/oslo.config', | ||
103 | 198 | branch: master} | ||
104 | 199 | - {name: oslo-db, | ||
105 | 200 | repository: 'git://github.com/openstack/oslo.db', | ||
106 | 201 | branch: master} | ||
107 | 202 | - {name: oslo-i18n, | ||
108 | 203 | repository: 'git://github.com/openstack/oslo.i18n', | ||
109 | 204 | branch: master} | ||
110 | 205 | - {name: oslo-messaging, | ||
111 | 206 | repository: 'git://github.com/openstack/oslo.messaging', | ||
112 | 207 | branch: master} | ||
113 | 208 | - {name: oslo-serialization, | ||
114 | 209 | repository: 'git://github.com/openstack/oslo.serialization', | ||
115 | 210 | branch: master} | ||
116 | 211 | - {name: oslo-utils, | ||
117 | 212 | repository: 'git://github.com/openstack/oslo.utils', | ||
118 | 213 | branch: master} | ||
119 | 214 | - {name: oslo-vmware, | ||
120 | 215 | repository: 'git://github.com/openstack/oslo.vmware', | ||
121 | 216 | branch: master} | ||
122 | 217 | - {name: osprofiler, | ||
123 | 218 | repository: 'git://github.com/stackforge/osprofiler', | ||
124 | 219 | branch: master} | ||
125 | 220 | - {name: pbr, | ||
126 | 221 | repository: 'git://github.com/openstack-dev/pbr', | ||
127 | 222 | branch: master} | ||
128 | 223 | - {name: python-keystoneclient, | ||
129 | 224 | repository: 'git://github.com/openstack/python-keystoneclient', | ||
130 | 225 | branch: master} | ||
131 | 226 | - {name: python-swiftclient, | ||
132 | 227 | repository: 'git://github.com/openstack/python-swiftclient', | ||
133 | 228 | branch: master} | ||
134 | 229 | - {name: sqlalchemy-migrate, | ||
135 | 230 | repository: 'git://github.com/stackforge/sqlalchemy-migrate', | ||
136 | 231 | branch: master} | ||
137 | 232 | - {name: stevedore, | ||
138 | 233 | repository: 'git://github.com/openstack/stevedore', | ||
139 | 234 | branch: master} | ||
140 | 235 | - {name: wsme, | ||
141 | 236 | repository: 'git://github.com/stackforge/wsme', | ||
142 | 237 | branch: master} | ||
143 | 238 | - {name: keystonemiddleware, | ||
144 | 239 | repository: 'git://github.com/openstack/keystonemiddleware', | ||
145 | 240 | branch: master} | ||
146 | 241 | - {name: glance-store, | ||
147 | 242 | repository: 'git://github.com/openstack/glance_store', | ||
148 | 243 | branch: master} | ||
149 | 244 | - {name: glance, | ||
150 | 245 | repository: 'git://github.com/openstack/glance', | ||
151 | 246 | branch: master} | ||
152 | 247 | >>>>>>> MERGE-SOURCE | ||
153 | 171 | 248 | ||
154 | 172 | Contact Information | 249 | Contact Information |
155 | 173 | ------------------- | 250 | ------------------- |
156 | 174 | 251 | ||
157 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
158 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-06-18 23:26:31 +0000 | |||
159 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-06-30 20:18:04 +0000 | |||
160 | @@ -44,6 +44,7 @@ | |||
161 | 44 | ERROR, | 44 | ERROR, |
162 | 45 | WARNING, | 45 | WARNING, |
163 | 46 | unit_get, | 46 | unit_get, |
164 | 47 | is_leader as juju_is_leader | ||
165 | 47 | ) | 48 | ) |
166 | 48 | from charmhelpers.core.decorators import ( | 49 | from charmhelpers.core.decorators import ( |
167 | 49 | retry_on_exception, | 50 | retry_on_exception, |
168 | @@ -63,17 +64,30 @@ | |||
169 | 63 | pass | 64 | pass |
170 | 64 | 65 | ||
171 | 65 | 66 | ||
172 | 67 | class CRMDCNotFound(Exception): | ||
173 | 68 | pass | ||
174 | 69 | |||
175 | 70 | |||
176 | 66 | def is_elected_leader(resource): | 71 | def is_elected_leader(resource): |
177 | 67 | """ | 72 | """ |
178 | 68 | Returns True if the charm executing this is the elected cluster leader. | 73 | Returns True if the charm executing this is the elected cluster leader. |
179 | 69 | 74 | ||
180 | 70 | It relies on two mechanisms to determine leadership: | 75 | It relies on two mechanisms to determine leadership: |
182 | 71 | 1. If the charm is part of a corosync cluster, call corosync to | 76 | 1. If juju is sufficiently new and leadership election is supported, |
183 | 77 | the is_leader command will be used. | ||
184 | 78 | 2. If the charm is part of a corosync cluster, call corosync to | ||
185 | 72 | determine leadership. | 79 | determine leadership. |
187 | 73 | 2. If the charm is not part of a corosync cluster, the leader is | 80 | 3. If the charm is not part of a corosync cluster, the leader is |
188 | 74 | determined as being "the alive unit with the lowest unit numer". In | 81 | determined as being "the alive unit with the lowest unit numer". In |
189 | 75 | other words, the oldest surviving unit. | 82 | other words, the oldest surviving unit. |
190 | 76 | """ | 83 | """ |
191 | 84 | try: | ||
192 | 85 | return juju_is_leader() | ||
193 | 86 | except NotImplementedError: | ||
194 | 87 | log('Juju leadership election feature not enabled' | ||
195 | 88 | ', using fallback support', | ||
196 | 89 | level=WARNING) | ||
197 | 90 | |||
198 | 77 | if is_clustered(): | 91 | if is_clustered(): |
199 | 78 | if not is_crm_leader(resource): | 92 | if not is_crm_leader(resource): |
200 | 79 | log('Deferring action to CRM leader.', level=INFO) | 93 | log('Deferring action to CRM leader.', level=INFO) |
201 | @@ -97,6 +111,7 @@ | |||
202 | 97 | return False | 111 | return False |
203 | 98 | 112 | ||
204 | 99 | 113 | ||
205 | 114 | <<<<<<< TREE | ||
206 | 100 | def is_crm_dc(): | 115 | def is_crm_dc(): |
207 | 101 | """ | 116 | """ |
208 | 102 | Determine leadership by querying the pacemaker Designated Controller | 117 | Determine leadership by querying the pacemaker Designated Controller |
209 | @@ -119,6 +134,35 @@ | |||
210 | 119 | 134 | ||
211 | 120 | 135 | ||
212 | 121 | @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) | 136 | @retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) |
213 | 137 | ======= | ||
214 | 138 | def is_crm_dc(): | ||
215 | 139 | """ | ||
216 | 140 | Determine leadership by querying the pacemaker Designated Controller | ||
217 | 141 | """ | ||
218 | 142 | cmd = ['crm', 'status'] | ||
219 | 143 | try: | ||
220 | 144 | status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) | ||
221 | 145 | if not isinstance(status, six.text_type): | ||
222 | 146 | status = six.text_type(status, "utf-8") | ||
223 | 147 | except subprocess.CalledProcessError as ex: | ||
224 | 148 | raise CRMDCNotFound(str(ex)) | ||
225 | 149 | |||
226 | 150 | current_dc = '' | ||
227 | 151 | for line in status.split('\n'): | ||
228 | 152 | if line.startswith('Current DC'): | ||
229 | 153 | # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum | ||
230 | 154 | current_dc = line.split(':')[1].split()[0] | ||
231 | 155 | if current_dc == get_unit_hostname(): | ||
232 | 156 | return True | ||
233 | 157 | elif current_dc == 'NONE': | ||
234 | 158 | raise CRMDCNotFound('Current DC: NONE') | ||
235 | 159 | |||
236 | 160 | return False | ||
237 | 161 | |||
238 | 162 | |||
239 | 163 | @retry_on_exception(5, base_delay=2, | ||
240 | 164 | exc_type=(CRMResourceNotFound, CRMDCNotFound)) | ||
241 | 165 | >>>>>>> MERGE-SOURCE | ||
242 | 122 | def is_crm_leader(resource, retry=False): | 166 | def is_crm_leader(resource, retry=False): |
243 | 123 | """ | 167 | """ |
244 | 124 | Returns True if the charm calling this is the elected corosync leader, | 168 | Returns True if the charm calling this is the elected corosync leader, |
245 | 125 | 169 | ||
246 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
247 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-04-23 14:52:07 +0000 | |||
248 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-30 20:18:04 +0000 | |||
249 | @@ -110,7 +110,8 @@ | |||
250 | 110 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | 110 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, |
251 | 111 | self.precise_havana, self.precise_icehouse, | 111 | self.precise_havana, self.precise_icehouse, |
252 | 112 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, | 112 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
254 | 113 | self.trusty_kilo, self.vivid_kilo) = range(10) | 113 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
255 | 114 | self.wily_liberty) = range(12) | ||
256 | 114 | 115 | ||
257 | 115 | releases = { | 116 | releases = { |
258 | 116 | ('precise', None): self.precise_essex, | 117 | ('precise', None): self.precise_essex, |
259 | @@ -121,8 +122,10 @@ | |||
260 | 121 | ('trusty', None): self.trusty_icehouse, | 122 | ('trusty', None): self.trusty_icehouse, |
261 | 122 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, | 123 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
262 | 123 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, | 124 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
263 | 125 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | ||
264 | 124 | ('utopic', None): self.utopic_juno, | 126 | ('utopic', None): self.utopic_juno, |
266 | 125 | ('vivid', None): self.vivid_kilo} | 127 | ('vivid', None): self.vivid_kilo, |
267 | 128 | ('wily', None): self.wily_liberty} | ||
268 | 126 | return releases[(self.series, self.openstack)] | 129 | return releases[(self.series, self.openstack)] |
269 | 127 | 130 | ||
270 | 128 | def _get_openstack_release_string(self): | 131 | def _get_openstack_release_string(self): |
271 | @@ -138,6 +141,7 @@ | |||
272 | 138 | ('trusty', 'icehouse'), | 141 | ('trusty', 'icehouse'), |
273 | 139 | ('utopic', 'juno'), | 142 | ('utopic', 'juno'), |
274 | 140 | ('vivid', 'kilo'), | 143 | ('vivid', 'kilo'), |
275 | 144 | ('wily', 'liberty'), | ||
276 | 141 | ]) | 145 | ]) |
277 | 142 | if self.openstack: | 146 | if self.openstack: |
278 | 143 | os_origin = self.openstack.split(':')[1] | 147 | os_origin = self.openstack.split(':')[1] |
279 | 144 | 148 | ||
280 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
281 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-03-20 17:15:02 +0000 | |||
282 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-30 20:18:04 +0000 | |||
283 | @@ -16,15 +16,15 @@ | |||
284 | 16 | 16 | ||
285 | 17 | import logging | 17 | import logging |
286 | 18 | import os | 18 | import os |
287 | 19 | import six | ||
288 | 19 | import time | 20 | import time |
289 | 20 | import urllib | 21 | import urllib |
290 | 21 | 22 | ||
291 | 22 | import glanceclient.v1.client as glance_client | 23 | import glanceclient.v1.client as glance_client |
292 | 24 | import heatclient.v1.client as heat_client | ||
293 | 23 | import keystoneclient.v2_0 as keystone_client | 25 | import keystoneclient.v2_0 as keystone_client |
294 | 24 | import novaclient.v1_1.client as nova_client | 26 | import novaclient.v1_1.client as nova_client |
295 | 25 | 27 | ||
296 | 26 | import six | ||
297 | 27 | |||
298 | 28 | from charmhelpers.contrib.amulet.utils import ( | 28 | from charmhelpers.contrib.amulet.utils import ( |
299 | 29 | AmuletUtils | 29 | AmuletUtils |
300 | 30 | ) | 30 | ) |
301 | @@ -37,7 +37,7 @@ | |||
302 | 37 | """OpenStack amulet utilities. | 37 | """OpenStack amulet utilities. |
303 | 38 | 38 | ||
304 | 39 | This class inherits from AmuletUtils and has additional support | 39 | This class inherits from AmuletUtils and has additional support |
306 | 40 | that is specifically for use by OpenStack charms. | 40 | that is specifically for use by OpenStack charm tests. |
307 | 41 | """ | 41 | """ |
308 | 42 | 42 | ||
309 | 43 | def __init__(self, log_level=ERROR): | 43 | def __init__(self, log_level=ERROR): |
310 | @@ -51,6 +51,8 @@ | |||
311 | 51 | Validate actual endpoint data vs expected endpoint data. The ports | 51 | Validate actual endpoint data vs expected endpoint data. The ports |
312 | 52 | are used to find the matching endpoint. | 52 | are used to find the matching endpoint. |
313 | 53 | """ | 53 | """ |
314 | 54 | self.log.debug('Validating endpoint data...') | ||
315 | 55 | self.log.debug('actual: {}'.format(repr(endpoints))) | ||
316 | 54 | found = False | 56 | found = False |
317 | 55 | for ep in endpoints: | 57 | for ep in endpoints: |
318 | 56 | self.log.debug('endpoint: {}'.format(repr(ep))) | 58 | self.log.debug('endpoint: {}'.format(repr(ep))) |
319 | @@ -77,6 +79,7 @@ | |||
320 | 77 | Validate a list of actual service catalog endpoints vs a list of | 79 | Validate a list of actual service catalog endpoints vs a list of |
321 | 78 | expected service catalog endpoints. | 80 | expected service catalog endpoints. |
322 | 79 | """ | 81 | """ |
323 | 82 | self.log.debug('Validating service catalog endpoint data...') | ||
324 | 80 | self.log.debug('actual: {}'.format(repr(actual))) | 83 | self.log.debug('actual: {}'.format(repr(actual))) |
325 | 81 | for k, v in six.iteritems(expected): | 84 | for k, v in six.iteritems(expected): |
326 | 82 | if k in actual: | 85 | if k in actual: |
327 | @@ -93,6 +96,7 @@ | |||
328 | 93 | Validate a list of actual tenant data vs list of expected tenant | 96 | Validate a list of actual tenant data vs list of expected tenant |
329 | 94 | data. | 97 | data. |
330 | 95 | """ | 98 | """ |
331 | 99 | self.log.debug('Validating tenant data...') | ||
332 | 96 | self.log.debug('actual: {}'.format(repr(actual))) | 100 | self.log.debug('actual: {}'.format(repr(actual))) |
333 | 97 | for e in expected: | 101 | for e in expected: |
334 | 98 | found = False | 102 | found = False |
335 | @@ -114,6 +118,7 @@ | |||
336 | 114 | Validate a list of actual role data vs a list of expected role | 118 | Validate a list of actual role data vs a list of expected role |
337 | 115 | data. | 119 | data. |
338 | 116 | """ | 120 | """ |
339 | 121 | self.log.debug('Validating role data...') | ||
340 | 117 | self.log.debug('actual: {}'.format(repr(actual))) | 122 | self.log.debug('actual: {}'.format(repr(actual))) |
341 | 118 | for e in expected: | 123 | for e in expected: |
342 | 119 | found = False | 124 | found = False |
343 | @@ -134,6 +139,7 @@ | |||
344 | 134 | Validate a list of actual user data vs a list of expected user | 139 | Validate a list of actual user data vs a list of expected user |
345 | 135 | data. | 140 | data. |
346 | 136 | """ | 141 | """ |
347 | 142 | self.log.debug('Validating user data...') | ||
348 | 137 | self.log.debug('actual: {}'.format(repr(actual))) | 143 | self.log.debug('actual: {}'.format(repr(actual))) |
349 | 138 | for e in expected: | 144 | for e in expected: |
350 | 139 | found = False | 145 | found = False |
351 | @@ -155,17 +161,20 @@ | |||
352 | 155 | 161 | ||
353 | 156 | Validate a list of actual flavors vs a list of expected flavors. | 162 | Validate a list of actual flavors vs a list of expected flavors. |
354 | 157 | """ | 163 | """ |
355 | 164 | self.log.debug('Validating flavor data...') | ||
356 | 158 | self.log.debug('actual: {}'.format(repr(actual))) | 165 | self.log.debug('actual: {}'.format(repr(actual))) |
357 | 159 | act = [a.name for a in actual] | 166 | act = [a.name for a in actual] |
358 | 160 | return self._validate_list_data(expected, act) | 167 | return self._validate_list_data(expected, act) |
359 | 161 | 168 | ||
360 | 162 | def tenant_exists(self, keystone, tenant): | 169 | def tenant_exists(self, keystone, tenant): |
361 | 163 | """Return True if tenant exists.""" | 170 | """Return True if tenant exists.""" |
362 | 171 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) | ||
363 | 164 | return tenant in [t.name for t in keystone.tenants.list()] | 172 | return tenant in [t.name for t in keystone.tenants.list()] |
364 | 165 | 173 | ||
365 | 166 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 174 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
366 | 167 | tenant): | 175 | tenant): |
367 | 168 | """Authenticates admin user with the keystone admin endpoint.""" | 176 | """Authenticates admin user with the keystone admin endpoint.""" |
368 | 177 | self.log.debug('Authenticating keystone admin...') | ||
369 | 169 | unit = keystone_sentry | 178 | unit = keystone_sentry |
370 | 170 | service_ip = unit.relation('shared-db', | 179 | service_ip = unit.relation('shared-db', |
371 | 171 | 'mysql:shared-db')['private-address'] | 180 | 'mysql:shared-db')['private-address'] |
372 | @@ -175,6 +184,7 @@ | |||
373 | 175 | 184 | ||
374 | 176 | def authenticate_keystone_user(self, keystone, user, password, tenant): | 185 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
375 | 177 | """Authenticates a regular user with the keystone public endpoint.""" | 186 | """Authenticates a regular user with the keystone public endpoint.""" |
376 | 187 | self.log.debug('Authenticating keystone user ({})...'.format(user)) | ||
377 | 178 | ep = keystone.service_catalog.url_for(service_type='identity', | 188 | ep = keystone.service_catalog.url_for(service_type='identity', |
378 | 179 | endpoint_type='publicURL') | 189 | endpoint_type='publicURL') |
379 | 180 | return keystone_client.Client(username=user, password=password, | 190 | return keystone_client.Client(username=user, password=password, |
380 | @@ -182,12 +192,21 @@ | |||
381 | 182 | 192 | ||
382 | 183 | def authenticate_glance_admin(self, keystone): | 193 | def authenticate_glance_admin(self, keystone): |
383 | 184 | """Authenticates admin user with glance.""" | 194 | """Authenticates admin user with glance.""" |
384 | 195 | self.log.debug('Authenticating glance admin...') | ||
385 | 185 | ep = keystone.service_catalog.url_for(service_type='image', | 196 | ep = keystone.service_catalog.url_for(service_type='image', |
386 | 186 | endpoint_type='adminURL') | 197 | endpoint_type='adminURL') |
387 | 187 | return glance_client.Client(ep, token=keystone.auth_token) | 198 | return glance_client.Client(ep, token=keystone.auth_token) |
388 | 188 | 199 | ||
389 | 200 | def authenticate_heat_admin(self, keystone): | ||
390 | 201 | """Authenticates the admin user with heat.""" | ||
391 | 202 | self.log.debug('Authenticating heat admin...') | ||
392 | 203 | ep = keystone.service_catalog.url_for(service_type='orchestration', | ||
393 | 204 | endpoint_type='publicURL') | ||
394 | 205 | return heat_client.Client(endpoint=ep, token=keystone.auth_token) | ||
395 | 206 | |||
396 | 189 | def authenticate_nova_user(self, keystone, user, password, tenant): | 207 | def authenticate_nova_user(self, keystone, user, password, tenant): |
397 | 190 | """Authenticates a regular user with nova-api.""" | 208 | """Authenticates a regular user with nova-api.""" |
398 | 209 | self.log.debug('Authenticating nova user ({})...'.format(user)) | ||
399 | 191 | ep = keystone.service_catalog.url_for(service_type='identity', | 210 | ep = keystone.service_catalog.url_for(service_type='identity', |
400 | 192 | endpoint_type='publicURL') | 211 | endpoint_type='publicURL') |
401 | 193 | return nova_client.Client(username=user, api_key=password, | 212 | return nova_client.Client(username=user, api_key=password, |
402 | @@ -195,6 +214,7 @@ | |||
403 | 195 | 214 | ||
404 | 196 | def create_cirros_image(self, glance, image_name): | 215 | def create_cirros_image(self, glance, image_name): |
405 | 197 | """Download the latest cirros image and upload it to glance.""" | 216 | """Download the latest cirros image and upload it to glance.""" |
406 | 217 | self.log.debug('Creating glance image ({})...'.format(image_name)) | ||
407 | 198 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | 218 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
408 | 199 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | 219 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
409 | 200 | if http_proxy: | 220 | if http_proxy: |
410 | @@ -235,6 +255,11 @@ | |||
411 | 235 | 255 | ||
412 | 236 | def delete_image(self, glance, image): | 256 | def delete_image(self, glance, image): |
413 | 237 | """Delete the specified image.""" | 257 | """Delete the specified image.""" |
414 | 258 | |||
415 | 259 | # /!\ DEPRECATION WARNING | ||
416 | 260 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | ||
417 | 261 | 'delete_resource instead of delete_image.') | ||
418 | 262 | self.log.debug('Deleting glance image ({})...'.format(image)) | ||
419 | 238 | num_before = len(list(glance.images.list())) | 263 | num_before = len(list(glance.images.list())) |
420 | 239 | glance.images.delete(image) | 264 | glance.images.delete(image) |
421 | 240 | 265 | ||
422 | @@ -254,6 +279,8 @@ | |||
423 | 254 | 279 | ||
424 | 255 | def create_instance(self, nova, image_name, instance_name, flavor): | 280 | def create_instance(self, nova, image_name, instance_name, flavor): |
425 | 256 | """Create the specified instance.""" | 281 | """Create the specified instance.""" |
426 | 282 | self.log.debug('Creating instance ' | ||
427 | 283 | '({}|{}|{})'.format(instance_name, image_name, flavor)) | ||
428 | 257 | image = nova.images.find(name=image_name) | 284 | image = nova.images.find(name=image_name) |
429 | 258 | flavor = nova.flavors.find(name=flavor) | 285 | flavor = nova.flavors.find(name=flavor) |
430 | 259 | instance = nova.servers.create(name=instance_name, image=image, | 286 | instance = nova.servers.create(name=instance_name, image=image, |
431 | @@ -276,6 +303,11 @@ | |||
432 | 276 | 303 | ||
433 | 277 | def delete_instance(self, nova, instance): | 304 | def delete_instance(self, nova, instance): |
434 | 278 | """Delete the specified instance.""" | 305 | """Delete the specified instance.""" |
435 | 306 | |||
436 | 307 | # /!\ DEPRECATION WARNING | ||
437 | 308 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | ||
438 | 309 | 'delete_resource instead of delete_instance.') | ||
439 | 310 | self.log.debug('Deleting instance ({})...'.format(instance)) | ||
440 | 279 | num_before = len(list(nova.servers.list())) | 311 | num_before = len(list(nova.servers.list())) |
441 | 280 | nova.servers.delete(instance) | 312 | nova.servers.delete(instance) |
442 | 281 | 313 | ||
443 | @@ -292,3 +324,90 @@ | |||
444 | 292 | return False | 324 | return False |
445 | 293 | 325 | ||
446 | 294 | return True | 326 | return True |
447 | 327 | |||
448 | 328 | def create_or_get_keypair(self, nova, keypair_name="testkey"): | ||
449 | 329 | """Create a new keypair, or return pointer if it already exists.""" | ||
450 | 330 | try: | ||
451 | 331 | _keypair = nova.keypairs.get(keypair_name) | ||
452 | 332 | self.log.debug('Keypair ({}) already exists, ' | ||
453 | 333 | 'using it.'.format(keypair_name)) | ||
454 | 334 | return _keypair | ||
455 | 335 | except: | ||
456 | 336 | self.log.debug('Keypair ({}) does not exist, ' | ||
457 | 337 | 'creating it.'.format(keypair_name)) | ||
458 | 338 | |||
459 | 339 | _keypair = nova.keypairs.create(name=keypair_name) | ||
460 | 340 | return _keypair | ||
461 | 341 | |||
462 | 342 | def delete_resource(self, resource, resource_id, | ||
463 | 343 | msg="resource", max_wait=120): | ||
464 | 344 | """Delete one openstack resource, such as one instance, keypair, | ||
465 | 345 | image, volume, stack, etc., and confirm deletion within max wait time. | ||
466 | 346 | |||
467 | 347 | :param resource: pointer to os resource type, ex:glance_client.images | ||
468 | 348 | :param resource_id: unique name or id for the openstack resource | ||
469 | 349 | :param msg: text to identify purpose in logging | ||
470 | 350 | :param max_wait: maximum wait time in seconds | ||
471 | 351 | :returns: True if successful, otherwise False | ||
472 | 352 | """ | ||
473 | 353 | num_before = len(list(resource.list())) | ||
474 | 354 | resource.delete(resource_id) | ||
475 | 355 | |||
476 | 356 | tries = 0 | ||
477 | 357 | num_after = len(list(resource.list())) | ||
478 | 358 | while num_after != (num_before - 1) and tries < (max_wait / 4): | ||
479 | 359 | self.log.debug('{} delete check: ' | ||
480 | 360 | '{} [{}:{}] {}'.format(msg, tries, | ||
481 | 361 | num_before, | ||
482 | 362 | num_after, | ||
483 | 363 | resource_id)) | ||
484 | 364 | time.sleep(4) | ||
485 | 365 | num_after = len(list(resource.list())) | ||
486 | 366 | tries += 1 | ||
487 | 367 | |||
488 | 368 | self.log.debug('{}: expected, actual count = {}, ' | ||
489 | 369 | '{}'.format(msg, num_before - 1, num_after)) | ||
490 | 370 | |||
491 | 371 | if num_after == (num_before - 1): | ||
492 | 372 | return True | ||
493 | 373 | else: | ||
494 | 374 | self.log.error('{} delete timed out'.format(msg)) | ||
495 | 375 | return False | ||
496 | 376 | |||
497 | 377 | def resource_reaches_status(self, resource, resource_id, | ||
498 | 378 | expected_stat='available', | ||
499 | 379 | msg='resource', max_wait=120): | ||
500 | 380 | """Wait for an openstack resources status to reach an | ||
501 | 381 | expected status within a specified time. Useful to confirm that | ||
502 | 382 | nova instances, cinder vols, snapshots, glance images, heat stacks | ||
503 | 383 | and other resources eventually reach the expected status. | ||
504 | 384 | |||
505 | 385 | :param resource: pointer to os resource type, ex: heat_client.stacks | ||
506 | 386 | :param resource_id: unique id for the openstack resource | ||
507 | 387 | :param expected_stat: status to expect resource to reach | ||
508 | 388 | :param msg: text to identify purpose in logging | ||
509 | 389 | :param max_wait: maximum wait time in seconds | ||
510 | 390 | :returns: True if successful, False if status is not reached | ||
511 | 391 | """ | ||
512 | 392 | |||
513 | 393 | tries = 0 | ||
514 | 394 | resource_stat = resource.get(resource_id).status | ||
515 | 395 | while resource_stat != expected_stat and tries < (max_wait / 4): | ||
516 | 396 | self.log.debug('{} status check: ' | ||
517 | 397 | '{} [{}:{}] {}'.format(msg, tries, | ||
518 | 398 | resource_stat, | ||
519 | 399 | expected_stat, | ||
520 | 400 | resource_id)) | ||
521 | 401 | time.sleep(4) | ||
522 | 402 | resource_stat = resource.get(resource_id).status | ||
523 | 403 | tries += 1 | ||
524 | 404 | |||
525 | 405 | self.log.debug('{}: expected, actual status = {}, ' | ||
526 | 406 | '{}'.format(msg, resource_stat, expected_stat)) | ||
527 | 407 | |||
528 | 408 | if resource_stat == expected_stat: | ||
529 | 409 | return True | ||
530 | 410 | else: | ||
531 | 411 | self.log.debug('{} never reached expected status: ' | ||
532 | 412 | '{}'.format(resource_id, expected_stat)) | ||
533 | 413 | return False | ||
534 | 295 | 414 | ||
535 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
536 | --- hooks/charmhelpers/contrib/openstack/context.py 2015-04-16 21:33:32 +0000 | |||
537 | +++ hooks/charmhelpers/contrib/openstack/context.py 2015-06-30 20:18:04 +0000 | |||
538 | @@ -240,7 +240,7 @@ | |||
539 | 240 | if self.relation_prefix: | 240 | if self.relation_prefix: |
540 | 241 | password_setting = self.relation_prefix + '_password' | 241 | password_setting = self.relation_prefix + '_password' |
541 | 242 | 242 | ||
543 | 243 | for rid in relation_ids('shared-db'): | 243 | for rid in relation_ids(self.interfaces[0]): |
544 | 244 | for unit in related_units(rid): | 244 | for unit in related_units(rid): |
545 | 245 | rdata = relation_get(rid=rid, unit=unit) | 245 | rdata = relation_get(rid=rid, unit=unit) |
546 | 246 | host = rdata.get('db_host') | 246 | host = rdata.get('db_host') |
547 | 247 | 247 | ||
548 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
549 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2015-04-16 19:53:49 +0000 | |||
550 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-06-30 20:18:04 +0000 | |||
551 | @@ -172,14 +172,16 @@ | |||
552 | 172 | 'services': ['calico-felix', | 172 | 'services': ['calico-felix', |
553 | 173 | 'bird', | 173 | 'bird', |
554 | 174 | 'neutron-dhcp-agent', | 174 | 'neutron-dhcp-agent', |
556 | 175 | 'nova-api-metadata'], | 175 | 'nova-api-metadata', |
557 | 176 | 'etcd'], | ||
558 | 176 | 'packages': [[headers_package()] + determine_dkms_package(), | 177 | 'packages': [[headers_package()] + determine_dkms_package(), |
559 | 177 | ['calico-compute', | 178 | ['calico-compute', |
560 | 178 | 'bird', | 179 | 'bird', |
561 | 179 | 'neutron-dhcp-agent', | 180 | 'neutron-dhcp-agent', |
565 | 180 | 'nova-api-metadata']], | 181 | 'nova-api-metadata', |
566 | 181 | 'server_packages': ['neutron-server', 'calico-control'], | 182 | 'etcd']], |
567 | 182 | 'server_services': ['neutron-server'] | 183 | 'server_packages': ['neutron-server', 'calico-control', 'etcd'], |
568 | 184 | 'server_services': ['neutron-server', 'etcd'] | ||
569 | 183 | }, | 185 | }, |
570 | 184 | 'vsp': { | 186 | 'vsp': { |
571 | 185 | 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', | 187 | 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', |
572 | @@ -256,11 +258,14 @@ | |||
573 | 256 | def parse_mappings(mappings): | 258 | def parse_mappings(mappings): |
574 | 257 | parsed = {} | 259 | parsed = {} |
575 | 258 | if mappings: | 260 | if mappings: |
577 | 259 | mappings = mappings.split(' ') | 261 | mappings = mappings.split() |
578 | 260 | for m in mappings: | 262 | for m in mappings: |
579 | 261 | p = m.partition(':') | 263 | p = m.partition(':') |
582 | 262 | if p[1] == ':': | 264 | key = p[0].strip() |
583 | 263 | parsed[p[0].strip()] = p[2].strip() | 265 | if p[1]: |
584 | 266 | parsed[key] = p[2].strip() | ||
585 | 267 | else: | ||
586 | 268 | parsed[key] = '' | ||
587 | 264 | 269 | ||
588 | 265 | return parsed | 270 | return parsed |
589 | 266 | 271 | ||
590 | @@ -283,13 +288,13 @@ | |||
591 | 283 | Returns dict of the form {bridge:port}. | 288 | Returns dict of the form {bridge:port}. |
592 | 284 | """ | 289 | """ |
593 | 285 | _mappings = parse_mappings(mappings) | 290 | _mappings = parse_mappings(mappings) |
595 | 286 | if not _mappings: | 291 | if not _mappings or list(_mappings.values()) == ['']: |
596 | 287 | if not mappings: | 292 | if not mappings: |
597 | 288 | return {} | 293 | return {} |
598 | 289 | 294 | ||
599 | 290 | # For backwards-compatibility we need to support port-only provided in | 295 | # For backwards-compatibility we need to support port-only provided in |
600 | 291 | # config. | 296 | # config. |
602 | 292 | _mappings = {default_bridge: mappings.split(' ')[0]} | 297 | _mappings = {default_bridge: mappings.split()[0]} |
603 | 293 | 298 | ||
604 | 294 | bridges = _mappings.keys() | 299 | bridges = _mappings.keys() |
605 | 295 | ports = _mappings.values() | 300 | ports = _mappings.values() |
606 | @@ -309,6 +314,8 @@ | |||
607 | 309 | 314 | ||
608 | 310 | Mappings must be a space-delimited list of provider:start:end mappings. | 315 | Mappings must be a space-delimited list of provider:start:end mappings. |
609 | 311 | 316 | ||
610 | 317 | The start:end range is optional and may be omitted. | ||
611 | 318 | |||
612 | 312 | Returns dict of the form {provider: (start, end)}. | 319 | Returns dict of the form {provider: (start, end)}. |
613 | 313 | """ | 320 | """ |
614 | 314 | _mappings = parse_mappings(mappings) | 321 | _mappings = parse_mappings(mappings) |
615 | 315 | 322 | ||
616 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
617 | --- hooks/charmhelpers/contrib/openstack/utils.py 2015-04-16 19:53:49 +0000 | |||
618 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2015-06-30 20:18:04 +0000 | |||
619 | @@ -53,9 +53,13 @@ | |||
620 | 53 | get_ipv6_addr | 53 | get_ipv6_addr |
621 | 54 | ) | 54 | ) |
622 | 55 | 55 | ||
623 | 56 | from charmhelpers.contrib.python.packages import ( | ||
624 | 57 | pip_create_virtualenv, | ||
625 | 58 | pip_install, | ||
626 | 59 | ) | ||
627 | 60 | |||
628 | 56 | from charmhelpers.core.host import lsb_release, mounts, umount | 61 | from charmhelpers.core.host import lsb_release, mounts, umount |
629 | 57 | from charmhelpers.fetch import apt_install, apt_cache, install_remote | 62 | from charmhelpers.fetch import apt_install, apt_cache, install_remote |
630 | 58 | from charmhelpers.contrib.python.packages import pip_install | ||
631 | 59 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk | 63 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
632 | 60 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device | 64 | from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device |
633 | 61 | 65 | ||
634 | @@ -75,6 +79,7 @@ | |||
635 | 75 | ('trusty', 'icehouse'), | 79 | ('trusty', 'icehouse'), |
636 | 76 | ('utopic', 'juno'), | 80 | ('utopic', 'juno'), |
637 | 77 | ('vivid', 'kilo'), | 81 | ('vivid', 'kilo'), |
638 | 82 | ('wily', 'liberty'), | ||
639 | 78 | ]) | 83 | ]) |
640 | 79 | 84 | ||
641 | 80 | 85 | ||
642 | @@ -87,6 +92,7 @@ | |||
643 | 87 | ('2014.1', 'icehouse'), | 92 | ('2014.1', 'icehouse'), |
644 | 88 | ('2014.2', 'juno'), | 93 | ('2014.2', 'juno'), |
645 | 89 | ('2015.1', 'kilo'), | 94 | ('2015.1', 'kilo'), |
646 | 95 | ('2015.2', 'liberty'), | ||
647 | 90 | ]) | 96 | ]) |
648 | 91 | 97 | ||
649 | 92 | # The ugly duckling | 98 | # The ugly duckling |
650 | @@ -109,6 +115,7 @@ | |||
651 | 109 | ('2.2.0', 'juno'), | 115 | ('2.2.0', 'juno'), |
652 | 110 | ('2.2.1', 'kilo'), | 116 | ('2.2.1', 'kilo'), |
653 | 111 | ('2.2.2', 'kilo'), | 117 | ('2.2.2', 'kilo'), |
654 | 118 | ('2.3.0', 'liberty'), | ||
655 | 112 | ]) | 119 | ]) |
656 | 113 | 120 | ||
657 | 114 | DEFAULT_LOOPBACK_SIZE = '5G' | 121 | DEFAULT_LOOPBACK_SIZE = '5G' |
658 | @@ -317,6 +324,9 @@ | |||
659 | 317 | 'kilo': 'trusty-updates/kilo', | 324 | 'kilo': 'trusty-updates/kilo', |
660 | 318 | 'kilo/updates': 'trusty-updates/kilo', | 325 | 'kilo/updates': 'trusty-updates/kilo', |
661 | 319 | 'kilo/proposed': 'trusty-proposed/kilo', | 326 | 'kilo/proposed': 'trusty-proposed/kilo', |
662 | 327 | 'liberty': 'trusty-updates/liberty', | ||
663 | 328 | 'liberty/updates': 'trusty-updates/liberty', | ||
664 | 329 | 'liberty/proposed': 'trusty-proposed/liberty', | ||
665 | 320 | } | 330 | } |
666 | 321 | 331 | ||
667 | 322 | try: | 332 | try: |
668 | @@ -497,7 +507,17 @@ | |||
669 | 497 | requirements_dir = None | 507 | requirements_dir = None |
670 | 498 | 508 | ||
671 | 499 | 509 | ||
673 | 500 | def git_clone_and_install(projects_yaml, core_project): | 510 | def _git_yaml_load(projects_yaml): |
674 | 511 | """ | ||
675 | 512 | Load the specified yaml into a dictionary. | ||
676 | 513 | """ | ||
677 | 514 | if not projects_yaml: | ||
678 | 515 | return None | ||
679 | 516 | |||
680 | 517 | return yaml.load(projects_yaml) | ||
681 | 518 | |||
682 | 519 | |||
683 | 520 | def git_clone_and_install(projects_yaml, core_project, depth=1): | ||
684 | 501 | """ | 521 | """ |
685 | 502 | Clone/install all specified OpenStack repositories. | 522 | Clone/install all specified OpenStack repositories. |
686 | 503 | 523 | ||
687 | @@ -510,23 +530,22 @@ | |||
688 | 510 | repository: 'git://git.openstack.org/openstack/requirements.git', | 530 | repository: 'git://git.openstack.org/openstack/requirements.git', |
689 | 511 | branch: 'stable/icehouse'} | 531 | branch: 'stable/icehouse'} |
690 | 512 | directory: /mnt/openstack-git | 532 | directory: /mnt/openstack-git |
693 | 513 | http_proxy: http://squid.internal:3128 | 533 | http_proxy: squid-proxy-url |
694 | 514 | https_proxy: https://squid.internal:3128 | 534 | https_proxy: squid-proxy-url |
695 | 515 | 535 | ||
696 | 516 | The directory, http_proxy, and https_proxy keys are optional. | 536 | The directory, http_proxy, and https_proxy keys are optional. |
697 | 517 | """ | 537 | """ |
698 | 518 | global requirements_dir | 538 | global requirements_dir |
699 | 519 | parent_dir = '/mnt/openstack-git' | 539 | parent_dir = '/mnt/openstack-git' |
705 | 520 | 540 | http_proxy = None | |
706 | 521 | if not projects_yaml: | 541 | |
707 | 522 | return | 542 | projects = _git_yaml_load(projects_yaml) |
703 | 523 | |||
704 | 524 | projects = yaml.load(projects_yaml) | ||
708 | 525 | _git_validate_projects_yaml(projects, core_project) | 543 | _git_validate_projects_yaml(projects, core_project) |
709 | 526 | 544 | ||
710 | 527 | old_environ = dict(os.environ) | 545 | old_environ = dict(os.environ) |
711 | 528 | 546 | ||
712 | 529 | if 'http_proxy' in projects.keys(): | 547 | if 'http_proxy' in projects.keys(): |
713 | 548 | http_proxy = projects['http_proxy'] | ||
714 | 530 | os.environ['http_proxy'] = projects['http_proxy'] | 549 | os.environ['http_proxy'] = projects['http_proxy'] |
715 | 531 | if 'https_proxy' in projects.keys(): | 550 | if 'https_proxy' in projects.keys(): |
716 | 532 | os.environ['https_proxy'] = projects['https_proxy'] | 551 | os.environ['https_proxy'] = projects['https_proxy'] |
717 | @@ -534,15 +553,24 @@ | |||
718 | 534 | if 'directory' in projects.keys(): | 553 | if 'directory' in projects.keys(): |
719 | 535 | parent_dir = projects['directory'] | 554 | parent_dir = projects['directory'] |
720 | 536 | 555 | ||
721 | 556 | pip_create_virtualenv(os.path.join(parent_dir, 'venv')) | ||
722 | 557 | |||
723 | 558 | # Upgrade setuptools from default virtualenv version. The default version | ||
724 | 559 | # in trusty breaks update.py in global requirements master branch. | ||
725 | 560 | pip_install('setuptools', upgrade=True, proxy=http_proxy, | ||
726 | 561 | venv=os.path.join(parent_dir, 'venv')) | ||
727 | 562 | |||
728 | 537 | for p in projects['repositories']: | 563 | for p in projects['repositories']: |
729 | 538 | repo = p['repository'] | 564 | repo = p['repository'] |
730 | 539 | branch = p['branch'] | 565 | branch = p['branch'] |
731 | 540 | if p['name'] == 'requirements': | 566 | if p['name'] == 'requirements': |
733 | 541 | repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, | 567 | repo_dir = _git_clone_and_install_single(repo, branch, depth, |
734 | 568 | parent_dir, http_proxy, | ||
735 | 542 | update_requirements=False) | 569 | update_requirements=False) |
736 | 543 | requirements_dir = repo_dir | 570 | requirements_dir = repo_dir |
737 | 544 | else: | 571 | else: |
739 | 545 | repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, | 572 | repo_dir = _git_clone_and_install_single(repo, branch, depth, |
740 | 573 | parent_dir, http_proxy, | ||
741 | 546 | update_requirements=True) | 574 | update_requirements=True) |
742 | 547 | 575 | ||
743 | 548 | os.environ = old_environ | 576 | os.environ = old_environ |
744 | @@ -574,7 +602,8 @@ | |||
745 | 574 | error_out('openstack-origin-git key \'{}\' is missing'.format(key)) | 602 | error_out('openstack-origin-git key \'{}\' is missing'.format(key)) |
746 | 575 | 603 | ||
747 | 576 | 604 | ||
749 | 577 | def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): | 605 | def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, |
750 | 606 | update_requirements): | ||
751 | 578 | """ | 607 | """ |
752 | 579 | Clone and install a single git repository. | 608 | Clone and install a single git repository. |
753 | 580 | """ | 609 | """ |
754 | @@ -587,23 +616,29 @@ | |||
755 | 587 | 616 | ||
756 | 588 | if not os.path.exists(dest_dir): | 617 | if not os.path.exists(dest_dir): |
757 | 589 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) | 618 | juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) |
759 | 590 | repo_dir = install_remote(repo, dest=parent_dir, branch=branch) | 619 | repo_dir = install_remote(repo, dest=parent_dir, branch=branch, |
760 | 620 | depth=depth) | ||
761 | 591 | else: | 621 | else: |
762 | 592 | repo_dir = dest_dir | 622 | repo_dir = dest_dir |
763 | 593 | 623 | ||
764 | 624 | venv = os.path.join(parent_dir, 'venv') | ||
765 | 625 | |||
766 | 594 | if update_requirements: | 626 | if update_requirements: |
767 | 595 | if not requirements_dir: | 627 | if not requirements_dir: |
768 | 596 | error_out('requirements repo must be cloned before ' | 628 | error_out('requirements repo must be cloned before ' |
769 | 597 | 'updating from global requirements.') | 629 | 'updating from global requirements.') |
771 | 598 | _git_update_requirements(repo_dir, requirements_dir) | 630 | _git_update_requirements(venv, repo_dir, requirements_dir) |
772 | 599 | 631 | ||
773 | 600 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) | 632 | juju_log('Installing git repo from dir: {}'.format(repo_dir)) |
775 | 601 | pip_install(repo_dir) | 633 | if http_proxy: |
776 | 634 | pip_install(repo_dir, proxy=http_proxy, venv=venv) | ||
777 | 635 | else: | ||
778 | 636 | pip_install(repo_dir, venv=venv) | ||
779 | 602 | 637 | ||
780 | 603 | return repo_dir | 638 | return repo_dir |
781 | 604 | 639 | ||
782 | 605 | 640 | ||
784 | 606 | def _git_update_requirements(package_dir, reqs_dir): | 641 | def _git_update_requirements(venv, package_dir, reqs_dir): |
785 | 607 | """ | 642 | """ |
786 | 608 | Update from global requirements. | 643 | Update from global requirements. |
787 | 609 | 644 | ||
788 | @@ -612,25 +647,38 @@ | |||
789 | 612 | """ | 647 | """ |
790 | 613 | orig_dir = os.getcwd() | 648 | orig_dir = os.getcwd() |
791 | 614 | os.chdir(reqs_dir) | 649 | os.chdir(reqs_dir) |
793 | 615 | cmd = ['python', 'update.py', package_dir] | 650 | python = os.path.join(venv, 'bin/python') |
794 | 651 | cmd = [python, 'update.py', package_dir] | ||
795 | 616 | try: | 652 | try: |
796 | 617 | subprocess.check_call(cmd) | 653 | subprocess.check_call(cmd) |
797 | 618 | except subprocess.CalledProcessError: | 654 | except subprocess.CalledProcessError: |
798 | 619 | package = os.path.basename(package_dir) | 655 | package = os.path.basename(package_dir) |
800 | 620 | error_out("Error updating {} from global-requirements.txt".format(package)) | 656 | error_out("Error updating {} from " |
801 | 657 | "global-requirements.txt".format(package)) | ||
802 | 621 | os.chdir(orig_dir) | 658 | os.chdir(orig_dir) |
803 | 622 | 659 | ||
804 | 623 | 660 | ||
805 | 661 | def git_pip_venv_dir(projects_yaml): | ||
806 | 662 | """ | ||
807 | 663 | Return the pip virtualenv path. | ||
808 | 664 | """ | ||
809 | 665 | parent_dir = '/mnt/openstack-git' | ||
810 | 666 | |||
811 | 667 | projects = _git_yaml_load(projects_yaml) | ||
812 | 668 | |||
813 | 669 | if 'directory' in projects.keys(): | ||
814 | 670 | parent_dir = projects['directory'] | ||
815 | 671 | |||
816 | 672 | return os.path.join(parent_dir, 'venv') | ||
817 | 673 | |||
818 | 674 | |||
819 | 624 | def git_src_dir(projects_yaml, project): | 675 | def git_src_dir(projects_yaml, project): |
820 | 625 | """ | 676 | """ |
821 | 626 | Return the directory where the specified project's source is located. | 677 | Return the directory where the specified project's source is located. |
822 | 627 | """ | 678 | """ |
823 | 628 | parent_dir = '/mnt/openstack-git' | 679 | parent_dir = '/mnt/openstack-git' |
824 | 629 | 680 | ||
829 | 630 | if not projects_yaml: | 681 | projects = _git_yaml_load(projects_yaml) |
826 | 631 | return | ||
827 | 632 | |||
828 | 633 | projects = yaml.load(projects_yaml) | ||
830 | 634 | 682 | ||
831 | 635 | if 'directory' in projects.keys(): | 683 | if 'directory' in projects.keys(): |
832 | 636 | parent_dir = projects['directory'] | 684 | parent_dir = projects['directory'] |
833 | @@ -640,3 +688,15 @@ | |||
834 | 640 | return os.path.join(parent_dir, os.path.basename(p['repository'])) | 688 | return os.path.join(parent_dir, os.path.basename(p['repository'])) |
835 | 641 | 689 | ||
836 | 642 | return None | 690 | return None |
837 | 691 | |||
838 | 692 | |||
839 | 693 | def git_yaml_value(projects_yaml, key): | ||
840 | 694 | """ | ||
841 | 695 | Return the value in projects_yaml for the specified key. | ||
842 | 696 | """ | ||
843 | 697 | projects = _git_yaml_load(projects_yaml) | ||
844 | 698 | |||
845 | 699 | if key in projects.keys(): | ||
846 | 700 | return projects[key] | ||
847 | 701 | |||
848 | 702 | return None | ||
849 | 643 | 703 | ||
850 | === modified file 'hooks/charmhelpers/contrib/python/packages.py' | |||
851 | --- hooks/charmhelpers/contrib/python/packages.py 2015-03-20 17:15:02 +0000 | |||
852 | +++ hooks/charmhelpers/contrib/python/packages.py 2015-06-30 20:18:04 +0000 | |||
853 | @@ -17,8 +17,11 @@ | |||
854 | 17 | # You should have received a copy of the GNU Lesser General Public License | 17 | # You should have received a copy of the GNU Lesser General Public License |
855 | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 18 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
856 | 19 | 19 | ||
857 | 20 | import os | ||
858 | 21 | import subprocess | ||
859 | 22 | |||
860 | 20 | from charmhelpers.fetch import apt_install, apt_update | 23 | from charmhelpers.fetch import apt_install, apt_update |
862 | 21 | from charmhelpers.core.hookenv import log | 24 | from charmhelpers.core.hookenv import charm_dir, log |
863 | 22 | 25 | ||
864 | 23 | try: | 26 | try: |
865 | 24 | from pip import main as pip_execute | 27 | from pip import main as pip_execute |
866 | @@ -33,6 +36,8 @@ | |||
867 | 33 | def parse_options(given, available): | 36 | def parse_options(given, available): |
868 | 34 | """Given a set of options, check if available""" | 37 | """Given a set of options, check if available""" |
869 | 35 | for key, value in sorted(given.items()): | 38 | for key, value in sorted(given.items()): |
870 | 39 | if not value: | ||
871 | 40 | continue | ||
872 | 36 | if key in available: | 41 | if key in available: |
873 | 37 | yield "--{0}={1}".format(key, value) | 42 | yield "--{0}={1}".format(key, value) |
874 | 38 | 43 | ||
875 | @@ -51,11 +56,15 @@ | |||
876 | 51 | pip_execute(command) | 56 | pip_execute(command) |
877 | 52 | 57 | ||
878 | 53 | 58 | ||
880 | 54 | def pip_install(package, fatal=False, upgrade=False, **options): | 59 | def pip_install(package, fatal=False, upgrade=False, venv=None, **options): |
881 | 55 | """Install a python package""" | 60 | """Install a python package""" |
883 | 56 | command = ["install"] | 61 | if venv: |
884 | 62 | venv_python = os.path.join(venv, 'bin/pip') | ||
885 | 63 | command = [venv_python, "install"] | ||
886 | 64 | else: | ||
887 | 65 | command = ["install"] | ||
888 | 57 | 66 | ||
890 | 58 | available_options = ('proxy', 'src', 'log', "index-url", ) | 67 | available_options = ('proxy', 'src', 'log', 'index-url', ) |
891 | 59 | for option in parse_options(options, available_options): | 68 | for option in parse_options(options, available_options): |
892 | 60 | command.append(option) | 69 | command.append(option) |
893 | 61 | 70 | ||
894 | @@ -69,7 +78,10 @@ | |||
895 | 69 | 78 | ||
896 | 70 | log("Installing {} package with options: {}".format(package, | 79 | log("Installing {} package with options: {}".format(package, |
897 | 71 | command)) | 80 | command)) |
899 | 72 | pip_execute(command) | 81 | if venv: |
900 | 82 | subprocess.check_call(command) | ||
901 | 83 | else: | ||
902 | 84 | pip_execute(command) | ||
903 | 73 | 85 | ||
904 | 74 | 86 | ||
905 | 75 | def pip_uninstall(package, **options): | 87 | def pip_uninstall(package, **options): |
906 | @@ -94,3 +106,16 @@ | |||
907 | 94 | """Returns the list of current python installed packages | 106 | """Returns the list of current python installed packages |
908 | 95 | """ | 107 | """ |
909 | 96 | return pip_execute(["list"]) | 108 | return pip_execute(["list"]) |
910 | 109 | |||
911 | 110 | |||
912 | 111 | def pip_create_virtualenv(path=None): | ||
913 | 112 | """Create an isolated Python environment.""" | ||
914 | 113 | apt_install('python-virtualenv') | ||
915 | 114 | |||
916 | 115 | if path: | ||
917 | 116 | venv_path = path | ||
918 | 117 | else: | ||
919 | 118 | venv_path = os.path.join(charm_dir(), 'venv') | ||
920 | 119 | |||
921 | 120 | if not os.path.exists(venv_path): | ||
922 | 121 | subprocess.check_call(['virtualenv', venv_path]) | ||
923 | 97 | 122 | ||
924 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
925 | --- hooks/charmhelpers/core/hookenv.py 2015-04-16 19:53:49 +0000 | |||
926 | +++ hooks/charmhelpers/core/hookenv.py 2015-06-30 20:18:04 +0000 | |||
927 | @@ -21,12 +21,16 @@ | |||
928 | 21 | # Charm Helpers Developers <juju@lists.ubuntu.com> | 21 | # Charm Helpers Developers <juju@lists.ubuntu.com> |
929 | 22 | 22 | ||
930 | 23 | from __future__ import print_function | 23 | from __future__ import print_function |
931 | 24 | from distutils.version import LooseVersion | ||
932 | 25 | from functools import wraps | ||
933 | 26 | import glob | ||
934 | 24 | import os | 27 | import os |
935 | 25 | import json | 28 | import json |
936 | 26 | import yaml | 29 | import yaml |
937 | 27 | import subprocess | 30 | import subprocess |
938 | 28 | import sys | 31 | import sys |
939 | 29 | import errno | 32 | import errno |
940 | 33 | import tempfile | ||
941 | 30 | from subprocess import CalledProcessError | 34 | from subprocess import CalledProcessError |
942 | 31 | 35 | ||
943 | 32 | import six | 36 | import six |
944 | @@ -58,15 +62,17 @@ | |||
945 | 58 | 62 | ||
946 | 59 | will cache the result of unit_get + 'test' for future calls. | 63 | will cache the result of unit_get + 'test' for future calls. |
947 | 60 | """ | 64 | """ |
948 | 65 | @wraps(func) | ||
949 | 61 | def wrapper(*args, **kwargs): | 66 | def wrapper(*args, **kwargs): |
950 | 62 | global cache | 67 | global cache |
951 | 63 | key = str((func, args, kwargs)) | 68 | key = str((func, args, kwargs)) |
952 | 64 | try: | 69 | try: |
953 | 65 | return cache[key] | 70 | return cache[key] |
954 | 66 | except KeyError: | 71 | except KeyError: |
958 | 67 | res = func(*args, **kwargs) | 72 | pass # Drop out of the exception handler scope. |
959 | 68 | cache[key] = res | 73 | res = func(*args, **kwargs) |
960 | 69 | return res | 74 | cache[key] = res |
961 | 75 | return res | ||
962 | 70 | return wrapper | 76 | return wrapper |
963 | 71 | 77 | ||
964 | 72 | 78 | ||
965 | @@ -178,7 +184,7 @@ | |||
966 | 178 | 184 | ||
967 | 179 | def remote_unit(): | 185 | def remote_unit(): |
968 | 180 | """The remote unit for the current relation hook""" | 186 | """The remote unit for the current relation hook""" |
970 | 181 | return os.environ['JUJU_REMOTE_UNIT'] | 187 | return os.environ.get('JUJU_REMOTE_UNIT', None) |
971 | 182 | 188 | ||
972 | 183 | 189 | ||
973 | 184 | def service_name(): | 190 | def service_name(): |
974 | @@ -238,23 +244,7 @@ | |||
975 | 238 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) | 244 | self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
976 | 239 | if os.path.exists(self.path): | 245 | if os.path.exists(self.path): |
977 | 240 | self.load_previous() | 246 | self.load_previous() |
995 | 241 | 247 | atexit(self._implicit_save) | |
979 | 242 | def __getitem__(self, key): | ||
980 | 243 | """For regular dict lookups, check the current juju config first, | ||
981 | 244 | then the previous (saved) copy. This ensures that user-saved values | ||
982 | 245 | will be returned by a dict lookup. | ||
983 | 246 | |||
984 | 247 | """ | ||
985 | 248 | try: | ||
986 | 249 | return dict.__getitem__(self, key) | ||
987 | 250 | except KeyError: | ||
988 | 251 | return (self._prev_dict or {})[key] | ||
989 | 252 | |||
990 | 253 | def keys(self): | ||
991 | 254 | prev_keys = [] | ||
992 | 255 | if self._prev_dict is not None: | ||
993 | 256 | prev_keys = self._prev_dict.keys() | ||
994 | 257 | return list(set(prev_keys + list(dict.keys(self)))) | ||
996 | 258 | 248 | ||
997 | 259 | def load_previous(self, path=None): | 249 | def load_previous(self, path=None): |
998 | 260 | """Load previous copy of config from disk. | 250 | """Load previous copy of config from disk. |
999 | @@ -273,6 +263,9 @@ | |||
1000 | 273 | self.path = path or self.path | 263 | self.path = path or self.path |
1001 | 274 | with open(self.path) as f: | 264 | with open(self.path) as f: |
1002 | 275 | self._prev_dict = json.load(f) | 265 | self._prev_dict = json.load(f) |
1003 | 266 | for k, v in self._prev_dict.items(): | ||
1004 | 267 | if k not in self: | ||
1005 | 268 | self[k] = v | ||
1006 | 276 | 269 | ||
1007 | 277 | def changed(self, key): | 270 | def changed(self, key): |
1008 | 278 | """Return True if the current value for this key is different from | 271 | """Return True if the current value for this key is different from |
1009 | @@ -304,13 +297,13 @@ | |||
1010 | 304 | instance. | 297 | instance. |
1011 | 305 | 298 | ||
1012 | 306 | """ | 299 | """ |
1013 | 307 | if self._prev_dict: | ||
1014 | 308 | for k, v in six.iteritems(self._prev_dict): | ||
1015 | 309 | if k not in self: | ||
1016 | 310 | self[k] = v | ||
1017 | 311 | with open(self.path, 'w') as f: | 300 | with open(self.path, 'w') as f: |
1018 | 312 | json.dump(self, f) | 301 | json.dump(self, f) |
1019 | 313 | 302 | ||
1020 | 303 | def _implicit_save(self): | ||
1021 | 304 | if self.implicit_save: | ||
1022 | 305 | self.save() | ||
1023 | 306 | |||
1024 | 314 | 307 | ||
1025 | 315 | @cached | 308 | @cached |
1026 | 316 | def config(scope=None): | 309 | def config(scope=None): |
1027 | @@ -353,18 +346,49 @@ | |||
1028 | 353 | """Set relation information for the current unit""" | 346 | """Set relation information for the current unit""" |
1029 | 354 | relation_settings = relation_settings if relation_settings else {} | 347 | relation_settings = relation_settings if relation_settings else {} |
1030 | 355 | relation_cmd_line = ['relation-set'] | 348 | relation_cmd_line = ['relation-set'] |
1031 | 349 | accepts_file = "--file" in subprocess.check_output( | ||
1032 | 350 | relation_cmd_line + ["--help"], universal_newlines=True) | ||
1033 | 356 | if relation_id is not None: | 351 | if relation_id is not None: |
1034 | 357 | relation_cmd_line.extend(('-r', relation_id)) | 352 | relation_cmd_line.extend(('-r', relation_id)) |
1041 | 358 | for k, v in (list(relation_settings.items()) + list(kwargs.items())): | 353 | settings = relation_settings.copy() |
1042 | 359 | if v is None: | 354 | settings.update(kwargs) |
1043 | 360 | relation_cmd_line.append('{}='.format(k)) | 355 | for key, value in settings.items(): |
1044 | 361 | else: | 356 | # Force value to be a string: it always should, but some call |
1045 | 362 | relation_cmd_line.append('{}={}'.format(k, v)) | 357 | # sites pass in things like dicts or numbers. |
1046 | 363 | subprocess.check_call(relation_cmd_line) | 358 | if value is not None: |
1047 | 359 | settings[key] = "{}".format(value) | ||
1048 | 360 | if accepts_file: | ||
1049 | 361 | # --file was introduced in Juju 1.23.2. Use it by default if | ||
1050 | 362 | # available, since otherwise we'll break if the relation data is | ||
1051 | 363 | # too big. Ideally we should tell relation-set to read the data from | ||
1052 | 364 | # stdin, but that feature is broken in 1.23.2: Bug #1454678. | ||
1053 | 365 | with tempfile.NamedTemporaryFile(delete=False) as settings_file: | ||
1054 | 366 | settings_file.write(yaml.safe_dump(settings).encode("utf-8")) | ||
1055 | 367 | subprocess.check_call( | ||
1056 | 368 | relation_cmd_line + ["--file", settings_file.name]) | ||
1057 | 369 | os.remove(settings_file.name) | ||
1058 | 370 | else: | ||
1059 | 371 | for key, value in settings.items(): | ||
1060 | 372 | if value is None: | ||
1061 | 373 | relation_cmd_line.append('{}='.format(key)) | ||
1062 | 374 | else: | ||
1063 | 375 | relation_cmd_line.append('{}={}'.format(key, value)) | ||
1064 | 376 | subprocess.check_call(relation_cmd_line) | ||
1065 | 364 | # Flush cache of any relation-gets for local unit | 377 | # Flush cache of any relation-gets for local unit |
1066 | 365 | flush(local_unit()) | 378 | flush(local_unit()) |
1067 | 366 | 379 | ||
1068 | 367 | 380 | ||
1069 | 381 | def relation_clear(r_id=None): | ||
1070 | 382 | ''' Clears any relation data already set on relation r_id ''' | ||
1071 | 383 | settings = relation_get(rid=r_id, | ||
1072 | 384 | unit=local_unit()) | ||
1073 | 385 | for setting in settings: | ||
1074 | 386 | if setting not in ['public-address', 'private-address']: | ||
1075 | 387 | settings[setting] = None | ||
1076 | 388 | relation_set(relation_id=r_id, | ||
1077 | 389 | **settings) | ||
1078 | 390 | |||
1079 | 391 | |||
1080 | 368 | @cached | 392 | @cached |
1081 | 369 | def relation_ids(reltype=None): | 393 | def relation_ids(reltype=None): |
1082 | 370 | """A list of relation_ids""" | 394 | """A list of relation_ids""" |
1083 | @@ -509,6 +533,11 @@ | |||
1084 | 509 | return None | 533 | return None |
1085 | 510 | 534 | ||
1086 | 511 | 535 | ||
1087 | 536 | def unit_public_ip(): | ||
1088 | 537 | """Get this unit's public IP address""" | ||
1089 | 538 | return unit_get('public-address') | ||
1090 | 539 | |||
1091 | 540 | |||
1092 | 512 | def unit_private_ip(): | 541 | def unit_private_ip(): |
1093 | 513 | """Get this unit's private IP address""" | 542 | """Get this unit's private IP address""" |
1094 | 514 | return unit_get('private-address') | 543 | return unit_get('private-address') |
1095 | @@ -541,10 +570,14 @@ | |||
1096 | 541 | hooks.execute(sys.argv) | 570 | hooks.execute(sys.argv) |
1097 | 542 | """ | 571 | """ |
1098 | 543 | 572 | ||
1100 | 544 | def __init__(self, config_save=True): | 573 | def __init__(self, config_save=None): |
1101 | 545 | super(Hooks, self).__init__() | 574 | super(Hooks, self).__init__() |
1102 | 546 | self._hooks = {} | 575 | self._hooks = {} |
1104 | 547 | self._config_save = config_save | 576 | |
1105 | 577 | # For unknown reasons, we allow the Hooks constructor to override | ||
1106 | 578 | # config().implicit_save. | ||
1107 | 579 | if config_save is not None: | ||
1108 | 580 | config().implicit_save = config_save | ||
1109 | 548 | 581 | ||
1110 | 549 | def register(self, name, function): | 582 | def register(self, name, function): |
1111 | 550 | """Register a hook""" | 583 | """Register a hook""" |
1112 | @@ -552,13 +585,16 @@ | |||
1113 | 552 | 585 | ||
1114 | 553 | def execute(self, args): | 586 | def execute(self, args): |
1115 | 554 | """Execute a registered hook based on args[0]""" | 587 | """Execute a registered hook based on args[0]""" |
1116 | 588 | _run_atstart() | ||
1117 | 555 | hook_name = os.path.basename(args[0]) | 589 | hook_name = os.path.basename(args[0]) |
1118 | 556 | if hook_name in self._hooks: | 590 | if hook_name in self._hooks: |
1124 | 557 | self._hooks[hook_name]() | 591 | try: |
1125 | 558 | if self._config_save: | 592 | self._hooks[hook_name]() |
1126 | 559 | cfg = config() | 593 | except SystemExit as x: |
1127 | 560 | if cfg.implicit_save: | 594 | if x.code is None or x.code == 0: |
1128 | 561 | cfg.save() | 595 | _run_atexit() |
1129 | 596 | raise | ||
1130 | 597 | _run_atexit() | ||
1131 | 562 | else: | 598 | else: |
1132 | 563 | raise UnregisteredHookError(hook_name) | 599 | raise UnregisteredHookError(hook_name) |
1133 | 564 | 600 | ||
1134 | @@ -605,3 +641,160 @@ | |||
1135 | 605 | 641 | ||
1136 | 606 | The results set by action_set are preserved.""" | 642 | The results set by action_set are preserved.""" |
1137 | 607 | subprocess.check_call(['action-fail', message]) | 643 | subprocess.check_call(['action-fail', message]) |
1138 | 644 | |||
1139 | 645 | |||
1140 | 646 | def status_set(workload_state, message): | ||
1141 | 647 | """Set the workload state with a message | ||
1142 | 648 | |||
1143 | 649 | Use status-set to set the workload state with a message which is visible | ||
1144 | 650 | to the user via juju status. If the status-set command is not found then | ||
1145 | 651 | assume this is juju < 1.23 and juju-log the message unstead. | ||
1146 | 652 | |||
1147 | 653 | workload_state -- valid juju workload state. | ||
1148 | 654 | message -- status update message | ||
1149 | 655 | """ | ||
1150 | 656 | valid_states = ['maintenance', 'blocked', 'waiting', 'active'] | ||
1151 | 657 | if workload_state not in valid_states: | ||
1152 | 658 | raise ValueError( | ||
1153 | 659 | '{!r} is not a valid workload state'.format(workload_state) | ||
1154 | 660 | ) | ||
1155 | 661 | cmd = ['status-set', workload_state, message] | ||
1156 | 662 | try: | ||
1157 | 663 | ret = subprocess.call(cmd) | ||
1158 | 664 | if ret == 0: | ||
1159 | 665 | return | ||
1160 | 666 | except OSError as e: | ||
1161 | 667 | if e.errno != errno.ENOENT: | ||
1162 | 668 | raise | ||
1163 | 669 | log_message = 'status-set failed: {} {}'.format(workload_state, | ||
1164 | 670 | message) | ||
1165 | 671 | log(log_message, level='INFO') | ||
1166 | 672 | |||
1167 | 673 | |||
1168 | 674 | def status_get(): | ||
1169 | 675 | """Retrieve the previously set juju workload state | ||
1170 | 676 | |||
1171 | 677 | If the status-set command is not found then assume this is juju < 1.23 and | ||
1172 | 678 | return 'unknown' | ||
1173 | 679 | """ | ||
1174 | 680 | cmd = ['status-get'] | ||
1175 | 681 | try: | ||
1176 | 682 | raw_status = subprocess.check_output(cmd, universal_newlines=True) | ||
1177 | 683 | status = raw_status.rstrip() | ||
1178 | 684 | return status | ||
1179 | 685 | except OSError as e: | ||
1180 | 686 | if e.errno == errno.ENOENT: | ||
1181 | 687 | return 'unknown' | ||
1182 | 688 | else: | ||
1183 | 689 | raise | ||
1184 | 690 | |||
1185 | 691 | |||
1186 | 692 | def translate_exc(from_exc, to_exc): | ||
1187 | 693 | def inner_translate_exc1(f): | ||
1188 | 694 | def inner_translate_exc2(*args, **kwargs): | ||
1189 | 695 | try: | ||
1190 | 696 | return f(*args, **kwargs) | ||
1191 | 697 | except from_exc: | ||
1192 | 698 | raise to_exc | ||
1193 | 699 | |||
1194 | 700 | return inner_translate_exc2 | ||
1195 | 701 | |||
1196 | 702 | return inner_translate_exc1 | ||
1197 | 703 | |||
1198 | 704 | |||
1199 | 705 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1200 | 706 | def is_leader(): | ||
1201 | 707 | """Does the current unit hold the juju leadership | ||
1202 | 708 | |||
1203 | 709 | Uses juju to determine whether the current unit is the leader of its peers | ||
1204 | 710 | """ | ||
1205 | 711 | cmd = ['is-leader', '--format=json'] | ||
1206 | 712 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | ||
1207 | 713 | |||
1208 | 714 | |||
1209 | 715 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1210 | 716 | def leader_get(attribute=None): | ||
1211 | 717 | """Juju leader get value(s)""" | ||
1212 | 718 | cmd = ['leader-get', '--format=json'] + [attribute or '-'] | ||
1213 | 719 | return json.loads(subprocess.check_output(cmd).decode('UTF-8')) | ||
1214 | 720 | |||
1215 | 721 | |||
1216 | 722 | @translate_exc(from_exc=OSError, to_exc=NotImplementedError) | ||
1217 | 723 | def leader_set(settings=None, **kwargs): | ||
1218 | 724 | """Juju leader set value(s)""" | ||
1219 | 725 | # Don't log secrets. | ||
1220 | 726 | # log("Juju leader-set '%s'" % (settings), level=DEBUG) | ||
1221 | 727 | cmd = ['leader-set'] | ||
1222 | 728 | settings = settings or {} | ||
1223 | 729 | settings.update(kwargs) | ||
1224 | 730 | for k, v in settings.items(): | ||
1225 | 731 | if v is None: | ||
1226 | 732 | cmd.append('{}='.format(k)) | ||
1227 | 733 | else: | ||
1228 | 734 | cmd.append('{}={}'.format(k, v)) | ||
1229 | 735 | subprocess.check_call(cmd) | ||
1230 | 736 | |||
1231 | 737 | |||
1232 | 738 | @cached | ||
1233 | 739 | def juju_version(): | ||
1234 | 740 | """Full version string (eg. '1.23.3.1-trusty-amd64')""" | ||
1235 | 741 | # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 | ||
1236 | 742 | jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] | ||
1237 | 743 | return subprocess.check_output([jujud, 'version'], | ||
1238 | 744 | universal_newlines=True).strip() | ||
1239 | 745 | |||
1240 | 746 | |||
1241 | 747 | @cached | ||
1242 | 748 | def has_juju_version(minimum_version): | ||
1243 | 749 | """Return True if the Juju version is at least the provided version""" | ||
1244 | 750 | return LooseVersion(juju_version()) >= LooseVersion(minimum_version) | ||
1245 | 751 | |||
1246 | 752 | |||
1247 | 753 | _atexit = [] | ||
1248 | 754 | _atstart = [] | ||
1249 | 755 | |||
1250 | 756 | |||
1251 | 757 | def atstart(callback, *args, **kwargs): | ||
1252 | 758 | '''Schedule a callback to run before the main hook. | ||
1253 | 759 | |||
1254 | 760 | Callbacks are run in the order they were added. | ||
1255 | 761 | |||
1256 | 762 | This is useful for modules and classes to perform initialization | ||
1257 | 763 | and inject behavior. In particular: | ||
1258 | 764 | - Run common code before all of your hooks, such as logging | ||
1259 | 765 | the hook name or interesting relation data. | ||
1260 | 766 | - Defer object or module initialization that requires a hook | ||
1261 | 767 | context until we know there actually is a hook context, | ||
1262 | 768 | making testing easier. | ||
1263 | 769 | - Rather than requiring charm authors to include boilerplate to | ||
1264 | 770 | invoke your helper's behavior, have it run automatically if | ||
1265 | 771 | your object is instantiated or module imported. | ||
1266 | 772 | |||
1267 | 773 | This is not at all useful after your hook framework as been launched. | ||
1268 | 774 | ''' | ||
1269 | 775 | global _atstart | ||
1270 | 776 | _atstart.append((callback, args, kwargs)) | ||
1271 | 777 | |||
1272 | 778 | |||
1273 | 779 | def atexit(callback, *args, **kwargs): | ||
1274 | 780 | '''Schedule a callback to run on successful hook completion. | ||
1275 | 781 | |||
1276 | 782 | Callbacks are run in the reverse order that they were added.''' | ||
1277 | 783 | _atexit.append((callback, args, kwargs)) | ||
1278 | 784 | |||
1279 | 785 | |||
1280 | 786 | def _run_atstart(): | ||
1281 | 787 | '''Hook frameworks must invoke this before running the main hook body.''' | ||
1282 | 788 | global _atstart | ||
1283 | 789 | for callback, args, kwargs in _atstart: | ||
1284 | 790 | callback(*args, **kwargs) | ||
1285 | 791 | del _atstart[:] | ||
1286 | 792 | |||
1287 | 793 | |||
1288 | 794 | def _run_atexit(): | ||
1289 | 795 | '''Hook frameworks must invoke this after the main hook body has | ||
1290 | 796 | successfully completed. Do not invoke it if the hook fails.''' | ||
1291 | 797 | global _atexit | ||
1292 | 798 | for callback, args, kwargs in reversed(_atexit): | ||
1293 | 799 | callback(*args, **kwargs) | ||
1294 | 800 | del _atexit[:] | ||
1295 | 608 | 801 | ||
1296 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1297 | --- hooks/charmhelpers/core/host.py 2015-03-20 17:15:02 +0000 | |||
1298 | +++ hooks/charmhelpers/core/host.py 2015-06-30 20:18:04 +0000 | |||
1299 | @@ -24,6 +24,7 @@ | |||
1300 | 24 | import os | 24 | import os |
1301 | 25 | import re | 25 | import re |
1302 | 26 | import pwd | 26 | import pwd |
1303 | 27 | import glob | ||
1304 | 27 | import grp | 28 | import grp |
1305 | 28 | import random | 29 | import random |
1306 | 29 | import string | 30 | import string |
1307 | @@ -90,7 +91,7 @@ | |||
1308 | 90 | ['service', service_name, 'status'], | 91 | ['service', service_name, 'status'], |
1309 | 91 | stderr=subprocess.STDOUT).decode('UTF-8') | 92 | stderr=subprocess.STDOUT).decode('UTF-8') |
1310 | 92 | except subprocess.CalledProcessError as e: | 93 | except subprocess.CalledProcessError as e: |
1312 | 93 | return 'unrecognized service' not in e.output | 94 | return b'unrecognized service' not in e.output |
1313 | 94 | else: | 95 | else: |
1314 | 95 | return True | 96 | return True |
1315 | 96 | 97 | ||
1316 | @@ -269,6 +270,21 @@ | |||
1317 | 269 | return None | 270 | return None |
1318 | 270 | 271 | ||
1319 | 271 | 272 | ||
1320 | 273 | def path_hash(path): | ||
1321 | 274 | """ | ||
1322 | 275 | Generate a hash checksum of all files matching 'path'. Standard wildcards | ||
1323 | 276 | like '*' and '?' are supported, see documentation for the 'glob' module for | ||
1324 | 277 | more information. | ||
1325 | 278 | |||
1326 | 279 | :return: dict: A { filename: hash } dictionary for all matched files. | ||
1327 | 280 | Empty if none found. | ||
1328 | 281 | """ | ||
1329 | 282 | return { | ||
1330 | 283 | filename: file_hash(filename) | ||
1331 | 284 | for filename in glob.iglob(path) | ||
1332 | 285 | } | ||
1333 | 286 | |||
1334 | 287 | |||
1335 | 272 | def check_hash(path, checksum, hash_type='md5'): | 288 | def check_hash(path, checksum, hash_type='md5'): |
1336 | 273 | """ | 289 | """ |
1337 | 274 | Validate a file using a cryptographic checksum. | 290 | Validate a file using a cryptographic checksum. |
1338 | @@ -296,23 +312,25 @@ | |||
1339 | 296 | 312 | ||
1340 | 297 | @restart_on_change({ | 313 | @restart_on_change({ |
1341 | 298 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 314 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
1342 | 315 | '/etc/apache/sites-enabled/*': [ 'apache2' ] | ||
1343 | 299 | }) | 316 | }) |
1345 | 300 | def ceph_client_changed(): | 317 | def config_changed(): |
1346 | 301 | pass # your code here | 318 | pass # your code here |
1347 | 302 | 319 | ||
1348 | 303 | In this example, the cinder-api and cinder-volume services | 320 | In this example, the cinder-api and cinder-volume services |
1349 | 304 | would be restarted if /etc/ceph/ceph.conf is changed by the | 321 | would be restarted if /etc/ceph/ceph.conf is changed by the |
1351 | 305 | ceph_client_changed function. | 322 | ceph_client_changed function. The apache2 service would be |
1352 | 323 | restarted if any file matching the pattern got changed, created | ||
1353 | 324 | or removed. Standard wildcards are supported, see documentation | ||
1354 | 325 | for the 'glob' module for more information. | ||
1355 | 306 | """ | 326 | """ |
1356 | 307 | def wrap(f): | 327 | def wrap(f): |
1357 | 308 | def wrapped_f(*args, **kwargs): | 328 | def wrapped_f(*args, **kwargs): |
1361 | 309 | checksums = {} | 329 | checksums = {path: path_hash(path) for path in restart_map} |
1359 | 310 | for path in restart_map: | ||
1360 | 311 | checksums[path] = file_hash(path) | ||
1362 | 312 | f(*args, **kwargs) | 330 | f(*args, **kwargs) |
1363 | 313 | restarts = [] | 331 | restarts = [] |
1364 | 314 | for path in restart_map: | 332 | for path in restart_map: |
1366 | 315 | if checksums[path] != file_hash(path): | 333 | if path_hash(path) != checksums[path]: |
1367 | 316 | restarts += restart_map[path] | 334 | restarts += restart_map[path] |
1368 | 317 | services_list = list(OrderedDict.fromkeys(restarts)) | 335 | services_list = list(OrderedDict.fromkeys(restarts)) |
1369 | 318 | if not stopstart: | 336 | if not stopstart: |
1370 | 319 | 337 | ||
1371 | === modified file 'hooks/charmhelpers/core/services/base.py' | |||
1372 | --- hooks/charmhelpers/core/services/base.py 2015-03-20 17:15:02 +0000 | |||
1373 | +++ hooks/charmhelpers/core/services/base.py 2015-06-30 20:18:04 +0000 | |||
1374 | @@ -15,9 +15,9 @@ | |||
1375 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
1376 | 16 | 16 | ||
1377 | 17 | import os | 17 | import os |
1378 | 18 | import re | ||
1379 | 19 | import json | 18 | import json |
1381 | 20 | from collections import Iterable | 19 | from inspect import getargspec |
1382 | 20 | from collections import Iterable, OrderedDict | ||
1383 | 21 | 21 | ||
1384 | 22 | from charmhelpers.core import host | 22 | from charmhelpers.core import host |
1385 | 23 | from charmhelpers.core import hookenv | 23 | from charmhelpers.core import hookenv |
1386 | @@ -119,7 +119,7 @@ | |||
1387 | 119 | """ | 119 | """ |
1388 | 120 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') | 120 | self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') |
1389 | 121 | self._ready = None | 121 | self._ready = None |
1391 | 122 | self.services = {} | 122 | self.services = OrderedDict() |
1392 | 123 | for service in services or []: | 123 | for service in services or []: |
1393 | 124 | service_name = service['service'] | 124 | service_name = service['service'] |
1394 | 125 | self.services[service_name] = service | 125 | self.services[service_name] = service |
1395 | @@ -128,15 +128,18 @@ | |||
1396 | 128 | """ | 128 | """ |
1397 | 129 | Handle the current hook by doing The Right Thing with the registered services. | 129 | Handle the current hook by doing The Right Thing with the registered services. |
1398 | 130 | """ | 130 | """ |
1408 | 131 | hook_name = hookenv.hook_name() | 131 | hookenv._run_atstart() |
1409 | 132 | if hook_name == 'stop': | 132 | try: |
1410 | 133 | self.stop_services() | 133 | hook_name = hookenv.hook_name() |
1411 | 134 | else: | 134 | if hook_name == 'stop': |
1412 | 135 | self.provide_data() | 135 | self.stop_services() |
1413 | 136 | self.reconfigure_services() | 136 | else: |
1414 | 137 | cfg = hookenv.config() | 137 | self.reconfigure_services() |
1415 | 138 | if cfg.implicit_save: | 138 | self.provide_data() |
1416 | 139 | cfg.save() | 139 | except SystemExit as x: |
1417 | 140 | if x.code is None or x.code == 0: | ||
1418 | 141 | hookenv._run_atexit() | ||
1419 | 142 | hookenv._run_atexit() | ||
1420 | 140 | 143 | ||
1421 | 141 | def provide_data(self): | 144 | def provide_data(self): |
1422 | 142 | """ | 145 | """ |
1423 | @@ -145,15 +148,36 @@ | |||
1424 | 145 | A provider must have a `name` attribute, which indicates which relation | 148 | A provider must have a `name` attribute, which indicates which relation |
1425 | 146 | to set data on, and a `provide_data()` method, which returns a dict of | 149 | to set data on, and a `provide_data()` method, which returns a dict of |
1426 | 147 | data to set. | 150 | data to set. |
1427 | 151 | |||
1428 | 152 | The `provide_data()` method can optionally accept two parameters: | ||
1429 | 153 | |||
1430 | 154 | * ``remote_service`` The name of the remote service that the data will | ||
1431 | 155 | be provided to. The `provide_data()` method will be called once | ||
1432 | 156 | for each connected service (not unit). This allows the method to | ||
1433 | 157 | tailor its data to the given service. | ||
1434 | 158 | * ``service_ready`` Whether or not the service definition had all of | ||
1435 | 159 | its requirements met, and thus the ``data_ready`` callbacks run. | ||
1436 | 160 | |||
1437 | 161 | Note that the ``provided_data`` methods are now called **after** the | ||
1438 | 162 | ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks | ||
1439 | 163 | a chance to generate any data necessary for the providing to the remote | ||
1440 | 164 | services. | ||
1441 | 148 | """ | 165 | """ |
1444 | 149 | hook_name = hookenv.hook_name() | 166 | for service_name, service in self.services.items(): |
1445 | 150 | for service in self.services.values(): | 167 | service_ready = self.is_ready(service_name) |
1446 | 151 | for provider in service.get('provided_data', []): | 168 | for provider in service.get('provided_data', []): |
1452 | 152 | if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): | 169 | for relid in hookenv.relation_ids(provider.name): |
1453 | 153 | data = provider.provide_data() | 170 | units = hookenv.related_units(relid) |
1454 | 154 | _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data | 171 | if not units: |
1455 | 155 | if _ready: | 172 | continue |
1456 | 156 | hookenv.relation_set(None, data) | 173 | remote_service = units[0].split('/')[0] |
1457 | 174 | argspec = getargspec(provider.provide_data) | ||
1458 | 175 | if len(argspec.args) > 1: | ||
1459 | 176 | data = provider.provide_data(remote_service, service_ready) | ||
1460 | 177 | else: | ||
1461 | 178 | data = provider.provide_data() | ||
1462 | 179 | if data: | ||
1463 | 180 | hookenv.relation_set(relid, data) | ||
1464 | 157 | 181 | ||
1465 | 158 | def reconfigure_services(self, *service_names): | 182 | def reconfigure_services(self, *service_names): |
1466 | 159 | """ | 183 | """ |
1467 | 160 | 184 | ||
1468 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
1469 | --- hooks/charmhelpers/fetch/__init__.py 2015-03-20 17:15:02 +0000 | |||
1470 | +++ hooks/charmhelpers/fetch/__init__.py 2015-06-30 20:18:04 +0000 | |||
1471 | @@ -158,7 +158,7 @@ | |||
1472 | 158 | 158 | ||
1473 | 159 | def apt_cache(in_memory=True): | 159 | def apt_cache(in_memory=True): |
1474 | 160 | """Build and return an apt cache""" | 160 | """Build and return an apt cache""" |
1476 | 161 | import apt_pkg | 161 | from apt import apt_pkg |
1477 | 162 | apt_pkg.init() | 162 | apt_pkg.init() |
1478 | 163 | if in_memory: | 163 | if in_memory: |
1479 | 164 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | 164 | apt_pkg.config.set("Dir::Cache::pkgcache", "") |
1480 | 165 | 165 | ||
1481 | === modified file 'hooks/charmhelpers/fetch/giturl.py' | |||
1482 | --- hooks/charmhelpers/fetch/giturl.py 2015-03-20 17:15:02 +0000 | |||
1483 | +++ hooks/charmhelpers/fetch/giturl.py 2015-06-30 20:18:04 +0000 | |||
1484 | @@ -45,14 +45,16 @@ | |||
1485 | 45 | else: | 45 | else: |
1486 | 46 | return True | 46 | return True |
1487 | 47 | 47 | ||
1489 | 48 | def clone(self, source, dest, branch): | 48 | def clone(self, source, dest, branch, depth=None): |
1490 | 49 | if not self.can_handle(source): | 49 | if not self.can_handle(source): |
1491 | 50 | raise UnhandledSource("Cannot handle {}".format(source)) | 50 | raise UnhandledSource("Cannot handle {}".format(source)) |
1492 | 51 | 51 | ||
1495 | 52 | repo = Repo.clone_from(source, dest) | 52 | if depth: |
1496 | 53 | repo.git.checkout(branch) | 53 | Repo.clone_from(source, dest, branch=branch, depth=depth) |
1497 | 54 | else: | ||
1498 | 55 | Repo.clone_from(source, dest, branch=branch) | ||
1499 | 54 | 56 | ||
1501 | 55 | def install(self, source, branch="master", dest=None): | 57 | def install(self, source, branch="master", dest=None, depth=None): |
1502 | 56 | url_parts = self.parse_url(source) | 58 | url_parts = self.parse_url(source) |
1503 | 57 | branch_name = url_parts.path.strip("/").split("/")[-1] | 59 | branch_name = url_parts.path.strip("/").split("/")[-1] |
1504 | 58 | if dest: | 60 | if dest: |
1505 | @@ -63,7 +65,7 @@ | |||
1506 | 63 | if not os.path.exists(dest_dir): | 65 | if not os.path.exists(dest_dir): |
1507 | 64 | mkdir(dest_dir, perms=0o755) | 66 | mkdir(dest_dir, perms=0o755) |
1508 | 65 | try: | 67 | try: |
1510 | 66 | self.clone(source, dest_dir, branch) | 68 | self.clone(source, dest_dir, branch, depth) |
1511 | 67 | except GitCommandError as e: | 69 | except GitCommandError as e: |
1512 | 68 | raise UnhandledSource(e.message) | 70 | raise UnhandledSource(e.message) |
1513 | 69 | except OSError as e: | 71 | except OSError as e: |
1514 | 70 | 72 | ||
1515 | === modified file 'hooks/glance_relations.py' | |||
1516 | --- hooks/glance_relations.py 2015-05-01 11:37:27 +0000 | |||
1517 | +++ hooks/glance_relations.py 2015-06-30 20:18:04 +0000 | |||
1518 | @@ -53,7 +53,7 @@ | |||
1519 | 53 | filter_installed_packages | 53 | filter_installed_packages |
1520 | 54 | ) | 54 | ) |
1521 | 55 | from charmhelpers.contrib.hahelpers.cluster import ( | 55 | from charmhelpers.contrib.hahelpers.cluster import ( |
1523 | 56 | eligible_leader, | 56 | is_elected_leader, |
1524 | 57 | get_hacluster_config | 57 | get_hacluster_config |
1525 | 58 | ) | 58 | ) |
1526 | 59 | from charmhelpers.contrib.openstack.utils import ( | 59 | from charmhelpers.contrib.openstack.utils import ( |
1527 | @@ -160,7 +160,7 @@ | |||
1528 | 160 | if rel != "essex": | 160 | if rel != "essex": |
1529 | 161 | CONFIGS.write(GLANCE_API_CONF) | 161 | CONFIGS.write(GLANCE_API_CONF) |
1530 | 162 | 162 | ||
1532 | 163 | if eligible_leader(CLUSTER_RES): | 163 | if is_elected_leader(CLUSTER_RES): |
1533 | 164 | # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units | 164 | # Bugs 1353135 & 1187508. Dbs can appear to be ready before the units |
1534 | 165 | # acl entry has been added. So, if the db supports passing a list of | 165 | # acl entry has been added. So, if the db supports passing a list of |
1535 | 166 | # permitted units then check if we're in the list. | 166 | # permitted units then check if we're in the list. |
1536 | @@ -194,7 +194,7 @@ | |||
1537 | 194 | if rel != "essex": | 194 | if rel != "essex": |
1538 | 195 | CONFIGS.write(GLANCE_API_CONF) | 195 | CONFIGS.write(GLANCE_API_CONF) |
1539 | 196 | 196 | ||
1541 | 197 | if eligible_leader(CLUSTER_RES): | 197 | if is_elected_leader(CLUSTER_RES): |
1542 | 198 | if rel == "essex": | 198 | if rel == "essex": |
1543 | 199 | status = call(['glance-manage', 'db_version']) | 199 | status = call(['glance-manage', 'db_version']) |
1544 | 200 | if status != 0: | 200 | if status != 0: |
1545 | 201 | 201 | ||
1546 | === modified file 'hooks/glance_utils.py' | |||
1547 | --- hooks/glance_utils.py 2015-04-17 12:05:48 +0000 | |||
1548 | +++ hooks/glance_utils.py 2015-06-30 20:18:04 +0000 | |||
1549 | @@ -14,6 +14,10 @@ | |||
1550 | 14 | apt_install, | 14 | apt_install, |
1551 | 15 | add_source) | 15 | add_source) |
1552 | 16 | 16 | ||
1553 | 17 | from charmhelpers.contrib.python.packages import ( | ||
1554 | 18 | pip_install, | ||
1555 | 19 | ) | ||
1556 | 20 | |||
1557 | 17 | from charmhelpers.core.hookenv import ( | 21 | from charmhelpers.core.hookenv import ( |
1558 | 18 | charm_dir, | 22 | charm_dir, |
1559 | 19 | config, | 23 | config, |
1560 | @@ -38,7 +42,7 @@ | |||
1561 | 38 | context,) | 42 | context,) |
1562 | 39 | 43 | ||
1563 | 40 | from charmhelpers.contrib.hahelpers.cluster import ( | 44 | from charmhelpers.contrib.hahelpers.cluster import ( |
1565 | 41 | eligible_leader, | 45 | is_elected_leader, |
1566 | 42 | ) | 46 | ) |
1567 | 43 | 47 | ||
1568 | 44 | from charmhelpers.contrib.openstack.alternatives import install_alternative | 48 | from charmhelpers.contrib.openstack.alternatives import install_alternative |
1569 | @@ -47,12 +51,18 @@ | |||
1570 | 47 | git_install_requested, | 51 | git_install_requested, |
1571 | 48 | git_clone_and_install, | 52 | git_clone_and_install, |
1572 | 49 | git_src_dir, | 53 | git_src_dir, |
1573 | 54 | git_yaml_value, | ||
1574 | 55 | git_pip_venv_dir, | ||
1575 | 50 | configure_installation_source, | 56 | configure_installation_source, |
1576 | 51 | os_release, | 57 | os_release, |
1577 | 52 | ) | 58 | ) |
1578 | 53 | 59 | ||
1579 | 54 | from charmhelpers.core.templating import render | 60 | from charmhelpers.core.templating import render |
1580 | 55 | 61 | ||
1581 | 62 | from charmhelpers.core.decorators import ( | ||
1582 | 63 | retry_on_exception, | ||
1583 | 64 | ) | ||
1584 | 65 | |||
1585 | 56 | CLUSTER_RES = "grp_glance_vips" | 66 | CLUSTER_RES = "grp_glance_vips" |
1586 | 57 | 67 | ||
1587 | 58 | PACKAGES = [ | 68 | PACKAGES = [ |
1588 | @@ -60,8 +70,12 @@ | |||
1589 | 60 | "python-psycopg2", "python-keystone", "python-six", "uuid", "haproxy", ] | 70 | "python-psycopg2", "python-keystone", "python-six", "uuid", "haproxy", ] |
1590 | 61 | 71 | ||
1591 | 62 | BASE_GIT_PACKAGES = [ | 72 | BASE_GIT_PACKAGES = [ |
1592 | 73 | 'libffi-dev', | ||
1593 | 74 | 'libmysqlclient-dev', | ||
1594 | 63 | 'libxml2-dev', | 75 | 'libxml2-dev', |
1595 | 64 | 'libxslt1-dev', | 76 | 'libxslt1-dev', |
1596 | 77 | 'libssl-dev', | ||
1597 | 78 | 'libyaml-dev', | ||
1598 | 65 | 'python-dev', | 79 | 'python-dev', |
1599 | 66 | 'python-pip', | 80 | 'python-pip', |
1600 | 67 | 'python-setuptools', | 81 | 'python-setuptools', |
1601 | @@ -209,6 +223,9 @@ | |||
1602 | 209 | return configs | 223 | return configs |
1603 | 210 | 224 | ||
1604 | 211 | 225 | ||
1605 | 226 | # NOTE(jamespage): Retry deals with sync issues during one-shot HA deploys. | ||
1606 | 227 | # mysql might be restarting or suchlike. | ||
1607 | 228 | @retry_on_exception(5, base_delay=3, exc_type=subprocess.CalledProcessError) | ||
1608 | 212 | def determine_packages(): | 229 | def determine_packages(): |
1609 | 213 | packages = [] + PACKAGES | 230 | packages = [] + PACKAGES |
1610 | 214 | 231 | ||
1611 | @@ -256,7 +273,7 @@ | |||
1612 | 256 | configs.write_all() | 273 | configs.write_all() |
1613 | 257 | 274 | ||
1614 | 258 | [service_stop(s) for s in services()] | 275 | [service_stop(s) for s in services()] |
1616 | 259 | if eligible_leader(CLUSTER_RES): | 276 | if is_elected_leader(CLUSTER_RES): |
1617 | 260 | migrate_database() | 277 | migrate_database() |
1618 | 261 | [service_start(s) for s in services()] | 278 | [service_start(s) for s in services()] |
1619 | 262 | 279 | ||
1620 | @@ -340,6 +357,14 @@ | |||
1621 | 340 | 357 | ||
1622 | 341 | def git_post_install(projects_yaml): | 358 | def git_post_install(projects_yaml): |
1623 | 342 | """Perform glance post-install setup.""" | 359 | """Perform glance post-install setup.""" |
1624 | 360 | http_proxy = git_yaml_value(projects_yaml, 'http_proxy') | ||
1625 | 361 | if http_proxy: | ||
1626 | 362 | pip_install('mysql-python', proxy=http_proxy, | ||
1627 | 363 | venv=git_pip_venv_dir(projects_yaml)) | ||
1628 | 364 | else: | ||
1629 | 365 | pip_install('mysql-python', | ||
1630 | 366 | venv=git_pip_venv_dir(projects_yaml)) | ||
1631 | 367 | |||
1632 | 343 | src_etc = os.path.join(git_src_dir(projects_yaml, 'glance'), 'etc') | 368 | src_etc = os.path.join(git_src_dir(projects_yaml, 'glance'), 'etc') |
1633 | 344 | configs = { | 369 | configs = { |
1634 | 345 | 'src': src_etc, | 370 | 'src': src_etc, |
1635 | @@ -350,13 +375,34 @@ | |||
1636 | 350 | shutil.rmtree(configs['dest']) | 375 | shutil.rmtree(configs['dest']) |
1637 | 351 | shutil.copytree(configs['src'], configs['dest']) | 376 | shutil.copytree(configs['src'], configs['dest']) |
1638 | 352 | 377 | ||
1639 | 378 | symlinks = [ | ||
1640 | 379 | # NOTE(coreycb): Need to find better solution than bin symlinks. | ||
1641 | 380 | {'src': os.path.join(git_pip_venv_dir(projects_yaml), | ||
1642 | 381 | 'bin/glance-manage'), | ||
1643 | 382 | 'link': '/usr/local/bin/glance-manage'}, | ||
1644 | 383 | # NOTE(coreycb): This is ugly but couldn't find pypi package that | ||
1645 | 384 | # installs rbd.py and rados.py. | ||
1646 | 385 | {'src': '/usr/lib/python2.7/dist-packages/rbd.py', | ||
1647 | 386 | 'link': os.path.join(git_pip_venv_dir(projects_yaml), | ||
1648 | 387 | 'lib/python2.7/site-packages/rbd.py')}, | ||
1649 | 388 | {'src': '/usr/lib/python2.7/dist-packages/rados.py', | ||
1650 | 389 | 'link': os.path.join(git_pip_venv_dir(projects_yaml), | ||
1651 | 390 | 'lib/python2.7/site-packages/rados.py')}, | ||
1652 | 391 | ] | ||
1653 | 392 | |||
1654 | 393 | for s in symlinks: | ||
1655 | 394 | if os.path.lexists(s['link']): | ||
1656 | 395 | os.remove(s['link']) | ||
1657 | 396 | os.symlink(s['src'], s['link']) | ||
1658 | 397 | |||
1659 | 398 | bin_dir = os.path.join(git_pip_venv_dir(projects_yaml), 'bin') | ||
1660 | 353 | glance_api_context = { | 399 | glance_api_context = { |
1661 | 354 | 'service_description': 'Glance API server', | 400 | 'service_description': 'Glance API server', |
1662 | 355 | 'service_name': 'Glance', | 401 | 'service_name': 'Glance', |
1663 | 356 | 'user_name': 'glance', | 402 | 'user_name': 'glance', |
1664 | 357 | 'start_dir': '/var/lib/glance', | 403 | 'start_dir': '/var/lib/glance', |
1665 | 358 | 'process_name': 'glance-api', | 404 | 'process_name': 'glance-api', |
1667 | 359 | 'executable_name': '/usr/local/bin/glance-api', | 405 | 'executable_name': os.path.join(bin_dir, 'glance-api'), |
1668 | 360 | 'config_files': ['/etc/glance/glance-api.conf'], | 406 | 'config_files': ['/etc/glance/glance-api.conf'], |
1669 | 361 | 'log_file': '/var/log/glance/api.log', | 407 | 'log_file': '/var/log/glance/api.log', |
1670 | 362 | } | 408 | } |
1671 | @@ -367,7 +413,7 @@ | |||
1672 | 367 | 'user_name': 'glance', | 413 | 'user_name': 'glance', |
1673 | 368 | 'start_dir': '/var/lib/glance', | 414 | 'start_dir': '/var/lib/glance', |
1674 | 369 | 'process_name': 'glance-registry', | 415 | 'process_name': 'glance-registry', |
1676 | 370 | 'executable_name': '/usr/local/bin/glance-registry', | 416 | 'executable_name': os.path.join(bin_dir, 'glance-registry'), |
1677 | 371 | 'config_files': ['/etc/glance/glance-registry.conf'], | 417 | 'config_files': ['/etc/glance/glance-registry.conf'], |
1678 | 372 | 'log_file': '/var/log/glance/registry.log', | 418 | 'log_file': '/var/log/glance/registry.log', |
1679 | 373 | } | 419 | } |
1680 | 374 | 420 | ||
1681 | === modified file 'metadata.yaml' | |||
1682 | --- metadata.yaml 2014-10-30 03:30:35 +0000 | |||
1683 | +++ metadata.yaml 2015-06-30 20:18:04 +0000 | |||
1684 | @@ -6,7 +6,7 @@ | |||
1685 | 6 | (Parallax) and an image delivery service (Teller). These services are used | 6 | (Parallax) and an image delivery service (Teller). These services are used |
1686 | 7 | in conjunction by Nova to deliver images from object stores, such as | 7 | in conjunction by Nova to deliver images from object stores, such as |
1687 | 8 | OpenStack's Swift service, to Nova's compute nodes. | 8 | OpenStack's Swift service, to Nova's compute nodes. |
1689 | 9 | categories: | 9 | tags: |
1690 | 10 | - miscellaneous | 10 | - miscellaneous |
1691 | 11 | provides: | 11 | provides: |
1692 | 12 | nrpe-external-master: | 12 | nrpe-external-master: |
1693 | 13 | 13 | ||
1694 | === modified file 'tests/00-setup' | |||
1695 | --- tests/00-setup 2014-10-08 20:18:38 +0000 | |||
1696 | +++ tests/00-setup 2015-06-30 20:18:04 +0000 | |||
1697 | @@ -5,6 +5,10 @@ | |||
1698 | 5 | sudo add-apt-repository --yes ppa:juju/stable | 5 | sudo add-apt-repository --yes ppa:juju/stable |
1699 | 6 | sudo apt-get update --yes | 6 | sudo apt-get update --yes |
1700 | 7 | sudo apt-get install --yes python-amulet \ | 7 | sudo apt-get install --yes python-amulet \ |
1701 | 8 | python-cinderclient \ | ||
1702 | 9 | python-distro-info \ | ||
1703 | 10 | python-glanceclient \ | ||
1704 | 11 | python-heatclient \ | ||
1705 | 8 | python-keystoneclient \ | 12 | python-keystoneclient \ |
1706 | 9 | python-glanceclient \ | ||
1707 | 10 | python-novaclient | 13 | python-novaclient |
1708 | 14 | python-swiftclient | ||
1709 | 11 | 15 | ||
1710 | === modified file 'tests/017-basic-trusty-kilo' (properties changed: -x to +x) | |||
1711 | === modified file 'tests/019-basic-vivid-kilo' (properties changed: -x to +x) | |||
1712 | === added file 'tests/020-basic-trusty-liberty' | |||
1713 | --- tests/020-basic-trusty-liberty 1970-01-01 00:00:00 +0000 | |||
1714 | +++ tests/020-basic-trusty-liberty 2015-06-30 20:18:04 +0000 | |||
1715 | @@ -0,0 +1,11 @@ | |||
1716 | 1 | #!/usr/bin/python | ||
1717 | 2 | |||
1718 | 3 | """Amulet tests on a basic glance deployment on trusty-liberty.""" | ||
1719 | 4 | |||
1720 | 5 | from basic_deployment import GlanceBasicDeployment | ||
1721 | 6 | |||
1722 | 7 | if __name__ == '__main__': | ||
1723 | 8 | deployment = GlanceBasicDeployment(series='trusty', | ||
1724 | 9 | openstack='cloud:trusty-liberty', | ||
1725 | 10 | source='cloud:trusty-updates/liberty') | ||
1726 | 11 | deployment.run_tests() | ||
1727 | 0 | 12 | ||
1728 | === added file 'tests/021-basic-wily-liberty' | |||
1729 | --- tests/021-basic-wily-liberty 1970-01-01 00:00:00 +0000 | |||
1730 | +++ tests/021-basic-wily-liberty 2015-06-30 20:18:04 +0000 | |||
1731 | @@ -0,0 +1,9 @@ | |||
1732 | 1 | #!/usr/bin/python | ||
1733 | 2 | |||
1734 | 3 | """Amulet tests on a basic glance deployment on wily-liberty.""" | ||
1735 | 4 | |||
1736 | 5 | from basic_deployment import GlanceBasicDeployment | ||
1737 | 6 | |||
1738 | 7 | if __name__ == '__main__': | ||
1739 | 8 | deployment = GlanceBasicDeployment(series='wily') | ||
1740 | 9 | deployment.run_tests() | ||
1741 | 0 | 10 | ||
1742 | === modified file 'tests/README' | |||
1743 | --- tests/README 2014-10-08 20:18:38 +0000 | |||
1744 | +++ tests/README 2015-06-30 20:18:04 +0000 | |||
1745 | @@ -1,6 +1,15 @@ | |||
1746 | 1 | This directory provides Amulet tests that focus on verification of Glance | 1 | This directory provides Amulet tests that focus on verification of Glance |
1747 | 2 | deployments. | 2 | deployments. |
1748 | 3 | 3 | ||
1749 | 4 | test_* methods are called in lexical sort order. | ||
1750 | 5 | |||
1751 | 6 | Test name convention to ensure desired test order: | ||
1752 | 7 | 1xx service and endpoint checks | ||
1753 | 8 | 2xx relation checks | ||
1754 | 9 | 3xx config checks | ||
1755 | 10 | 4xx functional checks | ||
1756 | 11 | 9xx restarts and other final checks | ||
1757 | 12 | |||
1758 | 4 | In order to run tests, you'll need charm-tools installed (in addition to | 13 | In order to run tests, you'll need charm-tools installed (in addition to |
1759 | 5 | juju, of course): | 14 | juju, of course): |
1760 | 6 | sudo add-apt-repository ppa:juju/stable | 15 | sudo add-apt-repository ppa:juju/stable |
1761 | 7 | 16 | ||
1762 | === modified file 'tests/basic_deployment.py' | |||
1763 | --- tests/basic_deployment.py 2015-04-24 10:07:08 +0000 | |||
1764 | +++ tests/basic_deployment.py 2015-06-30 20:18:04 +0000 | |||
1765 | @@ -2,6 +2,7 @@ | |||
1766 | 2 | 2 | ||
1767 | 3 | import amulet | 3 | import amulet |
1768 | 4 | import os | 4 | import os |
1769 | 5 | import time | ||
1770 | 5 | import yaml | 6 | import yaml |
1771 | 6 | 7 | ||
1772 | 7 | from charmhelpers.contrib.openstack.amulet.deployment import ( | 8 | from charmhelpers.contrib.openstack.amulet.deployment import ( |
1773 | @@ -10,25 +11,30 @@ | |||
1774 | 10 | 11 | ||
1775 | 11 | from charmhelpers.contrib.openstack.amulet.utils import ( | 12 | from charmhelpers.contrib.openstack.amulet.utils import ( |
1776 | 12 | OpenStackAmuletUtils, | 13 | OpenStackAmuletUtils, |
1779 | 13 | DEBUG, # flake8: noqa | 14 | DEBUG, |
1780 | 14 | ERROR | 15 | # ERROR |
1781 | 15 | ) | 16 | ) |
1782 | 16 | 17 | ||
1783 | 17 | # Use DEBUG to turn on debug logging | 18 | # Use DEBUG to turn on debug logging |
1784 | 18 | u = OpenStackAmuletUtils(DEBUG) | 19 | u = OpenStackAmuletUtils(DEBUG) |
1785 | 19 | 20 | ||
1786 | 21 | |||
1787 | 20 | class GlanceBasicDeployment(OpenStackAmuletDeployment): | 22 | class GlanceBasicDeployment(OpenStackAmuletDeployment): |
1794 | 21 | '''Amulet tests on a basic file-backed glance deployment. Verify relations, | 23 | """Amulet tests on a basic file-backed glance deployment. Verify |
1795 | 22 | service status, endpoint service catalog, create and delete new image.''' | 24 | relations, service status, endpoint service catalog, create and |
1796 | 23 | 25 | delete new image.""" | |
1791 | 24 | # TO-DO(beisner): | ||
1792 | 25 | # * Add tests with different storage back ends | ||
1793 | 26 | # * Resolve Essex->Havana juju set charm bug | ||
1797 | 27 | 26 | ||
1798 | 28 | def __init__(self, series=None, openstack=None, source=None, git=False, | 27 | def __init__(self, series=None, openstack=None, source=None, git=False, |
1799 | 28 | <<<<<<< TREE | ||
1800 | 29 | stable=True): | 29 | stable=True): |
1801 | 30 | '''Deploy the entire test environment.''' | 30 | '''Deploy the entire test environment.''' |
1802 | 31 | super(GlanceBasicDeployment, self).__init__(series, openstack, source, stable) | 31 | super(GlanceBasicDeployment, self).__init__(series, openstack, source, stable) |
1803 | 32 | ======= | ||
1804 | 33 | stable=False): | ||
1805 | 34 | """Deploy the entire test environment.""" | ||
1806 | 35 | super(GlanceBasicDeployment, self).__init__(series, openstack, | ||
1807 | 36 | source, stable) | ||
1808 | 37 | >>>>>>> MERGE-SOURCE | ||
1809 | 32 | self.git = git | 38 | self.git = git |
1810 | 33 | self._add_services() | 39 | self._add_services() |
1811 | 34 | self._add_relations() | 40 | self._add_relations() |
1812 | @@ -37,20 +43,21 @@ | |||
1813 | 37 | self._initialize_tests() | 43 | self._initialize_tests() |
1814 | 38 | 44 | ||
1815 | 39 | def _add_services(self): | 45 | def _add_services(self): |
1817 | 40 | '''Add services | 46 | """Add services |
1818 | 41 | 47 | ||
1819 | 42 | Add the services that we're testing, where glance is local, | 48 | Add the services that we're testing, where glance is local, |
1820 | 43 | and the rest of the service are from lp branches that are | 49 | and the rest of the service are from lp branches that are |
1821 | 44 | compatible with the local charm (e.g. stable or next). | 50 | compatible with the local charm (e.g. stable or next). |
1823 | 45 | ''' | 51 | """ |
1824 | 46 | this_service = {'name': 'glance'} | 52 | this_service = {'name': 'glance'} |
1826 | 47 | other_services = [{'name': 'mysql'}, {'name': 'rabbitmq-server'}, | 53 | other_services = [{'name': 'mysql'}, |
1827 | 54 | {'name': 'rabbitmq-server'}, | ||
1828 | 48 | {'name': 'keystone'}] | 55 | {'name': 'keystone'}] |
1829 | 49 | super(GlanceBasicDeployment, self)._add_services(this_service, | 56 | super(GlanceBasicDeployment, self)._add_services(this_service, |
1830 | 50 | other_services) | 57 | other_services) |
1831 | 51 | 58 | ||
1832 | 52 | def _add_relations(self): | 59 | def _add_relations(self): |
1834 | 53 | '''Add relations for the services.''' | 60 | """Add relations for the services.""" |
1835 | 54 | relations = {'glance:identity-service': 'keystone:identity-service', | 61 | relations = {'glance:identity-service': 'keystone:identity-service', |
1836 | 55 | 'glance:shared-db': 'mysql:shared-db', | 62 | 'glance:shared-db': 'mysql:shared-db', |
1837 | 56 | 'keystone:shared-db': 'mysql:shared-db', | 63 | 'keystone:shared-db': 'mysql:shared-db', |
1838 | @@ -58,7 +65,7 @@ | |||
1839 | 58 | super(GlanceBasicDeployment, self)._add_relations(relations) | 65 | super(GlanceBasicDeployment, self)._add_relations(relations) |
1840 | 59 | 66 | ||
1841 | 60 | def _configure_services(self): | 67 | def _configure_services(self): |
1843 | 61 | '''Configure all of the services.''' | 68 | """Configure all of the services.""" |
1844 | 62 | glance_config = {} | 69 | glance_config = {} |
1845 | 63 | if self.git: | 70 | if self.git: |
1846 | 64 | branch = 'stable/' + self._get_openstack_release_string() | 71 | branch = 'stable/' + self._get_openstack_release_string() |
1847 | @@ -66,17 +73,18 @@ | |||
1848 | 66 | openstack_origin_git = { | 73 | openstack_origin_git = { |
1849 | 67 | 'repositories': [ | 74 | 'repositories': [ |
1850 | 68 | {'name': 'requirements', | 75 | {'name': 'requirements', |
1852 | 69 | 'repository': 'git://git.openstack.org/openstack/requirements', | 76 | 'repository': 'git://github.com/openstack/requirements', |
1853 | 70 | 'branch': branch}, | 77 | 'branch': branch}, |
1854 | 71 | {'name': 'glance', | 78 | {'name': 'glance', |
1856 | 72 | 'repository': 'git://git.openstack.org/openstack/glance', | 79 | 'repository': 'git://github.com/openstack/glance', |
1857 | 73 | 'branch': branch}, | 80 | 'branch': branch}, |
1858 | 74 | ], | 81 | ], |
1859 | 75 | 'directory': '/mnt/openstack-git', | 82 | 'directory': '/mnt/openstack-git', |
1860 | 76 | 'http_proxy': amulet_http_proxy, | 83 | 'http_proxy': amulet_http_proxy, |
1861 | 77 | 'https_proxy': amulet_http_proxy, | 84 | 'https_proxy': amulet_http_proxy, |
1862 | 78 | } | 85 | } |
1864 | 79 | glance_config['openstack-origin-git'] = yaml.dump(openstack_origin_git) | 86 | glance_config['openstack-origin-git'] = \ |
1865 | 87 | yaml.dump(openstack_origin_git) | ||
1866 | 80 | 88 | ||
1867 | 81 | keystone_config = {'admin-password': 'openstack', | 89 | keystone_config = {'admin-password': 'openstack', |
1868 | 82 | 'admin-token': 'ubuntutesting'} | 90 | 'admin-token': 'ubuntutesting'} |
1869 | @@ -87,12 +95,19 @@ | |||
1870 | 87 | super(GlanceBasicDeployment, self)._configure_services(configs) | 95 | super(GlanceBasicDeployment, self)._configure_services(configs) |
1871 | 88 | 96 | ||
1872 | 89 | def _initialize_tests(self): | 97 | def _initialize_tests(self): |
1874 | 90 | '''Perform final initialization before tests get run.''' | 98 | """Perform final initialization before tests get run.""" |
1875 | 91 | # Access the sentries for inspecting service units | 99 | # Access the sentries for inspecting service units |
1876 | 92 | self.mysql_sentry = self.d.sentry.unit['mysql/0'] | 100 | self.mysql_sentry = self.d.sentry.unit['mysql/0'] |
1877 | 93 | self.glance_sentry = self.d.sentry.unit['glance/0'] | 101 | self.glance_sentry = self.d.sentry.unit['glance/0'] |
1878 | 94 | self.keystone_sentry = self.d.sentry.unit['keystone/0'] | 102 | self.keystone_sentry = self.d.sentry.unit['keystone/0'] |
1879 | 95 | self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] | 103 | self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] |
1880 | 104 | u.log.debug('openstack release val: {}'.format( | ||
1881 | 105 | self._get_openstack_release())) | ||
1882 | 106 | u.log.debug('openstack release str: {}'.format( | ||
1883 | 107 | self._get_openstack_release_string())) | ||
1884 | 108 | |||
1885 | 109 | # Let things settle a bit before moving forward | ||
1886 | 110 | time.sleep(30) | ||
1887 | 96 | 111 | ||
1888 | 97 | # Authenticate admin with keystone | 112 | # Authenticate admin with keystone |
1889 | 98 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, | 113 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, |
1890 | @@ -103,46 +118,99 @@ | |||
1891 | 103 | # Authenticate admin with glance endpoint | 118 | # Authenticate admin with glance endpoint |
1892 | 104 | self.glance = u.authenticate_glance_admin(self.keystone) | 119 | self.glance = u.authenticate_glance_admin(self.keystone) |
1893 | 105 | 120 | ||
1904 | 106 | u.log.debug('openstack release: {}'.format(self._get_openstack_release())) | 121 | def test_100_services(self): |
1905 | 107 | 122 | """Verify that the expected services are running on the | |
1906 | 108 | def test_services(self): | 123 | corresponding service units.""" |
1907 | 109 | '''Verify that the expected services are running on the | 124 | services = { |
1908 | 110 | corresponding service units.''' | 125 | self.mysql_sentry: ['mysql'], |
1909 | 111 | commands = { | 126 | self.keystone_sentry: ['keystone'], |
1910 | 112 | self.mysql_sentry: ['status mysql'], | 127 | self.glance_sentry: ['glance-api', 'glance-registry'], |
1911 | 113 | self.keystone_sentry: ['status keystone'], | 128 | self.rabbitmq_sentry: ['rabbitmq-server'] |
1902 | 114 | self.glance_sentry: ['status glance-api', 'status glance-registry'], | ||
1903 | 115 | self.rabbitmq_sentry: ['sudo service rabbitmq-server status'] | ||
1912 | 116 | } | 129 | } |
1915 | 117 | u.log.debug('commands: {}'.format(commands)) | 130 | |
1916 | 118 | ret = u.validate_services(commands) | 131 | ret = u.validate_services_by_name(services) |
1917 | 119 | if ret: | 132 | if ret: |
1918 | 120 | amulet.raise_status(amulet.FAIL, msg=ret) | 133 | amulet.raise_status(amulet.FAIL, msg=ret) |
1919 | 121 | 134 | ||
1936 | 122 | def test_service_catalog(self): | 135 | def test_102_service_catalog(self): |
1937 | 123 | '''Verify that the service catalog endpoint data''' | 136 | """Verify that the service catalog endpoint data is valid.""" |
1938 | 124 | endpoint_vol = {'adminURL': u.valid_url, | 137 | u.log.debug('Checking keystone service catalog...') |
1939 | 125 | 'region': 'RegionOne', | 138 | endpoint_check = { |
1940 | 126 | 'publicURL': u.valid_url, | 139 | 'adminURL': u.valid_url, |
1941 | 127 | 'internalURL': u.valid_url} | 140 | 'id': u.not_null, |
1942 | 128 | endpoint_id = {'adminURL': u.valid_url, | 141 | 'region': 'RegionOne', |
1943 | 129 | 'region': 'RegionOne', | 142 | 'publicURL': u.valid_url, |
1944 | 130 | 'publicURL': u.valid_url, | 143 | 'internalURL': u.valid_url |
1945 | 131 | 'internalURL': u.valid_url} | 144 | } |
1946 | 132 | if self._get_openstack_release() >= self.trusty_icehouse: | 145 | expected = { |
1947 | 133 | endpoint_vol['id'] = u.not_null | 146 | 'image': [endpoint_check], |
1948 | 134 | endpoint_id['id'] = u.not_null | 147 | 'identity': [endpoint_check] |
1949 | 135 | 148 | } | |
1934 | 136 | expected = {'image': [endpoint_id], | ||
1935 | 137 | 'identity': [endpoint_id]} | ||
1950 | 138 | actual = self.keystone.service_catalog.get_endpoints() | 149 | actual = self.keystone.service_catalog.get_endpoints() |
1951 | 139 | 150 | ||
1952 | 140 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) | 151 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) |
1953 | 141 | if ret: | 152 | if ret: |
1954 | 142 | amulet.raise_status(amulet.FAIL, msg=ret) | 153 | amulet.raise_status(amulet.FAIL, msg=ret) |
1955 | 143 | 154 | ||
1958 | 144 | def test_mysql_glance_db_relation(self): | 155 | def test_104_glance_endpoint(self): |
1959 | 145 | '''Verify the mysql:glance shared-db relation data''' | 156 | """Verify the glance endpoint data.""" |
1960 | 157 | u.log.debug('Checking glance api endpoint data...') | ||
1961 | 158 | endpoints = self.keystone.endpoints.list() | ||
1962 | 159 | admin_port = internal_port = public_port = '9292' | ||
1963 | 160 | expected = {'id': u.not_null, | ||
1964 | 161 | 'region': 'RegionOne', | ||
1965 | 162 | 'adminurl': u.valid_url, | ||
1966 | 163 | 'internalurl': u.valid_url, | ||
1967 | 164 | 'publicurl': u.valid_url, | ||
1968 | 165 | 'service_id': u.not_null} | ||
1969 | 166 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
1970 | 167 | public_port, expected) | ||
1971 | 168 | |||
1972 | 169 | if ret: | ||
1973 | 170 | amulet.raise_status(amulet.FAIL, | ||
1974 | 171 | msg='glance endpoint: {}'.format(ret)) | ||
1975 | 172 | |||
1976 | 173 | def test_106_keystone_endpoint(self): | ||
1977 | 174 | """Verify the keystone endpoint data.""" | ||
1978 | 175 | u.log.debug('Checking keystone api endpoint data...') | ||
1979 | 176 | endpoints = self.keystone.endpoints.list() | ||
1980 | 177 | admin_port = '35357' | ||
1981 | 178 | internal_port = public_port = '5000' | ||
1982 | 179 | expected = {'id': u.not_null, | ||
1983 | 180 | 'region': 'RegionOne', | ||
1984 | 181 | 'adminurl': u.valid_url, | ||
1985 | 182 | 'internalurl': u.valid_url, | ||
1986 | 183 | 'publicurl': u.valid_url, | ||
1987 | 184 | 'service_id': u.not_null} | ||
1988 | 185 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
1989 | 186 | public_port, expected) | ||
1990 | 187 | if ret: | ||
1991 | 188 | amulet.raise_status(amulet.FAIL, | ||
1992 | 189 | msg='keystone endpoint: {}'.format(ret)) | ||
1993 | 190 | |||
1994 | 191 | def test_110_users(self): | ||
1995 | 192 | """Verify expected users.""" | ||
1996 | 193 | u.log.debug('Checking keystone users...') | ||
1997 | 194 | user0 = {'name': 'admin', | ||
1998 | 195 | 'enabled': True, | ||
1999 | 196 | 'tenantId': u.not_null, | ||
2000 | 197 | 'id': u.not_null, | ||
2001 | 198 | 'email': 'juju@localhost'} | ||
2002 | 199 | user1 = {'name': 'glance', | ||
2003 | 200 | 'enabled': True, | ||
2004 | 201 | 'tenantId': u.not_null, | ||
2005 | 202 | 'id': u.not_null, | ||
2006 | 203 | 'email': 'juju@localhost'} | ||
2007 | 204 | expected = [user0, user1] | ||
2008 | 205 | actual = self.keystone.users.list() | ||
2009 | 206 | |||
2010 | 207 | ret = u.validate_user_data(expected, actual) | ||
2011 | 208 | if ret: | ||
2012 | 209 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
2013 | 210 | |||
2014 | 211 | def test_200_mysql_glance_db_relation(self): | ||
2015 | 212 | """Verify the mysql:glance shared-db relation data""" | ||
2016 | 213 | u.log.debug('Checking mysql to glance shared-db relation data...') | ||
2017 | 146 | unit = self.mysql_sentry | 214 | unit = self.mysql_sentry |
2018 | 147 | relation = ['shared-db', 'glance:shared-db'] | 215 | relation = ['shared-db', 'glance:shared-db'] |
2019 | 148 | expected = { | 216 | expected = { |
2020 | @@ -154,8 +222,9 @@ | |||
2021 | 154 | message = u.relation_error('mysql shared-db', ret) | 222 | message = u.relation_error('mysql shared-db', ret) |
2022 | 155 | amulet.raise_status(amulet.FAIL, msg=message) | 223 | amulet.raise_status(amulet.FAIL, msg=message) |
2023 | 156 | 224 | ||
2026 | 157 | def test_glance_mysql_db_relation(self): | 225 | def test_201_glance_mysql_db_relation(self): |
2027 | 158 | '''Verify the glance:mysql shared-db relation data''' | 226 | """Verify the glance:mysql shared-db relation data""" |
2028 | 227 | u.log.debug('Checking glance to mysql shared-db relation data...') | ||
2029 | 159 | unit = self.glance_sentry | 228 | unit = self.glance_sentry |
2030 | 160 | relation = ['shared-db', 'mysql:shared-db'] | 229 | relation = ['shared-db', 'mysql:shared-db'] |
2031 | 161 | expected = { | 230 | expected = { |
2032 | @@ -169,8 +238,9 @@ | |||
2033 | 169 | message = u.relation_error('glance shared-db', ret) | 238 | message = u.relation_error('glance shared-db', ret) |
2034 | 170 | amulet.raise_status(amulet.FAIL, msg=message) | 239 | amulet.raise_status(amulet.FAIL, msg=message) |
2035 | 171 | 240 | ||
2038 | 172 | def test_keystone_glance_id_relation(self): | 241 | def test_202_keystone_glance_id_relation(self): |
2039 | 173 | '''Verify the keystone:glance identity-service relation data''' | 242 | """Verify the keystone:glance identity-service relation data""" |
2040 | 243 | u.log.debug('Checking keystone to glance id relation data...') | ||
2041 | 174 | unit = self.keystone_sentry | 244 | unit = self.keystone_sentry |
2042 | 175 | relation = ['identity-service', | 245 | relation = ['identity-service', |
2043 | 176 | 'glance:identity-service'] | 246 | 'glance:identity-service'] |
2044 | @@ -193,8 +263,9 @@ | |||
2045 | 193 | message = u.relation_error('keystone identity-service', ret) | 263 | message = u.relation_error('keystone identity-service', ret) |
2046 | 194 | amulet.raise_status(amulet.FAIL, msg=message) | 264 | amulet.raise_status(amulet.FAIL, msg=message) |
2047 | 195 | 265 | ||
2050 | 196 | def test_glance_keystone_id_relation(self): | 266 | def test_203_glance_keystone_id_relation(self): |
2051 | 197 | '''Verify the glance:keystone identity-service relation data''' | 267 | """Verify the glance:keystone identity-service relation data""" |
2052 | 268 | u.log.debug('Checking glance to keystone relation data...') | ||
2053 | 198 | unit = self.glance_sentry | 269 | unit = self.glance_sentry |
2054 | 199 | relation = ['identity-service', | 270 | relation = ['identity-service', |
2055 | 200 | 'keystone:identity-service'] | 271 | 'keystone:identity-service'] |
2056 | @@ -211,8 +282,9 @@ | |||
2057 | 211 | message = u.relation_error('glance identity-service', ret) | 282 | message = u.relation_error('glance identity-service', ret) |
2058 | 212 | amulet.raise_status(amulet.FAIL, msg=message) | 283 | amulet.raise_status(amulet.FAIL, msg=message) |
2059 | 213 | 284 | ||
2062 | 214 | def test_rabbitmq_glance_amqp_relation(self): | 285 | def test_204_rabbitmq_glance_amqp_relation(self): |
2063 | 215 | '''Verify the rabbitmq-server:glance amqp relation data''' | 286 | """Verify the rabbitmq-server:glance amqp relation data""" |
2064 | 287 | u.log.debug('Checking rmq to glance amqp relation data...') | ||
2065 | 216 | unit = self.rabbitmq_sentry | 288 | unit = self.rabbitmq_sentry |
2066 | 217 | relation = ['amqp', 'glance:amqp'] | 289 | relation = ['amqp', 'glance:amqp'] |
2067 | 218 | expected = { | 290 | expected = { |
2068 | @@ -225,8 +297,9 @@ | |||
2069 | 225 | message = u.relation_error('rabbitmq amqp', ret) | 297 | message = u.relation_error('rabbitmq amqp', ret) |
2070 | 226 | amulet.raise_status(amulet.FAIL, msg=message) | 298 | amulet.raise_status(amulet.FAIL, msg=message) |
2071 | 227 | 299 | ||
2074 | 228 | def test_glance_rabbitmq_amqp_relation(self): | 300 | def test_205_glance_rabbitmq_amqp_relation(self): |
2075 | 229 | '''Verify the glance:rabbitmq-server amqp relation data''' | 301 | """Verify the glance:rabbitmq-server amqp relation data""" |
2076 | 302 | u.log.debug('Checking glance to rmq amqp relation data...') | ||
2077 | 230 | unit = self.glance_sentry | 303 | unit = self.glance_sentry |
2078 | 231 | relation = ['amqp', 'rabbitmq-server:amqp'] | 304 | relation = ['amqp', 'rabbitmq-server:amqp'] |
2079 | 232 | expected = { | 305 | expected = { |
2080 | @@ -239,291 +312,180 @@ | |||
2081 | 239 | message = u.relation_error('glance amqp', ret) | 312 | message = u.relation_error('glance amqp', ret) |
2082 | 240 | amulet.raise_status(amulet.FAIL, msg=message) | 313 | amulet.raise_status(amulet.FAIL, msg=message) |
2083 | 241 | 314 | ||
2110 | 242 | def test_image_create_delete(self): | 315 | def test_300_glance_api_default_config(self): |
2111 | 243 | '''Create new cirros image in glance, verify, then delete it''' | 316 | """Verify default section configs in glance-api.conf and |
2112 | 244 | 317 | compare some of the parameters to relation data.""" | |
2113 | 245 | # Create a new image | 318 | u.log.debug('Checking glance api config file...') |
2088 | 246 | image_name = 'cirros-image-1' | ||
2089 | 247 | image_new = u.create_cirros_image(self.glance, image_name) | ||
2090 | 248 | |||
2091 | 249 | # Confirm image is created and has status of 'active' | ||
2092 | 250 | if not image_new: | ||
2093 | 251 | message = 'glance image create failed' | ||
2094 | 252 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2095 | 253 | |||
2096 | 254 | # Verify new image name | ||
2097 | 255 | images_list = list(self.glance.images.list()) | ||
2098 | 256 | if images_list[0].name != image_name: | ||
2099 | 257 | message = 'glance image create failed or unexpected image name {}'.format(images_list[0].name) | ||
2100 | 258 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2101 | 259 | |||
2102 | 260 | # Delete the new image | ||
2103 | 261 | u.log.debug('image count before delete: {}'.format(len(list(self.glance.images.list())))) | ||
2104 | 262 | u.delete_image(self.glance, image_new) | ||
2105 | 263 | u.log.debug('image count after delete: {}'.format(len(list(self.glance.images.list())))) | ||
2106 | 264 | |||
2107 | 265 | def test_glance_api_default_config(self): | ||
2108 | 266 | '''Verify default section configs in glance-api.conf and | ||
2109 | 267 | compare some of the parameters to relation data.''' | ||
2114 | 268 | unit = self.glance_sentry | 319 | unit = self.glance_sentry |
2115 | 320 | unit_ks = self.keystone_sentry | ||
2116 | 269 | rel_gl_mq = unit.relation('amqp', 'rabbitmq-server:amqp') | 321 | rel_gl_mq = unit.relation('amqp', 'rabbitmq-server:amqp') |
2153 | 270 | conf = '/etc/glance/glance-api.conf' | 322 | rel_ks_gl = unit_ks.relation('identity-service', |
2154 | 271 | expected = {'use_syslog': 'False', | 323 | 'glance:identity-service') |
2155 | 272 | 'default_store': 'file', | 324 | rel_my_gl = self.mysql_sentry.relation('shared-db', 'glance:shared-db') |
2156 | 273 | 'filesystem_store_datadir': '/var/lib/glance/images/', | 325 | db_uri = "mysql://{}:{}@{}/{}".format('glance', rel_my_gl['password'], |
2157 | 274 | 'rabbit_userid': rel_gl_mq['username'], | 326 | rel_my_gl['db_host'], 'glance') |
2158 | 275 | 'log_file': '/var/log/glance/api.log', | 327 | conf = '/etc/glance/glance-api.conf' |
2159 | 276 | 'debug': 'False', | 328 | expected = { |
2160 | 277 | 'verbose': 'False'} | 329 | 'DEFAULT': { |
2161 | 278 | section = 'DEFAULT' | 330 | 'debug': 'False', |
2162 | 279 | 331 | 'verbose': 'False', | |
2163 | 280 | if self._get_openstack_release() <= self.precise_havana: | 332 | 'use_syslog': 'False', |
2164 | 281 | # Defaults were different before icehouse | 333 | 'log_file': '/var/log/glance/api.log', |
2165 | 282 | expected['debug'] = 'True' | 334 | 'default_store': 'file', |
2166 | 283 | expected['verbose'] = 'True' | 335 | 'filesystem_store_datadir': '/var/lib/glance/images/', |
2167 | 284 | 336 | 'rabbit_userid': rel_gl_mq['username'], | |
2168 | 285 | ret = u.validate_config_data(unit, conf, section, expected) | 337 | 'bind_host': '0.0.0.0', |
2169 | 286 | if ret: | 338 | 'bind_port': '9282', |
2170 | 287 | message = "glance-api default config error: {}".format(ret) | 339 | 'registry_host': '0.0.0.0', |
2171 | 288 | amulet.raise_status(amulet.FAIL, msg=message) | 340 | 'registry_port': '9191', |
2172 | 289 | 341 | 'registry_client_protocol': 'http', | |
2173 | 290 | def test_glance_api_auth_config(self): | 342 | 'delayed_delete': 'False', |
2174 | 291 | '''Verify authtoken section config in glance-api.conf using | 343 | 'scrub_time': '43200', |
2175 | 292 | glance/keystone relation data.''' | 344 | 'notification_driver': 'rabbit', |
2176 | 293 | unit_gl = self.glance_sentry | 345 | 'filesystem_store_datadir': '/var/lib/glance/images/', |
2177 | 294 | unit_ks = self.keystone_sentry | 346 | 'scrubber_datadir': '/var/lib/glance/scrubber', |
2178 | 295 | rel_gl_mq = unit_gl.relation('amqp', 'rabbitmq-server:amqp') | 347 | 'image_cache_dir': '/var/lib/glance/image-cache/', |
2179 | 296 | rel_ks_gl = unit_ks.relation('identity-service', 'glance:identity-service') | 348 | 'db_enforce_mysql_charset': 'False', |
2180 | 297 | conf = '/etc/glance/glance-api.conf' | 349 | 'rabbit_userid': 'glance', |
2181 | 298 | section = 'keystone_authtoken' | 350 | 'rabbit_virtual_host': 'openstack', |
2182 | 299 | 351 | 'rabbit_password': u.not_null, | |
2183 | 300 | if self._get_openstack_release() > self.precise_havana: | 352 | 'rabbit_host': u.valid_ip |
2184 | 301 | # No auth config exists in this file before icehouse | 353 | }, |
2185 | 302 | expected = {'admin_user': 'glance', | 354 | 'keystone_authtoken': { |
2186 | 303 | 'admin_password': rel_ks_gl['service_password']} | 355 | 'admin_user': 'glance', |
2187 | 304 | 356 | 'admin_password': rel_ks_gl['service_password'], | |
2188 | 305 | ret = u.validate_config_data(unit_gl, conf, section, expected) | 357 | 'auth_uri': u.valid_url, |
2189 | 358 | 'auth_host': u.valid_ip, | ||
2190 | 359 | 'auth_port': '35357', | ||
2191 | 360 | 'auth_protocol': 'http', | ||
2192 | 361 | }, | ||
2193 | 362 | 'database': { | ||
2194 | 363 | 'connection': db_uri, | ||
2195 | 364 | 'sql_idle_timeout': '3600' | ||
2196 | 365 | } | ||
2197 | 366 | } | ||
2198 | 367 | |||
2199 | 368 | for section, pairs in expected.iteritems(): | ||
2200 | 369 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
2201 | 306 | if ret: | 370 | if ret: |
2203 | 307 | message = "glance-api auth config error: {}".format(ret) | 371 | message = "glance api config error: {}".format(ret) |
2204 | 308 | amulet.raise_status(amulet.FAIL, msg=message) | 372 | amulet.raise_status(amulet.FAIL, msg=message) |
2205 | 309 | 373 | ||
2213 | 310 | def test_glance_api_paste_auth_config(self): | 374 | def _get_filter_factory_expected_dict(self): |
2214 | 311 | '''Verify authtoken section config in glance-api-paste.ini using | 375 | """Return expected authtoken filter factory dict for OS release""" |
2215 | 312 | glance/keystone relation data.''' | 376 | if self._get_openstack_release() < self.vivid_kilo: |
2216 | 313 | unit_gl = self.glance_sentry | 377 | # Juno and earlier |
2217 | 314 | unit_ks = self.keystone_sentry | 378 | val = 'keystoneclient.middleware.auth_token:filter_factory' |
2218 | 315 | rel_gl_mq = unit_gl.relation('amqp', 'rabbitmq-server:amqp') | 379 | else: |
2219 | 316 | rel_ks_gl = unit_ks.relation('identity-service', 'glance:identity-service') | 380 | # Kilo and later |
2220 | 381 | val = 'keystonemiddleware.auth_token: filter_factory' | ||
2221 | 382 | |||
2222 | 383 | return {'filter:authtoken': {'paste.filter_factory': val}} | ||
2223 | 384 | |||
2224 | 385 | def test_304_glance_api_paste_auth_config(self): | ||
2225 | 386 | """Verify authtoken section config in glance-api-paste.ini using | ||
2226 | 387 | glance/keystone relation data.""" | ||
2227 | 388 | u.log.debug('Checking glance api paste config file...') | ||
2228 | 389 | unit = self.glance_sentry | ||
2229 | 317 | conf = '/etc/glance/glance-api-paste.ini' | 390 | conf = '/etc/glance/glance-api-paste.ini' |
2238 | 318 | section = 'filter:authtoken' | 391 | expected = self._get_filter_factory_expected_dict() |
2239 | 319 | 392 | ||
2240 | 320 | if self._get_openstack_release() <= self.precise_havana: | 393 | for section, pairs in expected.iteritems(): |
2241 | 321 | # No auth config exists in this file after havana | 394 | ret = u.validate_config_data(unit, conf, section, pairs) |
2234 | 322 | expected = {'admin_user': 'glance', | ||
2235 | 323 | 'admin_password': rel_ks_gl['service_password']} | ||
2236 | 324 | |||
2237 | 325 | ret = u.validate_config_data(unit_gl, conf, section, expected) | ||
2242 | 326 | if ret: | 395 | if ret: |
2244 | 327 | message = "glance-api-paste auth config error: {}".format(ret) | 396 | message = "glance api paste config error: {}".format(ret) |
2245 | 328 | amulet.raise_status(amulet.FAIL, msg=message) | 397 | amulet.raise_status(amulet.FAIL, msg=message) |
2246 | 329 | 398 | ||
2254 | 330 | def test_glance_registry_paste_auth_config(self): | 399 | def test_306_glance_registry_paste_auth_config(self): |
2255 | 331 | '''Verify authtoken section config in glance-registry-paste.ini using | 400 | """Verify authtoken section config in glance-registry-paste.ini using |
2256 | 332 | glance/keystone relation data.''' | 401 | glance/keystone relation data.""" |
2257 | 333 | unit_gl = self.glance_sentry | 402 | u.log.debug('Checking glance registry paste config file...') |
2258 | 334 | unit_ks = self.keystone_sentry | 403 | unit = self.glance_sentry |
2252 | 335 | rel_gl_mq = unit_gl.relation('amqp', 'rabbitmq-server:amqp') | ||
2253 | 336 | rel_ks_gl = unit_ks.relation('identity-service', 'glance:identity-service') | ||
2259 | 337 | conf = '/etc/glance/glance-registry-paste.ini' | 404 | conf = '/etc/glance/glance-registry-paste.ini' |
2268 | 338 | section = 'filter:authtoken' | 405 | expected = self._get_filter_factory_expected_dict() |
2269 | 339 | 406 | ||
2270 | 340 | if self._get_openstack_release() <= self.precise_havana: | 407 | for section, pairs in expected.iteritems(): |
2271 | 341 | # No auth config exists in this file after havana | 408 | ret = u.validate_config_data(unit, conf, section, pairs) |
2264 | 342 | expected = {'admin_user': 'glance', | ||
2265 | 343 | 'admin_password': rel_ks_gl['service_password']} | ||
2266 | 344 | |||
2267 | 345 | ret = u.validate_config_data(unit_gl, conf, section, expected) | ||
2272 | 346 | if ret: | 409 | if ret: |
2274 | 347 | message = "glance-registry-paste auth config error: {}".format(ret) | 410 | message = "glance registry paste config error: {}".format(ret) |
2275 | 348 | amulet.raise_status(amulet.FAIL, msg=message) | 411 | amulet.raise_status(amulet.FAIL, msg=message) |
2276 | 349 | 412 | ||
2279 | 350 | def test_glance_registry_default_config(self): | 413 | def test_308_glance_registry_default_config(self): |
2280 | 351 | '''Verify default section configs in glance-registry.conf''' | 414 | """Verify configs in glance-registry.conf""" |
2281 | 415 | u.log.debug('Checking glance registry config file...') | ||
2282 | 352 | unit = self.glance_sentry | 416 | unit = self.glance_sentry |
2283 | 353 | conf = '/etc/glance/glance-registry.conf' | ||
2284 | 354 | expected = {'use_syslog': 'False', | ||
2285 | 355 | 'log_file': '/var/log/glance/registry.log', | ||
2286 | 356 | 'debug': 'False', | ||
2287 | 357 | 'verbose': 'False'} | ||
2288 | 358 | section = 'DEFAULT' | ||
2289 | 359 | |||
2290 | 360 | if self._get_openstack_release() <= self.precise_havana: | ||
2291 | 361 | # Defaults were different before icehouse | ||
2292 | 362 | expected['debug'] = 'True' | ||
2293 | 363 | expected['verbose'] = 'True' | ||
2294 | 364 | |||
2295 | 365 | ret = u.validate_config_data(unit, conf, section, expected) | ||
2296 | 366 | if ret: | ||
2297 | 367 | message = "glance-registry default config error: {}".format(ret) | ||
2298 | 368 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2299 | 369 | |||
2300 | 370 | def test_glance_registry_auth_config(self): | ||
2301 | 371 | '''Verify authtoken section config in glance-registry.conf | ||
2302 | 372 | using glance/keystone relation data.''' | ||
2303 | 373 | unit_gl = self.glance_sentry | ||
2304 | 374 | unit_ks = self.keystone_sentry | 417 | unit_ks = self.keystone_sentry |
2307 | 375 | rel_gl_mq = unit_gl.relation('amqp', 'rabbitmq-server:amqp') | 418 | rel_ks_gl = unit_ks.relation('identity-service', |
2308 | 376 | rel_ks_gl = unit_ks.relation('identity-service', 'glance:identity-service') | 419 | 'glance:identity-service') |
2309 | 420 | rel_my_gl = self.mysql_sentry.relation('shared-db', 'glance:shared-db') | ||
2310 | 421 | db_uri = "mysql://{}:{}@{}/{}".format('glance', rel_my_gl['password'], | ||
2311 | 422 | rel_my_gl['db_host'], 'glance') | ||
2312 | 377 | conf = '/etc/glance/glance-registry.conf' | 423 | conf = '/etc/glance/glance-registry.conf' |
2321 | 378 | section = 'keystone_authtoken' | 424 | |
2322 | 379 | 425 | expected = { | |
2323 | 380 | if self._get_openstack_release() > self.precise_havana: | 426 | 'DEFAULT': { |
2324 | 381 | # No auth config exists in this file before icehouse | 427 | 'use_syslog': 'False', |
2325 | 382 | expected = {'admin_user': 'glance', | 428 | 'log_file': '/var/log/glance/registry.log', |
2326 | 383 | 'admin_password': rel_ks_gl['service_password']} | 429 | 'debug': 'False', |
2327 | 384 | 430 | 'verbose': 'False', | |
2328 | 385 | ret = u.validate_config_data(unit_gl, conf, section, expected) | 431 | 'bind_host': '0.0.0.0', |
2329 | 432 | 'bind_port': '9191' | ||
2330 | 433 | }, | ||
2331 | 434 | 'database': { | ||
2332 | 435 | 'connection': db_uri, | ||
2333 | 436 | 'sql_idle_timeout': '3600' | ||
2334 | 437 | }, | ||
2335 | 438 | 'keystone_authtoken': { | ||
2336 | 439 | 'admin_user': 'glance', | ||
2337 | 440 | 'admin_password': rel_ks_gl['service_password'], | ||
2338 | 441 | 'auth_uri': u.valid_url, | ||
2339 | 442 | 'auth_host': u.valid_ip, | ||
2340 | 443 | 'auth_port': '35357', | ||
2341 | 444 | 'auth_protocol': 'http', | ||
2342 | 445 | }, | ||
2343 | 446 | } | ||
2344 | 447 | |||
2345 | 448 | for section, pairs in expected.iteritems(): | ||
2346 | 449 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
2347 | 386 | if ret: | 450 | if ret: |
2349 | 387 | message = "glance-registry keystone_authtoken config error: {}".format(ret) | 451 | message = "glance registry paste config error: {}".format(ret) |
2350 | 388 | amulet.raise_status(amulet.FAIL, msg=message) | 452 | amulet.raise_status(amulet.FAIL, msg=message) |
2351 | 389 | 453 | ||
2492 | 390 | def test_glance_api_database_config(self): | 454 | def test_410_glance_image_create_delete(self): |
2493 | 391 | '''Verify database config in glance-api.conf and | 455 | """Create new cirros image in glance, verify, then delete it.""" |
2494 | 392 | compare with a db uri constructed from relation data.''' | 456 | u.log.debug('Creating, checking and deleting glance image...') |
2495 | 393 | unit = self.glance_sentry | 457 | img_new = u.create_cirros_image(self.glance, "cirros-image-1") |
2496 | 394 | conf = '/etc/glance/glance-api.conf' | 458 | img_id = img_new.id |
2497 | 395 | relation = self.mysql_sentry.relation('shared-db', 'glance:shared-db') | 459 | u.delete_resource(self.glance.images, img_id, msg="glance image") |
2498 | 396 | db_uri = "mysql://{}:{}@{}/{}".format('glance', relation['password'], | 460 | |
2499 | 397 | relation['db_host'], 'glance') | 461 | def test_900_glance_restart_on_config_change(self): |
2500 | 398 | expected = {'connection': db_uri, 'sql_idle_timeout': '3600'} | 462 | """Verify that the specified services are restarted when the config |
2501 | 399 | section = 'database' | 463 | is changed.""" |
2502 | 400 | 464 | sentry = self.glance_sentry | |
2503 | 401 | if self._get_openstack_release() <= self.precise_havana: | 465 | juju_service = 'glance' |
2504 | 402 | # Section and directive for this config changed in icehouse | 466 | |
2505 | 403 | expected = {'sql_connection': db_uri, 'sql_idle_timeout': '3600'} | 467 | # Expected default and alternate values |
2506 | 404 | section = 'DEFAULT' | 468 | set_default = {'use-syslog': 'False'} |
2507 | 405 | 469 | set_alternate = {'use-syslog': 'True'} | |
2508 | 406 | ret = u.validate_config_data(unit, conf, section, expected) | 470 | |
2509 | 407 | if ret: | 471 | # Config file affected by juju set config change |
2510 | 408 | message = "glance db config error: {}".format(ret) | 472 | conf_file = '/etc/glance/glance-api.conf' |
2511 | 409 | amulet.raise_status(amulet.FAIL, msg=message) | 473 | |
2512 | 410 | 474 | # Services which are expected to restart upon config change | |
2513 | 411 | def test_glance_registry_database_config(self): | 475 | services = ['glance-api', 'glance-registry'] |
2514 | 412 | '''Verify database config in glance-registry.conf and | 476 | |
2515 | 413 | compare with a db uri constructed from relation data.''' | 477 | # Make config change, check for service restarts |
2516 | 414 | unit = self.glance_sentry | 478 | u.log.debug('Making config change on {}...'.format(juju_service)) |
2517 | 415 | conf = '/etc/glance/glance-registry.conf' | 479 | self.d.configure(juju_service, set_alternate) |
2518 | 416 | relation = self.mysql_sentry.relation('shared-db', 'glance:shared-db') | 480 | |
2519 | 417 | db_uri = "mysql://{}:{}@{}/{}".format('glance', relation['password'], | 481 | sleep_time = 30 |
2520 | 418 | relation['db_host'], 'glance') | 482 | for s in services: |
2521 | 419 | expected = {'connection': db_uri, 'sql_idle_timeout': '3600'} | 483 | u.log.debug("Checking that service restarted: {}".format(s)) |
2522 | 420 | section = 'database' | 484 | if not u.service_restarted(sentry, s, |
2523 | 421 | 485 | conf_file, sleep_time=sleep_time): | |
2524 | 422 | if self._get_openstack_release() <= self.precise_havana: | 486 | self.d.configure(juju_service, set_default) |
2525 | 423 | # Section and directive for this config changed in icehouse | 487 | msg = "service {} didn't restart after config change".format(s) |
2526 | 424 | expected = {'sql_connection': db_uri, 'sql_idle_timeout': '3600'} | 488 | amulet.raise_status(amulet.FAIL, msg=msg) |
2527 | 425 | section = 'DEFAULT' | 489 | sleep_time = 0 |
2528 | 426 | 490 | ||
2529 | 427 | ret = u.validate_config_data(unit, conf, section, expected) | 491 | self.d.configure(juju_service, set_default) |
2390 | 428 | if ret: | ||
2391 | 429 | message = "glance db config error: {}".format(ret) | ||
2392 | 430 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2393 | 431 | |||
2394 | 432 | def test_glance_endpoint(self): | ||
2395 | 433 | '''Verify the glance endpoint data.''' | ||
2396 | 434 | endpoints = self.keystone.endpoints.list() | ||
2397 | 435 | admin_port = internal_port = public_port = '9292' | ||
2398 | 436 | expected = {'id': u.not_null, | ||
2399 | 437 | 'region': 'RegionOne', | ||
2400 | 438 | 'adminurl': u.valid_url, | ||
2401 | 439 | 'internalurl': u.valid_url, | ||
2402 | 440 | 'publicurl': u.valid_url, | ||
2403 | 441 | 'service_id': u.not_null} | ||
2404 | 442 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
2405 | 443 | public_port, expected) | ||
2406 | 444 | |||
2407 | 445 | if ret: | ||
2408 | 446 | amulet.raise_status(amulet.FAIL, | ||
2409 | 447 | msg='glance endpoint: {}'.format(ret)) | ||
2410 | 448 | |||
2411 | 449 | def test_keystone_endpoint(self): | ||
2412 | 450 | '''Verify the keystone endpoint data.''' | ||
2413 | 451 | endpoints = self.keystone.endpoints.list() | ||
2414 | 452 | admin_port = '35357' | ||
2415 | 453 | internal_port = public_port = '5000' | ||
2416 | 454 | expected = {'id': u.not_null, | ||
2417 | 455 | 'region': 'RegionOne', | ||
2418 | 456 | 'adminurl': u.valid_url, | ||
2419 | 457 | 'internalurl': u.valid_url, | ||
2420 | 458 | 'publicurl': u.valid_url, | ||
2421 | 459 | 'service_id': u.not_null} | ||
2422 | 460 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
2423 | 461 | public_port, expected) | ||
2424 | 462 | if ret: | ||
2425 | 463 | amulet.raise_status(amulet.FAIL, | ||
2426 | 464 | msg='keystone endpoint: {}'.format(ret)) | ||
2427 | 465 | |||
2428 | 466 | def _change_config(self): | ||
2429 | 467 | if self._get_openstack_release() > self.precise_havana: | ||
2430 | 468 | self.d.configure('glance', {'debug': 'True'}) | ||
2431 | 469 | else: | ||
2432 | 470 | self.d.configure('glance', {'debug': 'False'}) | ||
2433 | 471 | |||
2434 | 472 | def _restore_config(self): | ||
2435 | 473 | if self._get_openstack_release() > self.precise_havana: | ||
2436 | 474 | self.d.configure('glance', {'debug': 'False'}) | ||
2437 | 475 | else: | ||
2438 | 476 | self.d.configure('glance', {'debug': 'True'}) | ||
2439 | 477 | |||
2440 | 478 | def test_z_glance_restart_on_config_change(self): | ||
2441 | 479 | '''Verify that glance is restarted when the config is changed. | ||
2442 | 480 | |||
2443 | 481 | Note(coreycb): The method name with the _z_ is a little odd | ||
2444 | 482 | but it forces the test to run last. It just makes things | ||
2445 | 483 | easier because restarting services requires re-authorization. | ||
2446 | 484 | ''' | ||
2447 | 485 | if self._get_openstack_release() <= self.precise_havana: | ||
2448 | 486 | # /!\ NOTE(beisner): Glance charm before Icehouse doesn't respond | ||
2449 | 487 | # to attempted config changes via juju / juju set. | ||
2450 | 488 | # https://bugs.launchpad.net/charms/+source/glance/+bug/1340307 | ||
2451 | 489 | u.log.error('NOTE(beisner): skipping glance restart on config ' + | ||
2452 | 490 | 'change check due to bug 1340307.') | ||
2453 | 491 | return | ||
2454 | 492 | |||
2455 | 493 | # Make config change to trigger a service restart | ||
2456 | 494 | self._change_config() | ||
2457 | 495 | |||
2458 | 496 | if not u.service_restarted(self.glance_sentry, 'glance-api', | ||
2459 | 497 | '/etc/glance/glance-api.conf'): | ||
2460 | 498 | self._restore_config() | ||
2461 | 499 | message = "glance service didn't restart after config change" | ||
2462 | 500 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2463 | 501 | |||
2464 | 502 | if not u.service_restarted(self.glance_sentry, 'glance-registry', | ||
2465 | 503 | '/etc/glance/glance-registry.conf', | ||
2466 | 504 | sleep_time=0): | ||
2467 | 505 | self._restore_config() | ||
2468 | 506 | message = "glance service didn't restart after config change" | ||
2469 | 507 | amulet.raise_status(amulet.FAIL, msg=message) | ||
2470 | 508 | |||
2471 | 509 | # Return to original config | ||
2472 | 510 | self._restore_config() | ||
2473 | 511 | |||
2474 | 512 | def test_users(self): | ||
2475 | 513 | '''Verify expected users.''' | ||
2476 | 514 | user0 = {'name': 'glance', | ||
2477 | 515 | 'enabled': True, | ||
2478 | 516 | 'tenantId': u.not_null, | ||
2479 | 517 | 'id': u.not_null, | ||
2480 | 518 | 'email': 'juju@localhost'} | ||
2481 | 519 | user1 = {'name': 'admin', | ||
2482 | 520 | 'enabled': True, | ||
2483 | 521 | 'tenantId': u.not_null, | ||
2484 | 522 | 'id': u.not_null, | ||
2485 | 523 | 'email': 'juju@localhost'} | ||
2486 | 524 | expected = [user0, user1] | ||
2487 | 525 | actual = self.keystone.users.list() | ||
2488 | 526 | |||
2489 | 527 | ret = u.validate_user_data(expected, actual) | ||
2490 | 528 | if ret: | ||
2491 | 529 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
2530 | 530 | 492 | ||
2531 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
2532 | --- tests/charmhelpers/contrib/amulet/utils.py 2015-04-23 14:52:07 +0000 | |||
2533 | +++ tests/charmhelpers/contrib/amulet/utils.py 2015-06-30 20:18:04 +0000 | |||
2534 | @@ -15,13 +15,15 @@ | |||
2535 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2536 | 16 | 16 | ||
2537 | 17 | import ConfigParser | 17 | import ConfigParser |
2538 | 18 | import distro_info | ||
2539 | 18 | import io | 19 | import io |
2540 | 19 | import logging | 20 | import logging |
2541 | 21 | import os | ||
2542 | 20 | import re | 22 | import re |
2543 | 23 | import six | ||
2544 | 21 | import sys | 24 | import sys |
2545 | 22 | import time | 25 | import time |
2548 | 23 | 26 | import urlparse | |
2547 | 24 | import six | ||
2549 | 25 | 27 | ||
2550 | 26 | 28 | ||
2551 | 27 | class AmuletUtils(object): | 29 | class AmuletUtils(object): |
2552 | @@ -33,6 +35,7 @@ | |||
2553 | 33 | 35 | ||
2554 | 34 | def __init__(self, log_level=logging.ERROR): | 36 | def __init__(self, log_level=logging.ERROR): |
2555 | 35 | self.log = self.get_logger(level=log_level) | 37 | self.log = self.get_logger(level=log_level) |
2556 | 38 | self.ubuntu_releases = self.get_ubuntu_releases() | ||
2557 | 36 | 39 | ||
2558 | 37 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): | 40 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): |
2559 | 38 | """Get a logger object that will log to stdout.""" | 41 | """Get a logger object that will log to stdout.""" |
2560 | @@ -70,12 +73,44 @@ | |||
2561 | 70 | else: | 73 | else: |
2562 | 71 | return False | 74 | return False |
2563 | 72 | 75 | ||
2564 | 76 | def get_ubuntu_release_from_sentry(self, sentry_unit): | ||
2565 | 77 | """Get Ubuntu release codename from sentry unit. | ||
2566 | 78 | |||
2567 | 79 | :param sentry_unit: amulet sentry/service unit pointer | ||
2568 | 80 | :returns: list of strings - release codename, failure message | ||
2569 | 81 | """ | ||
2570 | 82 | msg = None | ||
2571 | 83 | cmd = 'lsb_release -cs' | ||
2572 | 84 | release, code = sentry_unit.run(cmd) | ||
2573 | 85 | if code == 0: | ||
2574 | 86 | self.log.debug('{} lsb_release: {}'.format( | ||
2575 | 87 | sentry_unit.info['unit_name'], release)) | ||
2576 | 88 | else: | ||
2577 | 89 | msg = ('{} `{}` returned {} ' | ||
2578 | 90 | '{}'.format(sentry_unit.info['unit_name'], | ||
2579 | 91 | cmd, release, code)) | ||
2580 | 92 | if release not in self.ubuntu_releases: | ||
2581 | 93 | msg = ("Release ({}) not found in Ubuntu releases " | ||
2582 | 94 | "({})".format(release, self.ubuntu_releases)) | ||
2583 | 95 | return release, msg | ||
2584 | 96 | |||
2585 | 73 | def validate_services(self, commands): | 97 | def validate_services(self, commands): |
2589 | 74 | """Validate services. | 98 | """Validate that lists of commands succeed on service units. Can be |
2590 | 75 | 99 | used to verify system services are running on the corresponding | |
2588 | 76 | Verify the specified services are running on the corresponding | ||
2591 | 77 | service units. | 100 | service units. |
2593 | 78 | """ | 101 | |
2594 | 102 | :param commands: dict with sentry keys and arbitrary command list vals | ||
2595 | 103 | :returns: None if successful, Failure string message otherwise | ||
2596 | 104 | """ | ||
2597 | 105 | self.log.debug('Checking status of system services...') | ||
2598 | 106 | |||
2599 | 107 | # /!\ DEPRECATION WARNING (beisner): | ||
2600 | 108 | # New and existing tests should be rewritten to use | ||
2601 | 109 | # validate_services_by_name() as it is aware of init systems. | ||
2602 | 110 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | ||
2603 | 111 | 'validate_services_by_name instead of validate_services ' | ||
2604 | 112 | 'due to init system differences.') | ||
2605 | 113 | |||
2606 | 79 | for k, v in six.iteritems(commands): | 114 | for k, v in six.iteritems(commands): |
2607 | 80 | for cmd in v: | 115 | for cmd in v: |
2608 | 81 | output, code = k.run(cmd) | 116 | output, code = k.run(cmd) |
2609 | @@ -86,6 +121,41 @@ | |||
2610 | 86 | return "command `{}` returned {}".format(cmd, str(code)) | 121 | return "command `{}` returned {}".format(cmd, str(code)) |
2611 | 87 | return None | 122 | return None |
2612 | 88 | 123 | ||
2613 | 124 | def validate_services_by_name(self, sentry_services): | ||
2614 | 125 | """Validate system service status by service name, automatically | ||
2615 | 126 | detecting init system based on Ubuntu release codename. | ||
2616 | 127 | |||
2617 | 128 | :param sentry_services: dict with sentry keys and svc list values | ||
2618 | 129 | :returns: None if successful, Failure string message otherwise | ||
2619 | 130 | """ | ||
2620 | 131 | self.log.debug('Checking status of system services...') | ||
2621 | 132 | |||
2622 | 133 | # Point at which systemd became a thing | ||
2623 | 134 | systemd_switch = self.ubuntu_releases.index('vivid') | ||
2624 | 135 | |||
2625 | 136 | for sentry_unit, services_list in six.iteritems(sentry_services): | ||
2626 | 137 | # Get lsb_release codename from unit | ||
2627 | 138 | release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) | ||
2628 | 139 | if ret: | ||
2629 | 140 | return ret | ||
2630 | 141 | |||
2631 | 142 | for service_name in services_list: | ||
2632 | 143 | if (self.ubuntu_releases.index(release) >= systemd_switch or | ||
2633 | 144 | service_name == "rabbitmq-server"): | ||
2634 | 145 | # init is systemd | ||
2635 | 146 | cmd = 'sudo service {} status'.format(service_name) | ||
2636 | 147 | elif self.ubuntu_releases.index(release) < systemd_switch: | ||
2637 | 148 | # init is upstart | ||
2638 | 149 | cmd = 'sudo status {}'.format(service_name) | ||
2639 | 150 | |||
2640 | 151 | output, code = sentry_unit.run(cmd) | ||
2641 | 152 | self.log.debug('{} `{}` returned ' | ||
2642 | 153 | '{}'.format(sentry_unit.info['unit_name'], | ||
2643 | 154 | cmd, code)) | ||
2644 | 155 | if code != 0: | ||
2645 | 156 | return "command `{}` returned {}".format(cmd, str(code)) | ||
2646 | 157 | return None | ||
2647 | 158 | |||
2648 | 89 | def _get_config(self, unit, filename): | 159 | def _get_config(self, unit, filename): |
2649 | 90 | """Get a ConfigParser object for parsing a unit's config file.""" | 160 | """Get a ConfigParser object for parsing a unit's config file.""" |
2650 | 91 | file_contents = unit.file_contents(filename) | 161 | file_contents = unit.file_contents(filename) |
2651 | @@ -104,6 +174,9 @@ | |||
2652 | 104 | Verify that the specified section of the config file contains | 174 | Verify that the specified section of the config file contains |
2653 | 105 | the expected option key:value pairs. | 175 | the expected option key:value pairs. |
2654 | 106 | """ | 176 | """ |
2655 | 177 | self.log.debug('Validating config file data ({} in {} on {})' | ||
2656 | 178 | '...'.format(section, config_file, | ||
2657 | 179 | sentry_unit.info['unit_name'])) | ||
2658 | 107 | config = self._get_config(sentry_unit, config_file) | 180 | config = self._get_config(sentry_unit, config_file) |
2659 | 108 | 181 | ||
2660 | 109 | if section != 'DEFAULT' and not config.has_section(section): | 182 | if section != 'DEFAULT' and not config.has_section(section): |
2661 | @@ -112,10 +185,23 @@ | |||
2662 | 112 | for k in expected.keys(): | 185 | for k in expected.keys(): |
2663 | 113 | if not config.has_option(section, k): | 186 | if not config.has_option(section, k): |
2664 | 114 | return "section [{}] is missing option {}".format(section, k) | 187 | return "section [{}] is missing option {}".format(section, k) |
2669 | 115 | if config.get(section, k) != expected[k]: | 188 | |
2670 | 116 | return "section [{}] {}:{} != expected {}:{}".format( | 189 | actual = config.get(section, k) |
2671 | 117 | section, k, config.get(section, k), k, expected[k]) | 190 | v = expected[k] |
2672 | 118 | return None | 191 | if (isinstance(v, six.string_types) or |
2673 | 192 | isinstance(v, bool) or | ||
2674 | 193 | isinstance(v, six.integer_types)): | ||
2675 | 194 | # handle explicit values | ||
2676 | 195 | if actual != v: | ||
2677 | 196 | return "section [{}] {}:{} != expected {}:{}".format( | ||
2678 | 197 | section, k, actual, k, expected[k]) | ||
2679 | 198 | else: | ||
2680 | 199 | # handle not_null, valid_ip boolean comparison methods, etc. | ||
2681 | 200 | if v(actual): | ||
2682 | 201 | return None | ||
2683 | 202 | else: | ||
2684 | 203 | return "section [{}] {}:{} != expected {}:{}".format( | ||
2685 | 204 | section, k, actual, k, expected[k]) | ||
2686 | 119 | 205 | ||
2687 | 120 | def _validate_dict_data(self, expected, actual): | 206 | def _validate_dict_data(self, expected, actual): |
2688 | 121 | """Validate dictionary data. | 207 | """Validate dictionary data. |
2689 | @@ -321,3 +407,135 @@ | |||
2690 | 321 | 407 | ||
2691 | 322 | def endpoint_error(self, name, data): | 408 | def endpoint_error(self, name, data): |
2692 | 323 | return 'unexpected endpoint data in {} - {}'.format(name, data) | 409 | return 'unexpected endpoint data in {} - {}'.format(name, data) |
2693 | 410 | |||
2694 | 411 | def get_ubuntu_releases(self): | ||
2695 | 412 | """Return a list of all Ubuntu releases in order of release.""" | ||
2696 | 413 | _d = distro_info.UbuntuDistroInfo() | ||
2697 | 414 | _release_list = _d.all | ||
2698 | 415 | self.log.debug('Ubuntu release list: {}'.format(_release_list)) | ||
2699 | 416 | return _release_list | ||
2700 | 417 | |||
2701 | 418 | def file_to_url(self, file_rel_path): | ||
2702 | 419 | """Convert a relative file path to a file URL.""" | ||
2703 | 420 | _abs_path = os.path.abspath(file_rel_path) | ||
2704 | 421 | return urlparse.urlparse(_abs_path, scheme='file').geturl() | ||
2705 | 422 | |||
2706 | 423 | def check_commands_on_units(self, commands, sentry_units): | ||
2707 | 424 | """Check that all commands in a list exit zero on all | ||
2708 | 425 | sentry units in a list. | ||
2709 | 426 | |||
2710 | 427 | :param commands: list of bash commands | ||
2711 | 428 | :param sentry_units: list of sentry unit pointers | ||
2712 | 429 | :returns: None if successful; Failure message otherwise | ||
2713 | 430 | """ | ||
2714 | 431 | self.log.debug('Checking exit codes for {} commands on {} ' | ||
2715 | 432 | 'sentry units...'.format(len(commands), | ||
2716 | 433 | len(sentry_units))) | ||
2717 | 434 | for sentry_unit in sentry_units: | ||
2718 | 435 | for cmd in commands: | ||
2719 | 436 | output, code = sentry_unit.run(cmd) | ||
2720 | 437 | if code == 0: | ||
2721 | 438 | msg = ('{} `{}` returned {} ' | ||
2722 | 439 | '(OK)'.format(sentry_unit.info['unit_name'], | ||
2723 | 440 | cmd, code)) | ||
2724 | 441 | self.log.debug(msg) | ||
2725 | 442 | else: | ||
2726 | 443 | msg = ('{} `{}` returned {} ' | ||
2727 | 444 | '{}'.format(sentry_unit.info['unit_name'], | ||
2728 | 445 | cmd, code, output)) | ||
2729 | 446 | return msg | ||
2730 | 447 | return None | ||
2731 | 448 | |||
2732 | 449 | def get_process_id_list(self, sentry_unit, process_name): | ||
2733 | 450 | """Get a list of process ID(s) from a single sentry juju unit | ||
2734 | 451 | for a single process name. | ||
2735 | 452 | |||
2736 | 453 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
2737 | 454 | :param process_name: Process name | ||
2738 | 455 | :returns: List of process IDs | ||
2739 | 456 | """ | ||
2740 | 457 | cmd = 'pidof {}'.format(process_name) | ||
2741 | 458 | output, code = sentry_unit.run(cmd) | ||
2742 | 459 | if code != 0: | ||
2743 | 460 | msg = ('{} `{}` returned {} ' | ||
2744 | 461 | '{}'.format(sentry_unit.info['unit_name'], | ||
2745 | 462 | cmd, code, output)) | ||
2746 | 463 | raise RuntimeError(msg) | ||
2747 | 464 | return str(output).split() | ||
2748 | 465 | |||
2749 | 466 | def get_unit_process_ids(self, unit_processes): | ||
2750 | 467 | """Construct a dict containing unit sentries, process names, and | ||
2751 | 468 | process IDs.""" | ||
2752 | 469 | pid_dict = {} | ||
2753 | 470 | for sentry_unit, process_list in unit_processes.iteritems(): | ||
2754 | 471 | pid_dict[sentry_unit] = {} | ||
2755 | 472 | for process in process_list: | ||
2756 | 473 | pids = self.get_process_id_list(sentry_unit, process) | ||
2757 | 474 | pid_dict[sentry_unit].update({process: pids}) | ||
2758 | 475 | return pid_dict | ||
2759 | 476 | |||
2760 | 477 | def validate_unit_process_ids(self, expected, actual): | ||
2761 | 478 | """Validate process id quantities for services on units.""" | ||
2762 | 479 | self.log.debug('Checking units for running processes...') | ||
2763 | 480 | self.log.debug('Expected PIDs: {}'.format(expected)) | ||
2764 | 481 | self.log.debug('Actual PIDs: {}'.format(actual)) | ||
2765 | 482 | |||
2766 | 483 | if len(actual) != len(expected): | ||
2767 | 484 | msg = ('Unit count mismatch. expected, actual: {}, ' | ||
2768 | 485 | '{} '.format(len(expected), len(actual))) | ||
2769 | 486 | return msg | ||
2770 | 487 | |||
2771 | 488 | for (e_sentry, e_proc_names) in expected.iteritems(): | ||
2772 | 489 | e_sentry_name = e_sentry.info['unit_name'] | ||
2773 | 490 | if e_sentry in actual.keys(): | ||
2774 | 491 | a_proc_names = actual[e_sentry] | ||
2775 | 492 | else: | ||
2776 | 493 | msg = ('Expected sentry ({}) not found in actual dict data.' | ||
2777 | 494 | '{}'.format(e_sentry_name, e_sentry)) | ||
2778 | 495 | return msg | ||
2779 | 496 | |||
2780 | 497 | if len(e_proc_names.keys()) != len(a_proc_names.keys()): | ||
2781 | 498 | msg = ('Process name count mismatch. expected, actual: {}, ' | ||
2782 | 499 | '{}'.format(len(expected), len(actual))) | ||
2783 | 500 | return msg | ||
2784 | 501 | |||
2785 | 502 | for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ | ||
2786 | 503 | zip(e_proc_names.items(), a_proc_names.items()): | ||
2787 | 504 | if e_proc_name != a_proc_name: | ||
2788 | 505 | msg = ('Process name mismatch. expected, actual: {}, ' | ||
2789 | 506 | '{}'.format(e_proc_name, a_proc_name)) | ||
2790 | 507 | return msg | ||
2791 | 508 | |||
2792 | 509 | a_pids_length = len(a_pids) | ||
2793 | 510 | if e_pids_length != a_pids_length: | ||
2794 | 511 | msg = ('PID count mismatch. {} ({}) expected, actual: {}, ' | ||
2795 | 512 | '{} ({})'.format(e_sentry_name, | ||
2796 | 513 | e_proc_name, | ||
2797 | 514 | e_pids_length, | ||
2798 | 515 | a_pids_length, | ||
2799 | 516 | a_pids)) | ||
2800 | 517 | return msg | ||
2801 | 518 | else: | ||
2802 | 519 | msg = ('PID check OK: {} {} {}: ' | ||
2803 | 520 | '{}'.format(e_sentry_name, | ||
2804 | 521 | e_proc_name, | ||
2805 | 522 | e_pids_length, | ||
2806 | 523 | a_pids)) | ||
2807 | 524 | self.log.debug(msg) | ||
2808 | 525 | return None | ||
2809 | 526 | |||
2810 | 527 | def validate_list_of_identical_dicts(self, list_of_dicts): | ||
2811 | 528 | """Check that all dicts within a list are identical.""" | ||
2812 | 529 | hashes = [] | ||
2813 | 530 | for _dict in list_of_dicts: | ||
2814 | 531 | hashes.append(hash(frozenset(_dict.items()))) | ||
2815 | 532 | |||
2816 | 533 | self.log.debug('Hashes: {}'.format(hashes)) | ||
2817 | 534 | if len(set(hashes)) == 1: | ||
2818 | 535 | msg = 'Dicts within list are identical' | ||
2819 | 536 | self.log.debug(msg) | ||
2820 | 537 | else: | ||
2821 | 538 | msg = 'Dicts within list are not identical' | ||
2822 | 539 | return msg | ||
2823 | 540 | |||
2824 | 541 | return None | ||
2825 | 324 | 542 | ||
2826 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
2827 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-04-23 14:52:07 +0000 | |||
2828 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2015-06-30 20:18:04 +0000 | |||
2829 | @@ -79,9 +79,9 @@ | |||
2830 | 79 | services.append(this_service) | 79 | services.append(this_service) |
2831 | 80 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', | 80 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
2832 | 81 | 'ceph-osd', 'ceph-radosgw'] | 81 | 'ceph-osd', 'ceph-radosgw'] |
2836 | 82 | # Openstack subordinate charms do not expose an origin option as that | 82 | # Most OpenStack subordinate charms do not expose an origin option |
2837 | 83 | # is controlled by the principle | 83 | # as that is controlled by the principle. |
2838 | 84 | ignore = ['neutron-openvswitch'] | 84 | ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] |
2839 | 85 | 85 | ||
2840 | 86 | if self.openstack: | 86 | if self.openstack: |
2841 | 87 | for svc in services: | 87 | for svc in services: |
2842 | @@ -110,7 +110,8 @@ | |||
2843 | 110 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, | 110 | (self.precise_essex, self.precise_folsom, self.precise_grizzly, |
2844 | 111 | self.precise_havana, self.precise_icehouse, | 111 | self.precise_havana, self.precise_icehouse, |
2845 | 112 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, | 112 | self.trusty_icehouse, self.trusty_juno, self.utopic_juno, |
2847 | 113 | self.trusty_kilo, self.vivid_kilo) = range(10) | 113 | self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, |
2848 | 114 | self.wily_liberty) = range(12) | ||
2849 | 114 | 115 | ||
2850 | 115 | releases = { | 116 | releases = { |
2851 | 116 | ('precise', None): self.precise_essex, | 117 | ('precise', None): self.precise_essex, |
2852 | @@ -121,8 +122,10 @@ | |||
2853 | 121 | ('trusty', None): self.trusty_icehouse, | 122 | ('trusty', None): self.trusty_icehouse, |
2854 | 122 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, | 123 | ('trusty', 'cloud:trusty-juno'): self.trusty_juno, |
2855 | 123 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, | 124 | ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, |
2856 | 125 | ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, | ||
2857 | 124 | ('utopic', None): self.utopic_juno, | 126 | ('utopic', None): self.utopic_juno, |
2859 | 125 | ('vivid', None): self.vivid_kilo} | 127 | ('vivid', None): self.vivid_kilo, |
2860 | 128 | ('wily', None): self.wily_liberty} | ||
2861 | 126 | return releases[(self.series, self.openstack)] | 129 | return releases[(self.series, self.openstack)] |
2862 | 127 | 130 | ||
2863 | 128 | def _get_openstack_release_string(self): | 131 | def _get_openstack_release_string(self): |
2864 | @@ -138,9 +141,42 @@ | |||
2865 | 138 | ('trusty', 'icehouse'), | 141 | ('trusty', 'icehouse'), |
2866 | 139 | ('utopic', 'juno'), | 142 | ('utopic', 'juno'), |
2867 | 140 | ('vivid', 'kilo'), | 143 | ('vivid', 'kilo'), |
2868 | 144 | ('wily', 'liberty'), | ||
2869 | 141 | ]) | 145 | ]) |
2870 | 142 | if self.openstack: | 146 | if self.openstack: |
2871 | 143 | os_origin = self.openstack.split(':')[1] | 147 | os_origin = self.openstack.split(':')[1] |
2872 | 144 | return os_origin.split('%s-' % self.series)[1].split('/')[0] | 148 | return os_origin.split('%s-' % self.series)[1].split('/')[0] |
2873 | 145 | else: | 149 | else: |
2874 | 146 | return releases[self.series] | 150 | return releases[self.series] |
2875 | 151 | |||
2876 | 152 | def get_ceph_expected_pools(self, radosgw=False): | ||
2877 | 153 | """Return a list of expected ceph pools based on Ubuntu-OpenStack | ||
2878 | 154 | release and whether ceph radosgw is flagged as present or not.""" | ||
2879 | 155 | |||
2880 | 156 | if self._get_openstack_release() >= self.trusty_kilo: | ||
2881 | 157 | # Kilo or later | ||
2882 | 158 | pools = [ | ||
2883 | 159 | 'rbd', | ||
2884 | 160 | 'cinder', | ||
2885 | 161 | 'glance' | ||
2886 | 162 | ] | ||
2887 | 163 | else: | ||
2888 | 164 | # Juno or earlier | ||
2889 | 165 | pools = [ | ||
2890 | 166 | 'data', | ||
2891 | 167 | 'metadata', | ||
2892 | 168 | 'rbd', | ||
2893 | 169 | 'cinder', | ||
2894 | 170 | 'glance' | ||
2895 | 171 | ] | ||
2896 | 172 | |||
2897 | 173 | if radosgw: | ||
2898 | 174 | pools.extend([ | ||
2899 | 175 | '.rgw.root', | ||
2900 | 176 | '.rgw.control', | ||
2901 | 177 | '.rgw', | ||
2902 | 178 | '.rgw.gc', | ||
2903 | 179 | '.users.uid' | ||
2904 | 180 | ]) | ||
2905 | 181 | |||
2906 | 182 | return pools | ||
2907 | 147 | 183 | ||
2908 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
2909 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-03-20 17:15:02 +0000 | |||
2910 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2015-06-30 20:18:04 +0000 | |||
2911 | @@ -14,16 +14,19 @@ | |||
2912 | 14 | # You should have received a copy of the GNU Lesser General Public License | 14 | # You should have received a copy of the GNU Lesser General Public License |
2913 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2914 | 16 | 16 | ||
2915 | 17 | import json | ||
2916 | 17 | import logging | 18 | import logging |
2917 | 18 | import os | 19 | import os |
2918 | 20 | import six | ||
2919 | 19 | import time | 21 | import time |
2920 | 20 | import urllib | 22 | import urllib |
2921 | 21 | 23 | ||
2922 | 24 | import cinderclient.v1.client as cinder_client | ||
2923 | 22 | import glanceclient.v1.client as glance_client | 25 | import glanceclient.v1.client as glance_client |
2924 | 26 | import heatclient.v1.client as heat_client | ||
2925 | 23 | import keystoneclient.v2_0 as keystone_client | 27 | import keystoneclient.v2_0 as keystone_client |
2926 | 24 | import novaclient.v1_1.client as nova_client | 28 | import novaclient.v1_1.client as nova_client |
2929 | 25 | 29 | import swiftclient | |
2928 | 26 | import six | ||
2930 | 27 | 30 | ||
2931 | 28 | from charmhelpers.contrib.amulet.utils import ( | 31 | from charmhelpers.contrib.amulet.utils import ( |
2932 | 29 | AmuletUtils | 32 | AmuletUtils |
2933 | @@ -37,7 +40,7 @@ | |||
2934 | 37 | """OpenStack amulet utilities. | 40 | """OpenStack amulet utilities. |
2935 | 38 | 41 | ||
2936 | 39 | This class inherits from AmuletUtils and has additional support | 42 | This class inherits from AmuletUtils and has additional support |
2938 | 40 | that is specifically for use by OpenStack charms. | 43 | that is specifically for use by OpenStack charm tests. |
2939 | 41 | """ | 44 | """ |
2940 | 42 | 45 | ||
2941 | 43 | def __init__(self, log_level=ERROR): | 46 | def __init__(self, log_level=ERROR): |
2942 | @@ -51,6 +54,8 @@ | |||
2943 | 51 | Validate actual endpoint data vs expected endpoint data. The ports | 54 | Validate actual endpoint data vs expected endpoint data. The ports |
2944 | 52 | are used to find the matching endpoint. | 55 | are used to find the matching endpoint. |
2945 | 53 | """ | 56 | """ |
2946 | 57 | self.log.debug('Validating endpoint data...') | ||
2947 | 58 | self.log.debug('actual: {}'.format(repr(endpoints))) | ||
2948 | 54 | found = False | 59 | found = False |
2949 | 55 | for ep in endpoints: | 60 | for ep in endpoints: |
2950 | 56 | self.log.debug('endpoint: {}'.format(repr(ep))) | 61 | self.log.debug('endpoint: {}'.format(repr(ep))) |
2951 | @@ -77,6 +82,7 @@ | |||
2952 | 77 | Validate a list of actual service catalog endpoints vs a list of | 82 | Validate a list of actual service catalog endpoints vs a list of |
2953 | 78 | expected service catalog endpoints. | 83 | expected service catalog endpoints. |
2954 | 79 | """ | 84 | """ |
2955 | 85 | self.log.debug('Validating service catalog endpoint data...') | ||
2956 | 80 | self.log.debug('actual: {}'.format(repr(actual))) | 86 | self.log.debug('actual: {}'.format(repr(actual))) |
2957 | 81 | for k, v in six.iteritems(expected): | 87 | for k, v in six.iteritems(expected): |
2958 | 82 | if k in actual: | 88 | if k in actual: |
2959 | @@ -93,6 +99,7 @@ | |||
2960 | 93 | Validate a list of actual tenant data vs list of expected tenant | 99 | Validate a list of actual tenant data vs list of expected tenant |
2961 | 94 | data. | 100 | data. |
2962 | 95 | """ | 101 | """ |
2963 | 102 | self.log.debug('Validating tenant data...') | ||
2964 | 96 | self.log.debug('actual: {}'.format(repr(actual))) | 103 | self.log.debug('actual: {}'.format(repr(actual))) |
2965 | 97 | for e in expected: | 104 | for e in expected: |
2966 | 98 | found = False | 105 | found = False |
2967 | @@ -114,6 +121,7 @@ | |||
2968 | 114 | Validate a list of actual role data vs a list of expected role | 121 | Validate a list of actual role data vs a list of expected role |
2969 | 115 | data. | 122 | data. |
2970 | 116 | """ | 123 | """ |
2971 | 124 | self.log.debug('Validating role data...') | ||
2972 | 117 | self.log.debug('actual: {}'.format(repr(actual))) | 125 | self.log.debug('actual: {}'.format(repr(actual))) |
2973 | 118 | for e in expected: | 126 | for e in expected: |
2974 | 119 | found = False | 127 | found = False |
2975 | @@ -134,6 +142,7 @@ | |||
2976 | 134 | Validate a list of actual user data vs a list of expected user | 142 | Validate a list of actual user data vs a list of expected user |
2977 | 135 | data. | 143 | data. |
2978 | 136 | """ | 144 | """ |
2979 | 145 | self.log.debug('Validating user data...') | ||
2980 | 137 | self.log.debug('actual: {}'.format(repr(actual))) | 146 | self.log.debug('actual: {}'.format(repr(actual))) |
2981 | 138 | for e in expected: | 147 | for e in expected: |
2982 | 139 | found = False | 148 | found = False |
2983 | @@ -155,17 +164,29 @@ | |||
2984 | 155 | 164 | ||
2985 | 156 | Validate a list of actual flavors vs a list of expected flavors. | 165 | Validate a list of actual flavors vs a list of expected flavors. |
2986 | 157 | """ | 166 | """ |
2987 | 167 | self.log.debug('Validating flavor data...') | ||
2988 | 158 | self.log.debug('actual: {}'.format(repr(actual))) | 168 | self.log.debug('actual: {}'.format(repr(actual))) |
2989 | 159 | act = [a.name for a in actual] | 169 | act = [a.name for a in actual] |
2990 | 160 | return self._validate_list_data(expected, act) | 170 | return self._validate_list_data(expected, act) |
2991 | 161 | 171 | ||
2992 | 162 | def tenant_exists(self, keystone, tenant): | 172 | def tenant_exists(self, keystone, tenant): |
2993 | 163 | """Return True if tenant exists.""" | 173 | """Return True if tenant exists.""" |
2994 | 174 | self.log.debug('Checking if tenant exists ({})...'.format(tenant)) | ||
2995 | 164 | return tenant in [t.name for t in keystone.tenants.list()] | 175 | return tenant in [t.name for t in keystone.tenants.list()] |
2996 | 165 | 176 | ||
2997 | 177 | def authenticate_cinder_admin(self, keystone_sentry, username, | ||
2998 | 178 | password, tenant): | ||
2999 | 179 | """Authenticates admin user with cinder.""" | ||
3000 | 180 | service_ip = \ | ||
3001 | 181 | keystone_sentry.relation('shared-db', | ||
3002 | 182 | 'mysql:shared-db')['private-address'] | ||
3003 | 183 | ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) | ||
3004 | 184 | return cinder_client.Client(username, password, tenant, ept) | ||
3005 | 185 | |||
3006 | 166 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 186 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
3007 | 167 | tenant): | 187 | tenant): |
3008 | 168 | """Authenticates admin user with the keystone admin endpoint.""" | 188 | """Authenticates admin user with the keystone admin endpoint.""" |
3009 | 189 | self.log.debug('Authenticating keystone admin...') | ||
3010 | 169 | unit = keystone_sentry | 190 | unit = keystone_sentry |
3011 | 170 | service_ip = unit.relation('shared-db', | 191 | service_ip = unit.relation('shared-db', |
3012 | 171 | 'mysql:shared-db')['private-address'] | 192 | 'mysql:shared-db')['private-address'] |
3013 | @@ -175,6 +196,7 @@ | |||
3014 | 175 | 196 | ||
3015 | 176 | def authenticate_keystone_user(self, keystone, user, password, tenant): | 197 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
3016 | 177 | """Authenticates a regular user with the keystone public endpoint.""" | 198 | """Authenticates a regular user with the keystone public endpoint.""" |
3017 | 199 | self.log.debug('Authenticating keystone user ({})...'.format(user)) | ||
3018 | 178 | ep = keystone.service_catalog.url_for(service_type='identity', | 200 | ep = keystone.service_catalog.url_for(service_type='identity', |
3019 | 179 | endpoint_type='publicURL') | 201 | endpoint_type='publicURL') |
3020 | 180 | return keystone_client.Client(username=user, password=password, | 202 | return keystone_client.Client(username=user, password=password, |
3021 | @@ -182,19 +204,49 @@ | |||
3022 | 182 | 204 | ||
3023 | 183 | def authenticate_glance_admin(self, keystone): | 205 | def authenticate_glance_admin(self, keystone): |
3024 | 184 | """Authenticates admin user with glance.""" | 206 | """Authenticates admin user with glance.""" |
3025 | 207 | self.log.debug('Authenticating glance admin...') | ||
3026 | 185 | ep = keystone.service_catalog.url_for(service_type='image', | 208 | ep = keystone.service_catalog.url_for(service_type='image', |
3027 | 186 | endpoint_type='adminURL') | 209 | endpoint_type='adminURL') |
3028 | 187 | return glance_client.Client(ep, token=keystone.auth_token) | 210 | return glance_client.Client(ep, token=keystone.auth_token) |
3029 | 188 | 211 | ||
3030 | 212 | def authenticate_heat_admin(self, keystone): | ||
3031 | 213 | """Authenticates the admin user with heat.""" | ||
3032 | 214 | self.log.debug('Authenticating heat admin...') | ||
3033 | 215 | ep = keystone.service_catalog.url_for(service_type='orchestration', | ||
3034 | 216 | endpoint_type='publicURL') | ||
3035 | 217 | return heat_client.Client(endpoint=ep, token=keystone.auth_token) | ||
3036 | 218 | |||
3037 | 189 | def authenticate_nova_user(self, keystone, user, password, tenant): | 219 | def authenticate_nova_user(self, keystone, user, password, tenant): |
3038 | 190 | """Authenticates a regular user with nova-api.""" | 220 | """Authenticates a regular user with nova-api.""" |
3039 | 221 | self.log.debug('Authenticating nova user ({})...'.format(user)) | ||
3040 | 191 | ep = keystone.service_catalog.url_for(service_type='identity', | 222 | ep = keystone.service_catalog.url_for(service_type='identity', |
3041 | 192 | endpoint_type='publicURL') | 223 | endpoint_type='publicURL') |
3042 | 193 | return nova_client.Client(username=user, api_key=password, | 224 | return nova_client.Client(username=user, api_key=password, |
3043 | 194 | project_id=tenant, auth_url=ep) | 225 | project_id=tenant, auth_url=ep) |
3044 | 195 | 226 | ||
3045 | 227 | def authenticate_swift_user(self, keystone, user, password, tenant): | ||
3046 | 228 | """Authenticates a regular user with swift api.""" | ||
3047 | 229 | self.log.debug('Authenticating swift user ({})...'.format(user)) | ||
3048 | 230 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
3049 | 231 | endpoint_type='publicURL') | ||
3050 | 232 | return swiftclient.Connection(authurl=ep, | ||
3051 | 233 | user=user, | ||
3052 | 234 | key=password, | ||
3053 | 235 | tenant_name=tenant, | ||
3054 | 236 | auth_version='2.0') | ||
3055 | 237 | |||
3056 | 196 | def create_cirros_image(self, glance, image_name): | 238 | def create_cirros_image(self, glance, image_name): |
3058 | 197 | """Download the latest cirros image and upload it to glance.""" | 239 | """Download the latest cirros image and upload it to glance, |
3059 | 240 | validate and return a resource pointer. | ||
3060 | 241 | |||
3061 | 242 | :param glance: pointer to authenticated glance connection | ||
3062 | 243 | :param image_name: display name for new image | ||
3063 | 244 | :returns: glance image pointer | ||
3064 | 245 | """ | ||
3065 | 246 | self.log.debug('Creating glance cirros image ' | ||
3066 | 247 | '({})...'.format(image_name)) | ||
3067 | 248 | |||
3068 | 249 | # Download cirros image | ||
3069 | 198 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | 250 | http_proxy = os.getenv('AMULET_HTTP_PROXY') |
3070 | 199 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | 251 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) |
3071 | 200 | if http_proxy: | 252 | if http_proxy: |
3072 | @@ -203,57 +255,67 @@ | |||
3073 | 203 | else: | 255 | else: |
3074 | 204 | opener = urllib.FancyURLopener() | 256 | opener = urllib.FancyURLopener() |
3075 | 205 | 257 | ||
3077 | 206 | f = opener.open("http://download.cirros-cloud.net/version/released") | 258 | f = opener.open('http://download.cirros-cloud.net/version/released') |
3078 | 207 | version = f.read().strip() | 259 | version = f.read().strip() |
3080 | 208 | cirros_img = "cirros-{}-x86_64-disk.img".format(version) | 260 | cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) |
3081 | 209 | local_path = os.path.join('tests', cirros_img) | 261 | local_path = os.path.join('tests', cirros_img) |
3082 | 210 | 262 | ||
3083 | 211 | if not os.path.exists(local_path): | 263 | if not os.path.exists(local_path): |
3085 | 212 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | 264 | cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', |
3086 | 213 | version, cirros_img) | 265 | version, cirros_img) |
3087 | 214 | opener.retrieve(cirros_url, local_path) | 266 | opener.retrieve(cirros_url, local_path) |
3088 | 215 | f.close() | 267 | f.close() |
3089 | 216 | 268 | ||
3090 | 269 | # Create glance image | ||
3091 | 217 | with open(local_path) as f: | 270 | with open(local_path) as f: |
3092 | 218 | image = glance.images.create(name=image_name, is_public=True, | 271 | image = glance.images.create(name=image_name, is_public=True, |
3093 | 219 | disk_format='qcow2', | 272 | disk_format='qcow2', |
3094 | 220 | container_format='bare', data=f) | 273 | container_format='bare', data=f) |
3107 | 221 | count = 1 | 274 | |
3108 | 222 | status = image.status | 275 | # Wait for image to reach active status |
3109 | 223 | while status != 'active' and count < 10: | 276 | img_id = image.id |
3110 | 224 | time.sleep(3) | 277 | ret = self.resource_reaches_status(glance.images, img_id, |
3111 | 225 | image = glance.images.get(image.id) | 278 | expected_stat='active', |
3112 | 226 | status = image.status | 279 | msg='Image status wait') |
3113 | 227 | self.log.debug('image status: {}'.format(status)) | 280 | if not ret: |
3114 | 228 | count += 1 | 281 | msg = 'Glance image failed to reach expected state.' |
3115 | 229 | 282 | raise RuntimeError(msg) | |
3116 | 230 | if status != 'active': | 283 | |
3117 | 231 | self.log.error('image creation timed out') | 284 | # Re-validate new image |
3118 | 232 | return None | 285 | self.log.debug('Validating image attributes...') |
3119 | 286 | val_img_name = glance.images.get(img_id).name | ||
3120 | 287 | val_img_stat = glance.images.get(img_id).status | ||
3121 | 288 | val_img_pub = glance.images.get(img_id).is_public | ||
3122 | 289 | val_img_cfmt = glance.images.get(img_id).container_format | ||
3123 | 290 | val_img_dfmt = glance.images.get(img_id).disk_format | ||
3124 | 291 | msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' | ||
3125 | 292 | 'container fmt:{} disk fmt:{}'.format( | ||
3126 | 293 | val_img_name, val_img_pub, img_id, | ||
3127 | 294 | val_img_stat, val_img_cfmt, val_img_dfmt)) | ||
3128 | 295 | |||
3129 | 296 | if val_img_name == image_name and val_img_stat == 'active' \ | ||
3130 | 297 | and val_img_pub is True and val_img_cfmt == 'bare' \ | ||
3131 | 298 | and val_img_dfmt == 'qcow2': | ||
3132 | 299 | self.log.debug(msg_attr) | ||
3133 | 300 | else: | ||
3134 | 301 | msg = ('Volume validation failed, {}'.format(msg_attr)) | ||
3135 | 302 | raise RuntimeError(msg) | ||
3136 | 233 | 303 | ||
3137 | 234 | return image | 304 | return image |
3138 | 235 | 305 | ||
3139 | 236 | def delete_image(self, glance, image): | 306 | def delete_image(self, glance, image): |
3140 | 237 | """Delete the specified image.""" | 307 | """Delete the specified image.""" |
3157 | 238 | num_before = len(list(glance.images.list())) | 308 | |
3158 | 239 | glance.images.delete(image) | 309 | # /!\ DEPRECATION WARNING |
3159 | 240 | 310 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | |
3160 | 241 | count = 1 | 311 | 'delete_resource instead of delete_image.') |
3161 | 242 | num_after = len(list(glance.images.list())) | 312 | self.log.debug('Deleting glance image ({})...'.format(image)) |
3162 | 243 | while num_after != (num_before - 1) and count < 10: | 313 | return self.delete_resource(glance.images, image, msg='glance image') |
3147 | 244 | time.sleep(3) | ||
3148 | 245 | num_after = len(list(glance.images.list())) | ||
3149 | 246 | self.log.debug('number of images: {}'.format(num_after)) | ||
3150 | 247 | count += 1 | ||
3151 | 248 | |||
3152 | 249 | if num_after != (num_before - 1): | ||
3153 | 250 | self.log.error('image deletion timed out') | ||
3154 | 251 | return False | ||
3155 | 252 | |||
3156 | 253 | return True | ||
3163 | 254 | 314 | ||
3164 | 255 | def create_instance(self, nova, image_name, instance_name, flavor): | 315 | def create_instance(self, nova, image_name, instance_name, flavor): |
3165 | 256 | """Create the specified instance.""" | 316 | """Create the specified instance.""" |
3166 | 317 | self.log.debug('Creating instance ' | ||
3167 | 318 | '({}|{}|{})'.format(instance_name, image_name, flavor)) | ||
3168 | 257 | image = nova.images.find(name=image_name) | 319 | image = nova.images.find(name=image_name) |
3169 | 258 | flavor = nova.flavors.find(name=flavor) | 320 | flavor = nova.flavors.find(name=flavor) |
3170 | 259 | instance = nova.servers.create(name=instance_name, image=image, | 321 | instance = nova.servers.create(name=instance_name, image=image, |
3171 | @@ -276,19 +338,264 @@ | |||
3172 | 276 | 338 | ||
3173 | 277 | def delete_instance(self, nova, instance): | 339 | def delete_instance(self, nova, instance): |
3174 | 278 | """Delete the specified instance.""" | 340 | """Delete the specified instance.""" |
3191 | 279 | num_before = len(list(nova.servers.list())) | 341 | |
3192 | 280 | nova.servers.delete(instance) | 342 | # /!\ DEPRECATION WARNING |
3193 | 281 | 343 | self.log.warn('/!\\ DEPRECATION WARNING: use ' | |
3194 | 282 | count = 1 | 344 | 'delete_resource instead of delete_instance.') |
3195 | 283 | num_after = len(list(nova.servers.list())) | 345 | self.log.debug('Deleting instance ({})...'.format(instance)) |
3196 | 284 | while num_after != (num_before - 1) and count < 10: | 346 | return self.delete_resource(nova.servers, instance, |
3197 | 285 | time.sleep(3) | 347 | msg='nova instance') |
3198 | 286 | num_after = len(list(nova.servers.list())) | 348 | |
3199 | 287 | self.log.debug('number of instances: {}'.format(num_after)) | 349 | def create_or_get_keypair(self, nova, keypair_name="testkey"): |
3200 | 288 | count += 1 | 350 | """Create a new keypair, or return pointer if it already exists.""" |
3201 | 289 | 351 | try: | |
3202 | 290 | if num_after != (num_before - 1): | 352 | _keypair = nova.keypairs.get(keypair_name) |
3203 | 291 | self.log.error('instance deletion timed out') | 353 | self.log.debug('Keypair ({}) already exists, ' |
3204 | 292 | return False | 354 | 'using it.'.format(keypair_name)) |
3205 | 293 | 355 | return _keypair | |
3206 | 294 | return True | 356 | except: |
3207 | 357 | self.log.debug('Keypair ({}) does not exist, ' | ||
3208 | 358 | 'creating it.'.format(keypair_name)) | ||
3209 | 359 | |||
3210 | 360 | _keypair = nova.keypairs.create(name=keypair_name) | ||
3211 | 361 | return _keypair | ||
3212 | 362 | |||
3213 | 363 | def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, | ||
3214 | 364 | img_id=None, src_vol_id=None, snap_id=None): | ||
3215 | 365 | """Create cinder volume, optionally from a glance image, or | ||
3216 | 366 | optionally as a clone of an existing volume, or optionally | ||
3217 | 367 | from a snapshot. Wait for the new volume status to reach | ||
3218 | 368 | the expected status, validate and return a resource pointer. | ||
3219 | 369 | |||
3220 | 370 | :param vol_name: cinder volume display name | ||
3221 | 371 | :param vol_size: size in gigabytes | ||
3222 | 372 | :param img_id: optional glance image id | ||
3223 | 373 | :param src_vol_id: optional source volume id to clone | ||
3224 | 374 | :param snap_id: optional snapshot id to use | ||
3225 | 375 | :returns: cinder volume pointer | ||
3226 | 376 | """ | ||
3227 | 377 | # Handle parameter input | ||
3228 | 378 | if img_id and not src_vol_id and not snap_id: | ||
3229 | 379 | self.log.debug('Creating cinder volume from glance image ' | ||
3230 | 380 | '({})...'.format(img_id)) | ||
3231 | 381 | bootable = 'true' | ||
3232 | 382 | elif src_vol_id and not img_id and not snap_id: | ||
3233 | 383 | self.log.debug('Cloning cinder volume...') | ||
3234 | 384 | bootable = cinder.volumes.get(src_vol_id).bootable | ||
3235 | 385 | elif snap_id and not src_vol_id and not img_id: | ||
3236 | 386 | self.log.debug('Creating cinder volume from snapshot...') | ||
3237 | 387 | snap = cinder.volume_snapshots.find(id=snap_id) | ||
3238 | 388 | vol_size = snap.size | ||
3239 | 389 | snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id | ||
3240 | 390 | bootable = cinder.volumes.get(snap_vol_id).bootable | ||
3241 | 391 | elif not img_id and not src_vol_id and not snap_id: | ||
3242 | 392 | self.log.debug('Creating cinder volume...') | ||
3243 | 393 | bootable = 'false' | ||
3244 | 394 | else: | ||
3245 | 395 | msg = ('Invalid method use - name:{} size:{} img_id:{} ' | ||
3246 | 396 | 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, | ||
3247 | 397 | img_id, src_vol_id, | ||
3248 | 398 | snap_id)) | ||
3249 | 399 | raise RuntimeError(msg) | ||
3250 | 400 | |||
3251 | 401 | # Create new volume | ||
3252 | 402 | try: | ||
3253 | 403 | vol_new = cinder.volumes.create(display_name=vol_name, | ||
3254 | 404 | imageRef=img_id, | ||
3255 | 405 | size=vol_size, | ||
3256 | 406 | source_volid=src_vol_id, | ||
3257 | 407 | snapshot_id=snap_id) | ||
3258 | 408 | vol_id = vol_new.id | ||
3259 | 409 | except Exception as e: | ||
3260 | 410 | msg = 'Failed to create volume: {}'.format(e) | ||
3261 | 411 | raise RuntimeError(msg) | ||
3262 | 412 | |||
3263 | 413 | # Wait for volume to reach available status | ||
3264 | 414 | ret = self.resource_reaches_status(cinder.volumes, vol_id, | ||
3265 | 415 | expected_stat="available", | ||
3266 | 416 | msg="Volume status wait") | ||
3267 | 417 | if not ret: | ||
3268 | 418 | msg = 'Cinder volume failed to reach expected state.' | ||
3269 | 419 | raise RuntimeError(msg) | ||
3270 | 420 | |||
3271 | 421 | # Re-validate new volume | ||
3272 | 422 | self.log.debug('Validating volume attributes...') | ||
3273 | 423 | val_vol_name = cinder.volumes.get(vol_id).display_name | ||
3274 | 424 | val_vol_boot = cinder.volumes.get(vol_id).bootable | ||
3275 | 425 | val_vol_stat = cinder.volumes.get(vol_id).status | ||
3276 | 426 | val_vol_size = cinder.volumes.get(vol_id).size | ||
3277 | 427 | msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' | ||
3278 | 428 | '{} size:{}'.format(val_vol_name, vol_id, | ||
3279 | 429 | val_vol_stat, val_vol_boot, | ||
3280 | 430 | val_vol_size)) | ||
3281 | 431 | |||
3282 | 432 | if val_vol_boot == bootable and val_vol_stat == 'available' \ | ||
3283 | 433 | and val_vol_name == vol_name and val_vol_size == vol_size: | ||
3284 | 434 | self.log.debug(msg_attr) | ||
3285 | 435 | else: | ||
3286 | 436 | msg = ('Volume validation failed, {}'.format(msg_attr)) | ||
3287 | 437 | raise RuntimeError(msg) | ||
3288 | 438 | |||
3289 | 439 | return vol_new | ||
3290 | 440 | |||
3291 | 441 | def delete_resource(self, resource, resource_id, | ||
3292 | 442 | msg="resource", max_wait=120): | ||
3293 | 443 | """Delete one openstack resource, such as one instance, keypair, | ||
3294 | 444 | image, volume, stack, etc., and confirm deletion within max wait time. | ||
3295 | 445 | |||
3296 | 446 | :param resource: pointer to os resource type, ex:glance_client.images | ||
3297 | 447 | :param resource_id: unique name or id for the openstack resource | ||
3298 | 448 | :param msg: text to identify purpose in logging | ||
3299 | 449 | :param max_wait: maximum wait time in seconds | ||
3300 | 450 | :returns: True if successful, otherwise False | ||
3301 | 451 | """ | ||
3302 | 452 | self.log.debug('Deleting OpenStack resource ' | ||
3303 | 453 | '{} ({})'.format(resource_id, msg)) | ||
3304 | 454 | num_before = len(list(resource.list())) | ||
3305 | 455 | resource.delete(resource_id) | ||
3306 | 456 | |||
3307 | 457 | tries = 0 | ||
3308 | 458 | num_after = len(list(resource.list())) | ||
3309 | 459 | while num_after != (num_before - 1) and tries < (max_wait / 4): | ||
3310 | 460 | self.log.debug('{} delete check: ' | ||
3311 | 461 | '{} [{}:{}] {}'.format(msg, tries, | ||
3312 | 462 | num_before, | ||
3313 | 463 | num_after, | ||
3314 | 464 | resource_id)) | ||
3315 | 465 | time.sleep(4) | ||
3316 | 466 | num_after = len(list(resource.list())) | ||
3317 | 467 | tries += 1 | ||
3318 | 468 | |||
3319 | 469 | self.log.debug('{}: expected, actual count = {}, ' | ||
3320 | 470 | '{}'.format(msg, num_before - 1, num_after)) | ||
3321 | 471 | |||
3322 | 472 | if num_after == (num_before - 1): | ||
3323 | 473 | return True | ||
3324 | 474 | else: | ||
3325 | 475 | self.log.error('{} delete timed out'.format(msg)) | ||
3326 | 476 | return False | ||
3327 | 477 | |||
3328 | 478 | def resource_reaches_status(self, resource, resource_id, | ||
3329 | 479 | expected_stat='available', | ||
3330 | 480 | msg='resource', max_wait=120): | ||
3331 | 481 | """Wait for an openstack resources status to reach an | ||
3332 | 482 | expected status within a specified time. Useful to confirm that | ||
3333 | 483 | nova instances, cinder vols, snapshots, glance images, heat stacks | ||
3334 | 484 | and other resources eventually reach the expected status. | ||
3335 | 485 | |||
3336 | 486 | :param resource: pointer to os resource type, ex: heat_client.stacks | ||
3337 | 487 | :param resource_id: unique id for the openstack resource | ||
3338 | 488 | :param expected_stat: status to expect resource to reach | ||
3339 | 489 | :param msg: text to identify purpose in logging | ||
3340 | 490 | :param max_wait: maximum wait time in seconds | ||
3341 | 491 | :returns: True if successful, False if status is not reached | ||
3342 | 492 | """ | ||
3343 | 493 | |||
3344 | 494 | tries = 0 | ||
3345 | 495 | resource_stat = resource.get(resource_id).status | ||
3346 | 496 | while resource_stat != expected_stat and tries < (max_wait / 4): | ||
3347 | 497 | self.log.debug('{} status check: ' | ||
3348 | 498 | '{} [{}:{}] {}'.format(msg, tries, | ||
3349 | 499 | resource_stat, | ||
3350 | 500 | expected_stat, | ||
3351 | 501 | resource_id)) | ||
3352 | 502 | time.sleep(4) | ||
3353 | 503 | resource_stat = resource.get(resource_id).status | ||
3354 | 504 | tries += 1 | ||
3355 | 505 | |||
3356 | 506 | self.log.debug('{}: expected, actual status = {}, ' | ||
3357 | 507 | '{}'.format(msg, resource_stat, expected_stat)) | ||
3358 | 508 | |||
3359 | 509 | if resource_stat == expected_stat: | ||
3360 | 510 | return True | ||
3361 | 511 | else: | ||
3362 | 512 | self.log.debug('{} never reached expected status: ' | ||
3363 | 513 | '{}'.format(resource_id, expected_stat)) | ||
3364 | 514 | return False | ||
3365 | 515 | |||
3366 | 516 | def get_ceph_osd_id_cmd(self, index): | ||
3367 | 517 | """Produce a shell command that will return a ceph-osd id.""" | ||
3368 | 518 | cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'" | ||
3369 | 519 | " | grep -o '[0-9]*'`".format(index + 1)) | ||
3370 | 520 | return cmd | ||
3371 | 521 | |||
3372 | 522 | def get_ceph_pools(self, sentry_unit): | ||
3373 | 523 | """Return a dict of ceph pools from a single ceph unit, with | ||
3374 | 524 | pool name as keys, pool id as vals.""" | ||
3375 | 525 | pools = {} | ||
3376 | 526 | cmd = 'sudo ceph osd lspools' | ||
3377 | 527 | output, code = sentry_unit.run(cmd) | ||
3378 | 528 | if code != 0: | ||
3379 | 529 | msg = ('{} `{}` returned {} ' | ||
3380 | 530 | '{}'.format(sentry_unit.info['unit_name'], | ||
3381 | 531 | cmd, code, output)) | ||
3382 | 532 | raise RuntimeError(msg) | ||
3383 | 533 | |||
3384 | 534 | # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, | ||
3385 | 535 | for pool in str(output).split(','): | ||
3386 | 536 | pool_id_name = pool.split(' ') | ||
3387 | 537 | if len(pool_id_name) == 2: | ||
3388 | 538 | pool_id = pool_id_name[0] | ||
3389 | 539 | pool_name = pool_id_name[1] | ||
3390 | 540 | pools[pool_name] = int(pool_id) | ||
3391 | 541 | |||
3392 | 542 | self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], | ||
3393 | 543 | pools)) | ||
3394 | 544 | return pools | ||
3395 | 545 | |||
3396 | 546 | def get_ceph_df(self, sentry_unit): | ||
3397 | 547 | """Return dict of ceph df json output, including ceph pool state. | ||
3398 | 548 | |||
3399 | 549 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
3400 | 550 | :returns: Dict of ceph df output | ||
3401 | 551 | """ | ||
3402 | 552 | cmd = 'sudo ceph df --format=json' | ||
3403 | 553 | output, code = sentry_unit.run(cmd) | ||
3404 | 554 | if code != 0: | ||
3405 | 555 | msg = ('{} `{}` returned {} ' | ||
3406 | 556 | '{}'.format(sentry_unit.info['unit_name'], | ||
3407 | 557 | cmd, code, output)) | ||
3408 | 558 | raise RuntimeError(msg) | ||
3409 | 559 | return json.loads(output) | ||
3410 | 560 | |||
3411 | 561 | def get_ceph_pool_sample(self, sentry_unit, pool_id=0): | ||
3412 | 562 | """Take a sample of attributes of a ceph pool, returning ceph | ||
3413 | 563 | pool name, object count and disk space used for the specified | ||
3414 | 564 | pool ID number. | ||
3415 | 565 | |||
3416 | 566 | :param sentry_unit: Pointer to amulet sentry instance (juju unit) | ||
3417 | 567 | :param pool_id: Ceph pool ID | ||
3418 | 568 | :returns: List of pool name, object count, kb disk space used | ||
3419 | 569 | """ | ||
3420 | 570 | df = self.get_ceph_df(sentry_unit) | ||
3421 | 571 | pool_name = df['pools'][pool_id]['name'] | ||
3422 | 572 | obj_count = df['pools'][pool_id]['stats']['objects'] | ||
3423 | 573 | kb_used = df['pools'][pool_id]['stats']['kb_used'] | ||
3424 | 574 | self.log.debug('Ceph {} pool (ID {}): {} objects, ' | ||
3425 | 575 | '{} kb used'.format(pool_name, | ||
3426 | 576 | pool_id, | ||
3427 | 577 | obj_count, | ||
3428 | 578 | kb_used)) | ||
3429 | 579 | return pool_name, obj_count, kb_used | ||
3430 | 580 | |||
3431 | 581 | def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): | ||
3432 | 582 | """Validate ceph pool samples taken over time, such as pool | ||
3433 | 583 | object counts or pool kb used, before adding, after adding, and | ||
3434 | 584 | after deleting items which affect those pool attributes. The | ||
3435 | 585 | 2nd element is expected to be greater than the 1st; 3rd is expected | ||
3436 | 586 | to be less than the 2nd. | ||
3437 | 587 | |||
3438 | 588 | :param samples: List containing 3 data samples | ||
3439 | 589 | :param sample_type: String for logging and usage context | ||
3440 | 590 | :returns: None if successful, Failure message otherwise | ||
3441 | 591 | """ | ||
3442 | 592 | original, created, deleted = range(3) | ||
3443 | 593 | if samples[created] <= samples[original] or \ | ||
3444 | 594 | samples[deleted] >= samples[created]: | ||
3445 | 595 | msg = ('Ceph {} samples ({}) ' | ||
3446 | 596 | 'unexpected.'.format(sample_type, samples)) | ||
3447 | 597 | return msg | ||
3448 | 598 | else: | ||
3449 | 599 | self.log.debug('Ceph {} samples (OK): ' | ||
3450 | 600 | '{}'.format(sample_type, samples)) | ||
3451 | 601 | return None | ||
3452 | 295 | 602 | ||
3453 | === added file 'tests/tests.yaml' | |||
3454 | --- tests/tests.yaml 1970-01-01 00:00:00 +0000 | |||
3455 | +++ tests/tests.yaml 2015-06-30 20:18:04 +0000 | |||
3456 | @@ -0,0 +1,18 @@ | |||
3457 | 1 | bootstrap: true | ||
3458 | 2 | reset: true | ||
3459 | 3 | virtualenv: true | ||
3460 | 4 | makefile: | ||
3461 | 5 | - lint | ||
3462 | 6 | - test | ||
3463 | 7 | sources: | ||
3464 | 8 | - ppa:juju/stable | ||
3465 | 9 | packages: | ||
3466 | 10 | - amulet | ||
3467 | 11 | - python-amulet | ||
3468 | 12 | - python-cinderclient | ||
3469 | 13 | - python-distro-info | ||
3470 | 14 | - python-glanceclient | ||
3471 | 15 | - python-heatclient | ||
3472 | 16 | - python-keystoneclient | ||
3473 | 17 | - python-novaclient | ||
3474 | 18 | - python-swiftclient | ||
3475 | 0 | 19 | ||
3476 | === modified file 'unit_tests/test_glance_relations.py' | |||
3477 | --- unit_tests/test_glance_relations.py 2015-06-15 17:39:54 +0000 | |||
3478 | +++ unit_tests/test_glance_relations.py 2015-06-30 20:18:04 +0000 | |||
3479 | @@ -40,12 +40,13 @@ | |||
3480 | 40 | 'restart_on_change', | 40 | 'restart_on_change', |
3481 | 41 | 'service_reload', | 41 | 'service_reload', |
3482 | 42 | 'service_stop', | 42 | 'service_stop', |
3483 | 43 | 'service_restart', | ||
3484 | 43 | # charmhelpers.contrib.openstack.utils | 44 | # charmhelpers.contrib.openstack.utils |
3485 | 44 | 'configure_installation_source', | 45 | 'configure_installation_source', |
3486 | 45 | 'os_release', | 46 | 'os_release', |
3487 | 46 | 'openstack_upgrade_available', | 47 | 'openstack_upgrade_available', |
3488 | 47 | # charmhelpers.contrib.hahelpers.cluster_utils | 48 | # charmhelpers.contrib.hahelpers.cluster_utils |
3490 | 48 | 'eligible_leader', | 49 | 'is_elected_leader', |
3491 | 49 | # glance_utils | 50 | # glance_utils |
3492 | 50 | 'restart_map', | 51 | 'restart_map', |
3493 | 51 | 'register_configs', | 52 | 'register_configs', |
3494 | @@ -129,10 +130,14 @@ | |||
3495 | 129 | self.apt_update.assert_called_with(fatal=True) | 130 | self.apt_update.assert_called_with(fatal=True) |
3496 | 130 | self.apt_install.assert_called_with(['haproxy', 'python-setuptools', | 131 | self.apt_install.assert_called_with(['haproxy', 'python-setuptools', |
3497 | 131 | 'python-six', 'uuid', | 132 | 'python-six', 'uuid', |
3502 | 132 | 'python-mysqldb', 'python-pip', | 133 | 'python-mysqldb', |
3503 | 133 | 'apache2', 'libxslt1-dev', | 134 | 'libmysqlclient-dev', |
3504 | 134 | 'python-psycopg2', 'zlib1g-dev', | 135 | 'libssl-dev', 'libffi-dev', |
3505 | 135 | 'python-dev', 'libxml2-dev'], | 136 | 'apache2', 'python-pip', |
3506 | 137 | 'libxslt1-dev', 'libyaml-dev', | ||
3507 | 138 | 'python-psycopg2', | ||
3508 | 139 | 'zlib1g-dev', 'python-dev', | ||
3509 | 140 | 'libxml2-dev'], | ||
3510 | 136 | fatal=True) | 141 | fatal=True) |
3511 | 137 | self.git_install.assert_called_with(projects_yaml) | 142 | self.git_install.assert_called_with(projects_yaml) |
3512 | 138 | 143 | ||
3513 | @@ -430,6 +435,7 @@ | |||
3514 | 430 | self.assertEquals([call('/etc/glance/glance-api.conf'), | 435 | self.assertEquals([call('/etc/glance/glance-api.conf'), |
3515 | 431 | call(self.ceph_config_file())], | 436 | call(self.ceph_config_file())], |
3516 | 432 | configs.write.call_args_list) | 437 | configs.write.call_args_list) |
3517 | 438 | self.service_restart.assert_called_with('glance-api') | ||
3518 | 433 | 439 | ||
3519 | 434 | @patch.object(relations, 'CONFIGS') | 440 | @patch.object(relations, 'CONFIGS') |
3520 | 435 | def test_ceph_broken(self, configs): | 441 | def test_ceph_broken(self, configs): |
3521 | 436 | 442 | ||
3522 | === modified file 'unit_tests/test_glance_utils.py' | |||
3523 | --- unit_tests/test_glance_utils.py 2015-04-17 12:05:48 +0000 | |||
3524 | +++ unit_tests/test_glance_utils.py 2015-06-30 20:18:04 +0000 | |||
3525 | @@ -16,13 +16,14 @@ | |||
3526 | 16 | 'relation_ids', | 16 | 'relation_ids', |
3527 | 17 | 'get_os_codename_install_source', | 17 | 'get_os_codename_install_source', |
3528 | 18 | 'configure_installation_source', | 18 | 'configure_installation_source', |
3530 | 19 | 'eligible_leader', | 19 | 'is_elected_leader', |
3531 | 20 | 'templating', | 20 | 'templating', |
3532 | 21 | 'apt_update', | 21 | 'apt_update', |
3533 | 22 | 'apt_upgrade', | 22 | 'apt_upgrade', |
3534 | 23 | 'apt_install', | 23 | 'apt_install', |
3535 | 24 | 'mkdir', | 24 | 'mkdir', |
3536 | 25 | 'os_release', | 25 | 'os_release', |
3537 | 26 | 'pip_install', | ||
3538 | 26 | 'service_start', | 27 | 'service_start', |
3539 | 27 | 'service_stop', | 28 | 'service_stop', |
3540 | 28 | 'service_name', | 29 | 'service_name', |
3541 | @@ -152,7 +153,7 @@ | |||
3542 | 152 | git_requested.return_value = True | 153 | git_requested.return_value = True |
3543 | 153 | self.config.side_effect = None | 154 | self.config.side_effect = None |
3544 | 154 | self.config.return_value = 'cloud:precise-havana' | 155 | self.config.return_value = 'cloud:precise-havana' |
3546 | 155 | self.eligible_leader.return_value = True | 156 | self.is_elected_leader.return_value = True |
3547 | 156 | self.get_os_codename_install_source.return_value = 'havana' | 157 | self.get_os_codename_install_source.return_value = 'havana' |
3548 | 157 | configs = MagicMock() | 158 | configs = MagicMock() |
3549 | 158 | utils.do_openstack_upgrade(configs) | 159 | utils.do_openstack_upgrade(configs) |
3550 | @@ -170,7 +171,7 @@ | |||
3551 | 170 | git_requested.return_value = True | 171 | git_requested.return_value = True |
3552 | 171 | self.config.side_effect = None | 172 | self.config.side_effect = None |
3553 | 172 | self.config.return_value = 'cloud:precise-havana' | 173 | self.config.return_value = 'cloud:precise-havana' |
3555 | 173 | self.eligible_leader.return_value = False | 174 | self.is_elected_leader.return_value = False |
3556 | 174 | self.get_os_codename_install_source.return_value = 'havana' | 175 | self.get_os_codename_install_source.return_value = 'havana' |
3557 | 175 | configs = MagicMock() | 176 | configs = MagicMock() |
3558 | 176 | utils.do_openstack_upgrade(configs) | 177 | utils.do_openstack_upgrade(configs) |
3559 | @@ -236,26 +237,35 @@ | |||
3560 | 236 | @patch.object(utils, 'git_src_dir') | 237 | @patch.object(utils, 'git_src_dir') |
3561 | 237 | @patch.object(utils, 'service_restart') | 238 | @patch.object(utils, 'service_restart') |
3562 | 238 | @patch.object(utils, 'render') | 239 | @patch.object(utils, 'render') |
3563 | 240 | @patch.object(utils, 'git_pip_venv_dir') | ||
3564 | 239 | @patch('os.path.join') | 241 | @patch('os.path.join') |
3565 | 240 | @patch('os.path.exists') | 242 | @patch('os.path.exists') |
3566 | 243 | @patch('os.symlink') | ||
3567 | 241 | @patch('shutil.copytree') | 244 | @patch('shutil.copytree') |
3568 | 242 | @patch('shutil.rmtree') | 245 | @patch('shutil.rmtree') |
3571 | 243 | def test_git_post_install(self, rmtree, copytree, exists, join, render, | 246 | @patch('subprocess.check_call') |
3572 | 244 | service_restart, git_src_dir): | 247 | def test_git_post_install(self, check_call, rmtree, copytree, symlink, |
3573 | 248 | exists, join, venv, render, service_restart, | ||
3574 | 249 | git_src_dir): | ||
3575 | 245 | projects_yaml = openstack_origin_git | 250 | projects_yaml = openstack_origin_git |
3576 | 246 | join.return_value = 'joined-string' | 251 | join.return_value = 'joined-string' |
3577 | 252 | venv.return_value = '/mnt/openstack-git/venv' | ||
3578 | 247 | utils.git_post_install(projects_yaml) | 253 | utils.git_post_install(projects_yaml) |
3579 | 248 | expected = [ | 254 | expected = [ |
3580 | 249 | call('joined-string', '/etc/glance'), | 255 | call('joined-string', '/etc/glance'), |
3581 | 250 | ] | 256 | ] |
3582 | 251 | copytree.assert_has_calls(expected) | 257 | copytree.assert_has_calls(expected) |
3583 | 258 | expected = [ | ||
3584 | 259 | call('joined-string', '/usr/local/bin/glance-manage'), | ||
3585 | 260 | ] | ||
3586 | 261 | symlink.assert_has_calls(expected, any_order=True) | ||
3587 | 252 | glance_api_context = { | 262 | glance_api_context = { |
3588 | 253 | 'service_description': 'Glance API server', | 263 | 'service_description': 'Glance API server', |
3589 | 254 | 'service_name': 'Glance', | 264 | 'service_name': 'Glance', |
3590 | 255 | 'user_name': 'glance', | 265 | 'user_name': 'glance', |
3591 | 256 | 'start_dir': '/var/lib/glance', | 266 | 'start_dir': '/var/lib/glance', |
3592 | 257 | 'process_name': 'glance-api', | 267 | 'process_name': 'glance-api', |
3594 | 258 | 'executable_name': '/usr/local/bin/glance-api', | 268 | 'executable_name': 'joined-string', |
3595 | 259 | 'config_files': ['/etc/glance/glance-api.conf'], | 269 | 'config_files': ['/etc/glance/glance-api.conf'], |
3596 | 260 | 'log_file': '/var/log/glance/api.log', | 270 | 'log_file': '/var/log/glance/api.log', |
3597 | 261 | } | 271 | } |
3598 | @@ -265,7 +275,7 @@ | |||
3599 | 265 | 'user_name': 'glance', | 275 | 'user_name': 'glance', |
3600 | 266 | 'start_dir': '/var/lib/glance', | 276 | 'start_dir': '/var/lib/glance', |
3601 | 267 | 'process_name': 'glance-registry', | 277 | 'process_name': 'glance-registry', |
3603 | 268 | 'executable_name': '/usr/local/bin/glance-registry', | 278 | 'executable_name': 'joined-string', |
3604 | 269 | 'config_files': ['/etc/glance/glance-registry.conf'], | 279 | 'config_files': ['/etc/glance/glance-registry.conf'], |
3605 | 270 | 'log_file': '/var/log/glance/registry.log', | 280 | 'log_file': '/var/log/glance/registry.log', |
3606 | 271 | } | 281 | } |