Merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic into lp:~openstack-charmers/charms/trusty/quantum-gateway/next
- Trusty Tahr (14.04)
- amulet-basic
- Merge into next
Proposed by
Corey Bryant
Status: Merged
Merged at revision: 54
Proposed branch: lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic
Merge into: lp:~openstack-charmers/charms/trusty/quantum-gateway/next
Diff against target:
2073 lines (+1690/-80) 24 files modified
Makefile (+12/-4) charm-helpers-hooks.yaml (+10/-0) charm-helpers-sync.yaml (+0/-10) charm-helpers-tests.yaml (+5/-0) hooks/charmhelpers/contrib/hahelpers/cluster.py (+1/-0) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+26/-7) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+105/-3) hooks/charmhelpers/contrib/openstack/context.py (+10/-8) hooks/charmhelpers/contrib/openstack/templating.py (+22/-23) hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1) hooks/charmhelpers/core/hookenv.py (+5/-4) hooks/charmhelpers/core/host.py (+7/-5) hooks/charmhelpers/fetch/__init__.py (+23/-15) tests/00-setup (+10/-0) tests/12-basic-precise-grizzly (+11/-0) tests/13-basic-precise-havana (+11/-0) tests/14-basic-precise-icehouse (+11/-0) tests/15-basic-trusty-icehouse (+9/-0) tests/README (+47/-0) tests/basic_deployment.py (+834/-0) tests/charmhelpers/contrib/amulet/deployment.py (+63/-0) tests/charmhelpers/contrib/amulet/utils.py (+157/-0) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0) tests/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0) |
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-basic
Related bugs: none
Reviewer | Review Type | Date Requested | Status
---|---|---|---
James Page | | | Approve
Review via email: mp+226489@code.launchpad.net
Commit message
Description of the change
To post a comment you must log in.
Revision history for this message
James Page (james-page) wrote:
56. By Corey Bryant

    Add Amulet basic tests
Revision history for this message
Corey Bryant (corey.bryant) wrote:
Ok. I pushed a new version with essex/folsom tests dropped.
Revision history for this message
Corey Bryant (corey.bryant) wrote:
> Not relevant for essex and folsom - I'd just drop the tests.
Ok. I pushed a new version with essex/folsom tests dropped.
Revision history for this message
James Page (james-page) wrote:
I'm going to merge this as I think the tests are all 100% OK; however I do keep hitting a race where sentry.wait() in _deploy is not actually waiting for all hook execution to complete; resulting in the neutron-server on the nova-cc being restarted and connections from the client in the tests failing.
review:
Approve
Revision history for this message
Corey Bryant (corey.bryant) wrote:
Thanks James. I'll add support to charm-helpers for the sentry.wait() issue.
Revision history for this message
Stuart Bishop (stub) wrote:
sentry.wait() is likely Bug #1254766
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'Makefile' | |||
2 | --- Makefile 2014-05-21 10:07:03 +0000 | |||
3 | +++ Makefile 2014-07-17 15:16:47 +0000 | |||
4 | @@ -3,15 +3,23 @@ | |||
5 | 3 | 3 | ||
6 | 4 | lint: | 4 | lint: |
7 | 5 | @flake8 --exclude hooks/charmhelpers hooks | 5 | @flake8 --exclude hooks/charmhelpers hooks |
9 | 6 | @flake8 --exclude hooks/charmhelpers unit_tests | 6 | @flake8 --exclude hooks/charmhelpers unit_tests tests |
10 | 7 | @charm proof | 7 | @charm proof |
11 | 8 | 8 | ||
12 | 9 | unit_test: | ||
13 | 10 | @echo Starting unit tests... | ||
14 | 11 | @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests | ||
15 | 12 | |||
16 | 9 | test: | 13 | test: |
19 | 10 | @echo Starting tests... | 14 | @echo Starting Amulet tests... |
20 | 11 | @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests | 15 | # coreycb note: The -v should only be temporary until Amulet sends |
21 | 16 | # raise_status() messages to stderr: | ||
22 | 17 | # https://bugs.launchpad.net/amulet/+bug/1320357 | ||
23 | 18 | @juju test -v -p AMULET_HTTP_PROXY | ||
24 | 12 | 19 | ||
25 | 13 | sync: | 20 | sync: |
27 | 14 | @charm-helper-sync -c charm-helpers-sync.yaml | 21 | @charm-helper-sync -c charm-helpers-hooks.yaml |
28 | 22 | @charm-helper-sync -c charm-helpers-tests.yaml | ||
29 | 15 | 23 | ||
30 | 16 | publish: lint test | 24 | publish: lint test |
31 | 17 | bzr push lp:charms/quantum-gateway | 25 | bzr push lp:charms/quantum-gateway |
32 | 18 | 26 | ||
33 | === added file 'charm-helpers-hooks.yaml' | |||
34 | --- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000 | |||
35 | +++ charm-helpers-hooks.yaml 2014-07-17 15:16:47 +0000 | |||
36 | @@ -0,0 +1,10 @@ | |||
37 | 1 | branch: lp:charm-helpers | ||
38 | 2 | destination: hooks/charmhelpers | ||
39 | 3 | include: | ||
40 | 4 | - core | ||
41 | 5 | - fetch | ||
42 | 6 | - contrib.openstack | ||
43 | 7 | - contrib.hahelpers | ||
44 | 8 | - contrib.network.ovs | ||
45 | 9 | - contrib.storage.linux | ||
46 | 10 | - payload.execd | ||
47 | 0 | 11 | ||
48 | === removed file 'charm-helpers-sync.yaml' | |||
49 | --- charm-helpers-sync.yaml 2014-03-27 11:20:28 +0000 | |||
50 | +++ charm-helpers-sync.yaml 1970-01-01 00:00:00 +0000 | |||
51 | @@ -1,10 +0,0 @@ | |||
52 | 1 | branch: lp:charm-helpers | ||
53 | 2 | destination: hooks/charmhelpers | ||
54 | 3 | include: | ||
55 | 4 | - core | ||
56 | 5 | - fetch | ||
57 | 6 | - contrib.openstack | ||
58 | 7 | - contrib.hahelpers | ||
59 | 8 | - contrib.network.ovs | ||
60 | 9 | - contrib.storage.linux | ||
61 | 10 | - payload.execd | ||
62 | 11 | 0 | ||
63 | === added file 'charm-helpers-tests.yaml' | |||
64 | --- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000 | |||
65 | +++ charm-helpers-tests.yaml 2014-07-17 15:16:47 +0000 | |||
66 | @@ -0,0 +1,5 @@ | |||
67 | 1 | branch: lp:charm-helpers | ||
68 | 2 | destination: tests/charmhelpers | ||
69 | 3 | include: | ||
70 | 4 | - contrib.amulet | ||
71 | 5 | - contrib.openstack.amulet | ||
72 | 0 | 6 | ||
73 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
74 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-27 11:20:28 +0000 | |||
75 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-17 15:16:47 +0000 | |||
76 | @@ -170,6 +170,7 @@ | |||
77 | 170 | 170 | ||
78 | 171 | :configs : OSTemplateRenderer: A config tempating object to inspect for | 171 | :configs : OSTemplateRenderer: A config tempating object to inspect for |
79 | 172 | a complete https context. | 172 | a complete https context. |
80 | 173 | |||
81 | 173 | :vip_setting: str: Setting in charm config that specifies | 174 | :vip_setting: str: Setting in charm config that specifies |
82 | 174 | VIP address. | 175 | VIP address. |
83 | 175 | ''' | 176 | ''' |
84 | 176 | 177 | ||
85 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
86 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-06-24 13:40:39 +0000 | |||
87 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-17 15:16:47 +0000 | |||
88 | @@ -7,19 +7,38 @@ | |||
89 | 7 | """This class inherits from AmuletDeployment and has additional support | 7 | """This class inherits from AmuletDeployment and has additional support |
90 | 8 | that is specifically for use by OpenStack charms.""" | 8 | that is specifically for use by OpenStack charms.""" |
91 | 9 | 9 | ||
93 | 10 | def __init__(self, series=None, openstack=None): | 10 | def __init__(self, series, openstack=None, source=None): |
94 | 11 | """Initialize the deployment environment.""" | 11 | """Initialize the deployment environment.""" |
95 | 12 | self.openstack = None | ||
96 | 13 | super(OpenStackAmuletDeployment, self).__init__(series) | 12 | super(OpenStackAmuletDeployment, self).__init__(series) |
100 | 14 | 13 | self.openstack = openstack | |
101 | 15 | if openstack: | 14 | self.source = source |
102 | 16 | self.openstack = openstack | 15 | |
103 | 16 | def _add_services(self, this_service, other_services): | ||
104 | 17 | """Add services to the deployment and set openstack-origin.""" | ||
105 | 18 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
106 | 19 | other_services) | ||
107 | 20 | name = 0 | ||
108 | 21 | services = other_services | ||
109 | 22 | services.append(this_service) | ||
110 | 23 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | ||
111 | 24 | |||
112 | 25 | if self.openstack: | ||
113 | 26 | for svc in services: | ||
114 | 27 | charm_name = self._get_charm_name(svc[name]) | ||
115 | 28 | if charm_name not in use_source: | ||
116 | 29 | config = {'openstack-origin': self.openstack} | ||
117 | 30 | self.d.configure(svc[name], config) | ||
118 | 31 | |||
119 | 32 | if self.source: | ||
120 | 33 | for svc in services: | ||
121 | 34 | charm_name = self._get_charm_name(svc[name]) | ||
122 | 35 | if charm_name in use_source: | ||
123 | 36 | config = {'source': self.source} | ||
124 | 37 | self.d.configure(svc[name], config) | ||
125 | 17 | 38 | ||
126 | 18 | def _configure_services(self, configs): | 39 | def _configure_services(self, configs): |
127 | 19 | """Configure all of the services.""" | 40 | """Configure all of the services.""" |
128 | 20 | for service, config in configs.iteritems(): | 41 | for service, config in configs.iteritems(): |
129 | 21 | if service == self.this_service: | ||
130 | 22 | config['openstack-origin'] = self.openstack | ||
131 | 23 | self.d.configure(service, config) | 42 | self.d.configure(service, config) |
132 | 24 | 43 | ||
133 | 25 | def _get_openstack_release(self): | 44 | def _get_openstack_release(self): |
134 | 26 | 45 | ||
135 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
136 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-06-24 13:40:39 +0000 | |||
137 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-17 15:16:47 +0000 | |||
138 | @@ -1,4 +1,7 @@ | |||
139 | 1 | import logging | 1 | import logging |
140 | 2 | import os | ||
141 | 3 | import time | ||
142 | 4 | import urllib | ||
143 | 2 | 5 | ||
144 | 3 | import glanceclient.v1.client as glance_client | 6 | import glanceclient.v1.client as glance_client |
145 | 4 | import keystoneclient.v2_0 as keystone_client | 7 | import keystoneclient.v2_0 as keystone_client |
146 | @@ -71,7 +74,7 @@ | |||
147 | 71 | if ret: | 74 | if ret: |
148 | 72 | return "unexpected tenant data - {}".format(ret) | 75 | return "unexpected tenant data - {}".format(ret) |
149 | 73 | if not found: | 76 | if not found: |
151 | 74 | return "tenant {} does not exist".format(e.name) | 77 | return "tenant {} does not exist".format(e['name']) |
152 | 75 | return ret | 78 | return ret |
153 | 76 | 79 | ||
154 | 77 | def validate_role_data(self, expected, actual): | 80 | def validate_role_data(self, expected, actual): |
155 | @@ -88,7 +91,7 @@ | |||
156 | 88 | if ret: | 91 | if ret: |
157 | 89 | return "unexpected role data - {}".format(ret) | 92 | return "unexpected role data - {}".format(ret) |
158 | 90 | if not found: | 93 | if not found: |
160 | 91 | return "role {} does not exist".format(e.name) | 94 | return "role {} does not exist".format(e['name']) |
161 | 92 | return ret | 95 | return ret |
162 | 93 | 96 | ||
163 | 94 | def validate_user_data(self, expected, actual): | 97 | def validate_user_data(self, expected, actual): |
164 | @@ -107,7 +110,7 @@ | |||
165 | 107 | if ret: | 110 | if ret: |
166 | 108 | return "unexpected user data - {}".format(ret) | 111 | return "unexpected user data - {}".format(ret) |
167 | 109 | if not found: | 112 | if not found: |
169 | 110 | return "user {} does not exist".format(e.name) | 113 | return "user {} does not exist".format(e['name']) |
170 | 111 | return ret | 114 | return ret |
171 | 112 | 115 | ||
172 | 113 | def validate_flavor_data(self, expected, actual): | 116 | def validate_flavor_data(self, expected, actual): |
173 | @@ -149,3 +152,102 @@ | |||
174 | 149 | endpoint_type='publicURL') | 152 | endpoint_type='publicURL') |
175 | 150 | return nova_client.Client(username=user, api_key=password, | 153 | return nova_client.Client(username=user, api_key=password, |
176 | 151 | project_id=tenant, auth_url=ep) | 154 | project_id=tenant, auth_url=ep) |
177 | 155 | |||
178 | 156 | def create_cirros_image(self, glance, image_name): | ||
179 | 157 | """Download the latest cirros image and upload it to glance.""" | ||
180 | 158 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | ||
181 | 159 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | ||
182 | 160 | if http_proxy: | ||
183 | 161 | proxies = {'http': http_proxy} | ||
184 | 162 | opener = urllib.FancyURLopener(proxies) | ||
185 | 163 | else: | ||
186 | 164 | opener = urllib.FancyURLopener() | ||
187 | 165 | |||
188 | 166 | f = opener.open("http://download.cirros-cloud.net/version/released") | ||
189 | 167 | version = f.read().strip() | ||
190 | 168 | cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) | ||
191 | 169 | |||
192 | 170 | if not os.path.exists(cirros_img): | ||
193 | 171 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | ||
194 | 172 | version, cirros_img) | ||
195 | 173 | opener.retrieve(cirros_url, cirros_img) | ||
196 | 174 | f.close() | ||
197 | 175 | |||
198 | 176 | with open(cirros_img) as f: | ||
199 | 177 | image = glance.images.create(name=image_name, is_public=True, | ||
200 | 178 | disk_format='qcow2', | ||
201 | 179 | container_format='bare', data=f) | ||
202 | 180 | count = 1 | ||
203 | 181 | status = image.status | ||
204 | 182 | while status != 'active' and count < 10: | ||
205 | 183 | time.sleep(3) | ||
206 | 184 | image = glance.images.get(image.id) | ||
207 | 185 | status = image.status | ||
208 | 186 | self.log.debug('image status: {}'.format(status)) | ||
209 | 187 | count += 1 | ||
210 | 188 | |||
211 | 189 | if status != 'active': | ||
212 | 190 | self.log.error('image creation timed out') | ||
213 | 191 | return None | ||
214 | 192 | |||
215 | 193 | return image | ||
216 | 194 | |||
217 | 195 | def delete_image(self, glance, image): | ||
218 | 196 | """Delete the specified image.""" | ||
219 | 197 | num_before = len(list(glance.images.list())) | ||
220 | 198 | glance.images.delete(image) | ||
221 | 199 | |||
222 | 200 | count = 1 | ||
223 | 201 | num_after = len(list(glance.images.list())) | ||
224 | 202 | while num_after != (num_before - 1) and count < 10: | ||
225 | 203 | time.sleep(3) | ||
226 | 204 | num_after = len(list(glance.images.list())) | ||
227 | 205 | self.log.debug('number of images: {}'.format(num_after)) | ||
228 | 206 | count += 1 | ||
229 | 207 | |||
230 | 208 | if num_after != (num_before - 1): | ||
231 | 209 | self.log.error('image deletion timed out') | ||
232 | 210 | return False | ||
233 | 211 | |||
234 | 212 | return True | ||
235 | 213 | |||
236 | 214 | def create_instance(self, nova, image_name, instance_name, flavor): | ||
237 | 215 | """Create the specified instance.""" | ||
238 | 216 | image = nova.images.find(name=image_name) | ||
239 | 217 | flavor = nova.flavors.find(name=flavor) | ||
240 | 218 | instance = nova.servers.create(name=instance_name, image=image, | ||
241 | 219 | flavor=flavor) | ||
242 | 220 | |||
243 | 221 | count = 1 | ||
244 | 222 | status = instance.status | ||
245 | 223 | while status != 'ACTIVE' and count < 60: | ||
246 | 224 | time.sleep(3) | ||
247 | 225 | instance = nova.servers.get(instance.id) | ||
248 | 226 | status = instance.status | ||
249 | 227 | self.log.debug('instance status: {}'.format(status)) | ||
250 | 228 | count += 1 | ||
251 | 229 | |||
252 | 230 | if status != 'ACTIVE': | ||
253 | 231 | self.log.error('instance creation timed out') | ||
254 | 232 | return None | ||
255 | 233 | |||
256 | 234 | return instance | ||
257 | 235 | |||
258 | 236 | def delete_instance(self, nova, instance): | ||
259 | 237 | """Delete the specified instance.""" | ||
260 | 238 | num_before = len(list(nova.servers.list())) | ||
261 | 239 | nova.servers.delete(instance) | ||
262 | 240 | |||
263 | 241 | count = 1 | ||
264 | 242 | num_after = len(list(nova.servers.list())) | ||
265 | 243 | while num_after != (num_before - 1) and count < 10: | ||
266 | 244 | time.sleep(3) | ||
267 | 245 | num_after = len(list(nova.servers.list())) | ||
268 | 246 | self.log.debug('number of instances: {}'.format(num_after)) | ||
269 | 247 | count += 1 | ||
270 | 248 | |||
271 | 249 | if num_after != (num_before - 1): | ||
272 | 250 | self.log.error('instance deletion timed out') | ||
273 | 251 | return False | ||
274 | 252 | |||
275 | 253 | return True | ||
276 | 152 | 254 | ||
277 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
278 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-06-24 13:40:39 +0000 | |||
279 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-17 15:16:47 +0000 | |||
280 | @@ -426,12 +426,13 @@ | |||
281 | 426 | """ | 426 | """ |
282 | 427 | Generates a context for an apache vhost configuration that configures | 427 | Generates a context for an apache vhost configuration that configures |
283 | 428 | HTTPS reverse proxying for one or many endpoints. Generated context | 428 | HTTPS reverse proxying for one or many endpoints. Generated context |
290 | 429 | looks something like: | 429 | looks something like:: |
291 | 430 | { | 430 | |
292 | 431 | 'namespace': 'cinder', | 431 | { |
293 | 432 | 'private_address': 'iscsi.mycinderhost.com', | 432 | 'namespace': 'cinder', |
294 | 433 | 'endpoints': [(8776, 8766), (8777, 8767)] | 433 | 'private_address': 'iscsi.mycinderhost.com', |
295 | 434 | } | 434 | 'endpoints': [(8776, 8766), (8777, 8767)] |
296 | 435 | } | ||
297 | 435 | 436 | ||
298 | 436 | The endpoints list consists of a tuples mapping external ports | 437 | The endpoints list consists of a tuples mapping external ports |
299 | 437 | to internal ports. | 438 | to internal ports. |
300 | @@ -641,7 +642,7 @@ | |||
301 | 641 | The subordinate interface allows subordinates to export their | 642 | The subordinate interface allows subordinates to export their |
302 | 642 | configuration requirements to the principle for multiple config | 643 | configuration requirements to the principle for multiple config |
303 | 643 | files and multiple serivces. Ie, a subordinate that has interfaces | 644 | files and multiple serivces. Ie, a subordinate that has interfaces |
305 | 644 | to both glance and nova may export to following yaml blob as json: | 645 | to both glance and nova may export to following yaml blob as json:: |
306 | 645 | 646 | ||
307 | 646 | glance: | 647 | glance: |
308 | 647 | /etc/glance/glance-api.conf: | 648 | /etc/glance/glance-api.conf: |
309 | @@ -660,7 +661,8 @@ | |||
310 | 660 | 661 | ||
311 | 661 | It is then up to the principle charms to subscribe this context to | 662 | It is then up to the principle charms to subscribe this context to |
312 | 662 | the service+config file it is interestd in. Configuration data will | 663 | the service+config file it is interestd in. Configuration data will |
314 | 663 | be available in the template context, in glance's case, as: | 664 | be available in the template context, in glance's case, as:: |
315 | 665 | |||
316 | 664 | ctxt = { | 666 | ctxt = { |
317 | 665 | ... other context ... | 667 | ... other context ... |
318 | 666 | 'subordinate_config': { | 668 | 'subordinate_config': { |
319 | 667 | 669 | ||
320 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
321 | --- hooks/charmhelpers/contrib/openstack/templating.py 2013-09-25 15:27:00 +0000 | |||
322 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-17 15:16:47 +0000 | |||
323 | @@ -30,17 +30,17 @@ | |||
324 | 30 | loading dir. | 30 | loading dir. |
325 | 31 | 31 | ||
326 | 32 | A charm may also ship a templates dir with this module | 32 | A charm may also ship a templates dir with this module |
338 | 33 | and it will be appended to the bottom of the search list, eg: | 33 | and it will be appended to the bottom of the search list, eg:: |
339 | 34 | hooks/charmhelpers/contrib/openstack/templates. | 34 | |
340 | 35 | 35 | hooks/charmhelpers/contrib/openstack/templates | |
341 | 36 | :param templates_dir: str: Base template directory containing release | 36 | |
342 | 37 | sub-directories. | 37 | :param templates_dir (str): Base template directory containing release |
343 | 38 | :param os_release : str: OpenStack release codename to construct template | 38 | sub-directories. |
344 | 39 | loader. | 39 | :param os_release (str): OpenStack release codename to construct template |
345 | 40 | 40 | loader. | |
346 | 41 | :returns : jinja2.ChoiceLoader constructed with a list of | 41 | :returns: jinja2.ChoiceLoader constructed with a list of |
347 | 42 | jinja2.FilesystemLoaders, ordered in descending | 42 | jinja2.FilesystemLoaders, ordered in descending |
348 | 43 | order by OpenStack release. | 43 | order by OpenStack release. |
349 | 44 | """ | 44 | """ |
350 | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
351 | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] |
352 | @@ -111,7 +111,8 @@ | |||
353 | 111 | and ease the burden of managing config templates across multiple OpenStack | 111 | and ease the burden of managing config templates across multiple OpenStack |
354 | 112 | releases. | 112 | releases. |
355 | 113 | 113 | ||
357 | 114 | Basic usage: | 114 | Basic usage:: |
358 | 115 | |||
359 | 115 | # import some common context generates from charmhelpers | 116 | # import some common context generates from charmhelpers |
360 | 116 | from charmhelpers.contrib.openstack import context | 117 | from charmhelpers.contrib.openstack import context |
361 | 117 | 118 | ||
362 | @@ -131,21 +132,19 @@ | |||
363 | 131 | # write out all registered configs | 132 | # write out all registered configs |
364 | 132 | configs.write_all() | 133 | configs.write_all() |
365 | 133 | 134 | ||
367 | 134 | Details: | 135 | **OpenStack Releases and template loading** |
368 | 135 | 136 | ||
369 | 136 | OpenStack Releases and template loading | ||
370 | 137 | --------------------------------------- | ||
371 | 138 | When the object is instantiated, it is associated with a specific OS | 137 | When the object is instantiated, it is associated with a specific OS |
372 | 139 | release. This dictates how the template loader will be constructed. | 138 | release. This dictates how the template loader will be constructed. |
373 | 140 | 139 | ||
374 | 141 | The constructed loader attempts to load the template from several places | 140 | The constructed loader attempts to load the template from several places |
375 | 142 | in the following order: | 141 | in the following order: |
382 | 143 | - from the most recent OS release-specific template dir (if one exists) | 142 | - from the most recent OS release-specific template dir (if one exists) |
383 | 144 | - the base templates_dir | 143 | - the base templates_dir |
384 | 145 | - a template directory shipped in the charm with this helper file. | 144 | - a template directory shipped in the charm with this helper file. |
385 | 146 | 145 | ||
386 | 147 | 146 | For the example above, '/tmp/templates' contains the following structure:: | |
387 | 148 | For the example above, '/tmp/templates' contains the following structure: | 147 | |
388 | 149 | /tmp/templates/nova.conf | 148 | /tmp/templates/nova.conf |
389 | 150 | /tmp/templates/api-paste.ini | 149 | /tmp/templates/api-paste.ini |
390 | 151 | /tmp/templates/grizzly/api-paste.ini | 150 | /tmp/templates/grizzly/api-paste.ini |
391 | @@ -169,8 +168,8 @@ | |||
392 | 169 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows | 168 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows |
393 | 170 | us to ship common templates (haproxy, apache) with the helpers. | 169 | us to ship common templates (haproxy, apache) with the helpers. |
394 | 171 | 170 | ||
397 | 172 | Context generators | 171 | **Context generators** |
398 | 173 | --------------------------------------- | 172 | |
399 | 174 | Context generators are used to generate template contexts during hook | 173 | Context generators are used to generate template contexts during hook |
400 | 175 | execution. Doing so may require inspecting service relations, charm | 174 | execution. Doing so may require inspecting service relations, charm |
401 | 176 | config, etc. When registered, a config file is associated with a list | 175 | config, etc. When registered, a config file is associated with a list |
402 | 177 | 176 | ||
403 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
404 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:20:28 +0000 | |||
405 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-17 15:16:47 +0000 | |||
406 | @@ -303,7 +303,7 @@ | |||
407 | 303 | blk_device, fstype, system_services=[]): | 303 | blk_device, fstype, system_services=[]): |
408 | 304 | """ | 304 | """ |
409 | 305 | NOTE: This function must only be called from a single service unit for | 305 | NOTE: This function must only be called from a single service unit for |
411 | 306 | the same rbd_img otherwise data loss will occur. | 306 | the same rbd_img otherwise data loss will occur. |
412 | 307 | 307 | ||
413 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | 308 | Ensures given pool and RBD image exists, is mapped to a block device, |
414 | 309 | and the device is formatted and mounted at the given mount_point. | 309 | and the device is formatted and mounted at the given mount_point. |
415 | 310 | 310 | ||
416 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
417 | --- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:43:55 +0000 | |||
418 | +++ hooks/charmhelpers/core/hookenv.py 2014-07-17 15:16:47 +0000 | |||
419 | @@ -25,7 +25,7 @@ | |||
420 | 25 | def cached(func): | 25 | def cached(func): |
421 | 26 | """Cache return values for multiple executions of func + args | 26 | """Cache return values for multiple executions of func + args |
422 | 27 | 27 | ||
424 | 28 | For example: | 28 | For example:: |
425 | 29 | 29 | ||
426 | 30 | @cached | 30 | @cached |
427 | 31 | def unit_get(attribute): | 31 | def unit_get(attribute): |
428 | @@ -445,18 +445,19 @@ | |||
429 | 445 | class Hooks(object): | 445 | class Hooks(object): |
430 | 446 | """A convenient handler for hook functions. | 446 | """A convenient handler for hook functions. |
431 | 447 | 447 | ||
433 | 448 | Example: | 448 | Example:: |
434 | 449 | |||
435 | 449 | hooks = Hooks() | 450 | hooks = Hooks() |
436 | 450 | 451 | ||
437 | 451 | # register a hook, taking its name from the function name | 452 | # register a hook, taking its name from the function name |
438 | 452 | @hooks.hook() | 453 | @hooks.hook() |
439 | 453 | def install(): | 454 | def install(): |
441 | 454 | ... | 455 | pass # your code here |
442 | 455 | 456 | ||
443 | 456 | # register a hook, providing a custom hook name | 457 | # register a hook, providing a custom hook name |
444 | 457 | @hooks.hook("config-changed") | 458 | @hooks.hook("config-changed") |
445 | 458 | def config_changed(): | 459 | def config_changed(): |
447 | 459 | ... | 460 | pass # your code here |
448 | 460 | 461 | ||
449 | 461 | if __name__ == "__main__": | 462 | if __name__ == "__main__": |
450 | 462 | # execute a hook based on the name the program is called by | 463 | # execute a hook based on the name the program is called by |
451 | 463 | 464 | ||
452 | === modified file 'hooks/charmhelpers/core/host.py' | |||
453 | --- hooks/charmhelpers/core/host.py 2014-06-24 13:40:39 +0000 | |||
454 | +++ hooks/charmhelpers/core/host.py 2014-07-17 15:16:47 +0000 | |||
455 | @@ -211,13 +211,13 @@ | |||
456 | 211 | def restart_on_change(restart_map, stopstart=False): | 211 | def restart_on_change(restart_map, stopstart=False): |
457 | 212 | """Restart services based on configuration files changing | 212 | """Restart services based on configuration files changing |
458 | 213 | 213 | ||
460 | 214 | This function is used a decorator, for example | 214 | This function is used a decorator, for example:: |
461 | 215 | 215 | ||
462 | 216 | @restart_on_change({ | 216 | @restart_on_change({ |
463 | 217 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 217 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
464 | 218 | }) | 218 | }) |
465 | 219 | def ceph_client_changed(): | 219 | def ceph_client_changed(): |
467 | 220 | ... | 220 | pass # your code here |
468 | 221 | 221 | ||
469 | 222 | In this example, the cinder-api and cinder-volume services | 222 | In this example, the cinder-api and cinder-volume services |
470 | 223 | would be restarted if /etc/ceph/ceph.conf is changed by the | 223 | would be restarted if /etc/ceph/ceph.conf is changed by the |
471 | @@ -313,9 +313,11 @@ | |||
472 | 313 | 313 | ||
473 | 314 | def cmp_pkgrevno(package, revno, pkgcache=None): | 314 | def cmp_pkgrevno(package, revno, pkgcache=None): |
474 | 315 | '''Compare supplied revno with the revno of the installed package | 315 | '''Compare supplied revno with the revno of the installed package |
478 | 316 | 1 => Installed revno is greater than supplied arg | 316 | |
479 | 317 | 0 => Installed revno is the same as supplied arg | 317 | * 1 => Installed revno is greater than supplied arg |
480 | 318 | -1 => Installed revno is less than supplied arg | 318 | * 0 => Installed revno is the same as supplied arg |
481 | 319 | * -1 => Installed revno is less than supplied arg | ||
482 | 320 | |||
483 | 319 | ''' | 321 | ''' |
484 | 320 | import apt_pkg | 322 | import apt_pkg |
485 | 321 | if not pkgcache: | 323 | if not pkgcache: |
486 | 322 | 324 | ||
487 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
488 | --- hooks/charmhelpers/fetch/__init__.py 2014-06-24 13:40:39 +0000 | |||
489 | +++ hooks/charmhelpers/fetch/__init__.py 2014-07-17 15:16:47 +0000 | |||
490 | @@ -235,31 +235,39 @@ | |||
491 | 235 | sources_var='install_sources', | 235 | sources_var='install_sources', |
492 | 236 | keys_var='install_keys'): | 236 | keys_var='install_keys'): |
493 | 237 | """ | 237 | """ |
495 | 238 | Configure multiple sources from charm configuration | 238 | Configure multiple sources from charm configuration. |
496 | 239 | |||
497 | 240 | The lists are encoded as yaml fragments in the configuration. | ||
498 | 241 | The frament needs to be included as a string. | ||
499 | 239 | 242 | ||
500 | 240 | Example config: | 243 | Example config: |
502 | 241 | install_sources: | 244 | install_sources: | |
503 | 242 | - "ppa:foo" | 245 | - "ppa:foo" |
504 | 243 | - "http://example.com/repo precise main" | 246 | - "http://example.com/repo precise main" |
506 | 244 | install_keys: | 247 | install_keys: | |
507 | 245 | - null | 248 | - null |
508 | 246 | - "a1b2c3d4" | 249 | - "a1b2c3d4" |
509 | 247 | 250 | ||
510 | 248 | Note that 'null' (a.k.a. None) should not be quoted. | 251 | Note that 'null' (a.k.a. None) should not be quoted. |
511 | 249 | """ | 252 | """ |
519 | 250 | sources = safe_load(config(sources_var)) | 253 | sources = safe_load((config(sources_var) or '').strip()) or [] |
520 | 251 | keys = config(keys_var) | 254 | keys = safe_load((config(keys_var) or '').strip()) or None |
521 | 252 | if keys is not None: | 255 | |
522 | 253 | keys = safe_load(keys) | 256 | if isinstance(sources, basestring): |
523 | 254 | if isinstance(sources, basestring) and ( | 257 | sources = [sources] |
524 | 255 | keys is None or isinstance(keys, basestring)): | 258 | |
525 | 256 | add_source(sources, keys) | 259 | if keys is None: |
526 | 260 | for source in sources: | ||
527 | 261 | add_source(source, None) | ||
528 | 257 | else: | 262 | else: |
534 | 258 | if not len(sources) == len(keys): | 263 | if isinstance(keys, basestring): |
535 | 259 | msg = 'Install sources and keys lists are different lengths' | 264 | keys = [keys] |
536 | 260 | raise SourceConfigError(msg) | 265 | |
537 | 261 | for src_num in range(len(sources)): | 266 | if len(sources) != len(keys): |
538 | 262 | add_source(sources[src_num], keys[src_num]) | 267 | raise SourceConfigError( |
539 | 268 | 'Install sources and keys lists are different lengths') | ||
540 | 269 | for source, key in zip(sources, keys): | ||
541 | 270 | add_source(source, key) | ||
542 | 263 | if update: | 271 | if update: |
543 | 264 | apt_update(fatal=True) | 272 | apt_update(fatal=True) |
544 | 265 | 273 | ||
545 | 266 | 274 | ||
546 | === added directory 'tests' | |||
547 | === added file 'tests/00-setup' | |||
548 | --- tests/00-setup 1970-01-01 00:00:00 +0000 | |||
549 | +++ tests/00-setup 2014-07-17 15:16:47 +0000 | |||
550 | @@ -0,0 +1,10 @@ | |||
551 | 1 | #!/bin/bash | ||
552 | 2 | |||
553 | 3 | set -ex | ||
554 | 4 | |||
555 | 5 | sudo add-apt-repository --yes ppa:juju/stable | ||
556 | 6 | sudo apt-get update --yes | ||
557 | 7 | sudo apt-get install --yes python-amulet | ||
558 | 8 | sudo apt-get install --yes python-neutronclient | ||
559 | 9 | sudo apt-get install --yes python-keystoneclient | ||
560 | 10 | sudo apt-get install --yes python-novaclient | ||
561 | 0 | 11 | ||
562 | === added file 'tests/12-basic-precise-grizzly' | |||
563 | --- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000 | |||
564 | +++ tests/12-basic-precise-grizzly 2014-07-17 15:16:47 +0000 | |||
565 | @@ -0,0 +1,11 @@ | |||
566 | 1 | #!/usr/bin/python | ||
567 | 2 | |||
568 | 3 | """Amulet tests on a basic quantum-gateway deployment on precise-grizzly.""" | ||
569 | 4 | |||
570 | 5 | from basic_deployment import QuantumGatewayBasicDeployment | ||
571 | 6 | |||
572 | 7 | if __name__ == '__main__': | ||
573 | 8 | deployment = QuantumGatewayBasicDeployment(series='precise', | ||
574 | 9 | openstack='cloud:precise-grizzly', | ||
575 | 10 | source='cloud:precise-updates/grizzly') | ||
576 | 11 | deployment.run_tests() | ||
577 | 0 | 12 | ||
578 | === added file 'tests/13-basic-precise-havana' | |||
579 | --- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000 | |||
580 | +++ tests/13-basic-precise-havana 2014-07-17 15:16:47 +0000 | |||
581 | @@ -0,0 +1,11 @@ | |||
582 | 1 | #!/usr/bin/python | ||
583 | 2 | |||
584 | 3 | """Amulet tests on a basic quantum-gateway deployment on precise-havana.""" | ||
585 | 4 | |||
586 | 5 | from basic_deployment import QuantumGatewayBasicDeployment | ||
587 | 6 | |||
588 | 7 | if __name__ == '__main__': | ||
589 | 8 | deployment = QuantumGatewayBasicDeployment(series='precise', | ||
590 | 9 | openstack='cloud:precise-havana', | ||
591 | 10 | source='cloud:precise-updates/havana') | ||
592 | 11 | deployment.run_tests() | ||
593 | 0 | 12 | ||
594 | === added file 'tests/14-basic-precise-icehouse' | |||
595 | --- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000 | |||
596 | +++ tests/14-basic-precise-icehouse 2014-07-17 15:16:47 +0000 | |||
597 | @@ -0,0 +1,11 @@ | |||
598 | 1 | #!/usr/bin/python | ||
599 | 2 | |||
600 | 3 | """Amulet tests on a basic quantum-gateway deployment on precise-icehouse.""" | ||
601 | 4 | |||
602 | 5 | from basic_deployment import QuantumGatewayBasicDeployment | ||
603 | 6 | |||
604 | 7 | if __name__ == '__main__': | ||
605 | 8 | deployment = QuantumGatewayBasicDeployment(series='precise', | ||
606 | 9 | openstack='cloud:precise-icehouse', | ||
607 | 10 | source='cloud:precise-updates/icehouse') | ||
608 | 11 | deployment.run_tests() | ||
609 | 0 | 12 | ||
610 | === added file 'tests/15-basic-trusty-icehouse' | |||
611 | --- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000 | |||
612 | +++ tests/15-basic-trusty-icehouse 2014-07-17 15:16:47 +0000 | |||
613 | @@ -0,0 +1,9 @@ | |||
614 | 1 | #!/usr/bin/python | ||
615 | 2 | |||
616 | 3 | """Amulet tests on a basic quantum-gateway deployment on trusty-icehouse.""" | ||
617 | 4 | |||
618 | 5 | from basic_deployment import QuantumGatewayBasicDeployment | ||
619 | 6 | |||
620 | 7 | if __name__ == '__main__': | ||
621 | 8 | deployment = QuantumGatewayBasicDeployment(series='trusty') | ||
622 | 9 | deployment.run_tests() | ||
623 | 0 | 10 | ||
624 | === added file 'tests/README' | |||
625 | --- tests/README 1970-01-01 00:00:00 +0000 | |||
626 | +++ tests/README 2014-07-17 15:16:47 +0000 | |||
627 | @@ -0,0 +1,47 @@ | |||
628 | 1 | This directory provides Amulet tests that focus on verification of | ||
629 | 2 | quantum-gateway deployments. | ||
630 | 3 | |||
631 | 4 | If you use a web proxy server to access the web, you'll need to set the | ||
632 | 5 | AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. | ||
633 | 6 | |||
634 | 7 | The following examples demonstrate different ways that tests can be executed. | ||
635 | 8 | All examples are run from the charm's root directory. | ||
636 | 9 | |||
637 | 10 | * To run all tests (starting with 00-setup): | ||
638 | 11 | |||
639 | 12 | make test | ||
640 | 13 | |||
641 | 14 | * To run a specific test module (or modules): | ||
642 | 15 | |||
643 | 16 | juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
644 | 17 | |||
645 | 18 | * To run a specific test module (or modules), and keep the environment | ||
646 | 19 | deployed after a failure: | ||
647 | 20 | |||
648 | 21 | juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
649 | 22 | |||
650 | 23 | * To re-run a test module against an already deployed environment (one | ||
651 | 24 | that was deployed by a previous call to 'juju test --set-e'): | ||
652 | 25 | |||
653 | 26 | ./tests/15-basic-trusty-icehouse | ||
654 | 27 | |||
655 | 28 | For debugging and test development purposes, all code should be idempotent. | ||
656 | 29 | In other words, the code should have the ability to be re-run without changing | ||
657 | 30 | the results beyond the initial run. This enables editing and re-running of a | ||
658 | 31 | test module against an already deployed environment, as described above. | ||
659 | 32 | |||
660 | 33 | Manual debugging tips: | ||
661 | 34 | |||
662 | 35 | * Set the following env vars before using the OpenStack CLI as admin: | ||
663 | 36 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
664 | 37 | export OS_TENANT_NAME=admin | ||
665 | 38 | export OS_USERNAME=admin | ||
666 | 39 | export OS_PASSWORD=openstack | ||
667 | 40 | export OS_REGION_NAME=RegionOne | ||
668 | 41 | |||
669 | 42 | * Set the following env vars before using the OpenStack CLI as demoUser: | ||
670 | 43 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
671 | 44 | export OS_TENANT_NAME=demoTenant | ||
672 | 45 | export OS_USERNAME=demoUser | ||
673 | 46 | export OS_PASSWORD=password | ||
674 | 47 | export OS_REGION_NAME=RegionOne | ||
675 | 0 | 48 | ||
676 | === added file 'tests/basic_deployment.py' | |||
677 | --- tests/basic_deployment.py 1970-01-01 00:00:00 +0000 | |||
678 | +++ tests/basic_deployment.py 2014-07-17 15:16:47 +0000 | |||
679 | @@ -0,0 +1,834 @@ | |||
680 | 1 | #!/usr/bin/python | ||
681 | 2 | |||
682 | 3 | import amulet | ||
683 | 4 | try: | ||
684 | 5 | from quantumclient.v2_0 import client as neutronclient | ||
685 | 6 | except ImportError: | ||
686 | 7 | from neutronclient.v2_0 import client as neutronclient | ||
687 | 8 | |||
688 | 9 | from charmhelpers.contrib.openstack.amulet.deployment import ( | ||
689 | 10 | OpenStackAmuletDeployment | ||
690 | 11 | ) | ||
691 | 12 | |||
692 | 13 | from charmhelpers.contrib.openstack.amulet.utils import ( | ||
693 | 14 | OpenStackAmuletUtils, | ||
694 | 15 | DEBUG, # flake8: noqa | ||
695 | 16 | ERROR | ||
696 | 17 | ) | ||
697 | 18 | |||
698 | 19 | # Use DEBUG to turn on debug logging | ||
699 | 20 | u = OpenStackAmuletUtils(ERROR) | ||
700 | 21 | |||
701 | 22 | |||
702 | 23 | class QuantumGatewayBasicDeployment(OpenStackAmuletDeployment): | ||
703 | 24 | """Amulet tests on a basic quantum-gateway deployment.""" | ||
704 | 25 | |||
705 | 26 | def __init__(self, series, openstack=None, source=None): | ||
706 | 27 | """Deploy the entire test environment.""" | ||
707 | 28 | super(QuantumGatewayBasicDeployment, self).__init__(series, openstack, | ||
708 | 29 | source) | ||
709 | 30 | self._add_services() | ||
710 | 31 | self._add_relations() | ||
711 | 32 | self._configure_services() | ||
712 | 33 | self._deploy() | ||
713 | 34 | self._initialize_tests() | ||
714 | 35 | |||
715 | 36 | def _add_services(self): | ||
716 | 37 | """Add the service that we're testing, including the number of units, | ||
717 | 38 | where quantum-gateway is local, and the other charms are from | ||
718 | 39 | the charm store.""" | ||
719 | 40 | this_service = ('quantum-gateway', 1) | ||
720 | 41 | other_services = [('mysql', 1), | ||
721 | 42 | ('rabbitmq-server', 1), ('keystone', 1), | ||
722 | 43 | ('nova-cloud-controller', 1)] | ||
723 | 44 | super(QuantumGatewayBasicDeployment, self)._add_services(this_service, | ||
724 | 45 | other_services) | ||
725 | 46 | |||
726 | 47 | def _add_relations(self): | ||
727 | 48 | """Add all of the relations for the services.""" | ||
728 | 49 | relations = { | ||
729 | 50 | 'keystone:shared-db': 'mysql:shared-db', | ||
730 | 51 | 'quantum-gateway:shared-db': 'mysql:shared-db', | ||
731 | 52 | 'quantum-gateway:amqp': 'rabbitmq-server:amqp', | ||
732 | 53 | 'nova-cloud-controller:quantum-network-service': \ | ||
733 | 54 | 'quantum-gateway:quantum-network-service', | ||
734 | 55 | 'nova-cloud-controller:shared-db': 'mysql:shared-db', | ||
735 | 56 | 'nova-cloud-controller:identity-service': 'keystone:identity-service', | ||
736 | 57 | 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp' | ||
737 | 58 | } | ||
738 | 59 | super(QuantumGatewayBasicDeployment, self)._add_relations(relations) | ||
739 | 60 | |||
740 | 61 | def _configure_services(self): | ||
741 | 62 | """Configure all of the services.""" | ||
742 | 63 | keystone_config = {'admin-password': 'openstack', | ||
743 | 64 | 'admin-token': 'ubuntutesting'} | ||
744 | 65 | nova_cc_config = {'network-manager': 'Quantum', | ||
745 | 66 | 'quantum-security-groups': 'yes'} | ||
746 | 67 | configs = {'keystone': keystone_config, | ||
747 | 68 | 'nova-cloud-controller': nova_cc_config} | ||
748 | 69 | super(QuantumGatewayBasicDeployment, self)._configure_services(configs) | ||
749 | 70 | |||
750 | 71 | def _initialize_tests(self): | ||
751 | 72 | """Perform final initialization before tests get run.""" | ||
752 | 73 | # Access the sentries for inspecting service units | ||
753 | 74 | self.mysql_sentry = self.d.sentry.unit['mysql/0'] | ||
754 | 75 | self.keystone_sentry = self.d.sentry.unit['keystone/0'] | ||
755 | 76 | self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] | ||
756 | 77 | self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0'] | ||
757 | 78 | self.quantum_gateway_sentry = self.d.sentry.unit['quantum-gateway/0'] | ||
758 | 79 | |||
759 | 80 | # Authenticate admin with keystone | ||
760 | 81 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, | ||
761 | 82 | user='admin', | ||
762 | 83 | password='openstack', | ||
763 | 84 | tenant='admin') | ||
764 | 85 | |||
765 | 86 | |||
766 | 87 | # Authenticate admin with neutron | ||
767 | 88 | ep = self.keystone.service_catalog.url_for(service_type='identity', | ||
768 | 89 | endpoint_type='publicURL') | ||
769 | 90 | self.neutron = neutronclient.Client(auth_url=ep, | ||
770 | 91 | username='admin', | ||
771 | 92 | password='openstack', | ||
772 | 93 | tenant_name='admin', | ||
773 | 94 | region_name='RegionOne') | ||
774 | 95 | |||
775 | 96 | def test_services(self): | ||
776 | 97 | """Verify the expected services are running on the corresponding | ||
777 | 98 | service units.""" | ||
778 | 99 | if self._get_openstack_release() >= self.precise_havana: | ||
779 | 100 | neutron_services = ['status neutron-dhcp-agent', | ||
780 | 101 | 'status neutron-lbaas-agent', | ||
781 | 102 | 'status neutron-metadata-agent', | ||
782 | 103 | 'status neutron-plugin-openvswitch-agent'] | ||
783 | 104 | if self._get_openstack_release() == self.precise_havana: | ||
784 | 105 | neutron_services.append('status neutron-l3-agent') | ||
785 | 106 | else: | ||
786 | 107 | neutron_services.append('status neutron-vpn-agent') | ||
787 | 108 | neutron_services.append('status neutron-metering-agent') | ||
788 | 109 | neutron_services.append('status neutron-ovs-cleanup') | ||
789 | 110 | else: | ||
790 | 111 | neutron_services = ['status quantum-dhcp-agent', | ||
791 | 112 | 'status quantum-l3-agent', | ||
792 | 113 | 'status quantum-metadata-agent', | ||
793 | 114 | 'status quantum-plugin-openvswitch-agent'] | ||
794 | 115 | |||
795 | 116 | nova_cc_services = ['status nova-api-ec2', | ||
796 | 117 | 'status nova-api-os-compute', | ||
797 | 118 | 'status nova-objectstore', | ||
798 | 119 | 'status nova-cert', | ||
799 | 120 | 'status nova-scheduler'] | ||
800 | 121 | if self._get_openstack_release() >= self.precise_grizzly: | ||
801 | 122 | nova_cc_services.append('status nova-conductor') | ||
802 | 123 | |||
803 | 124 | commands = { | ||
804 | 125 | self.mysql_sentry: ['status mysql'], | ||
805 | 126 | self.keystone_sentry: ['status keystone'], | ||
806 | 127 | self.nova_cc_sentry: nova_cc_services, | ||
807 | 128 | self.quantum_gateway_sentry: neutron_services | ||
808 | 129 | } | ||
809 | 130 | |||
810 | 131 | ret = u.validate_services(commands) | ||
811 | 132 | if ret: | ||
812 | 133 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
813 | 134 | |||
814 | 135 | def test_quantum_gateway_shared_db_relation(self): | ||
815 | 136 | """Verify the quantum-gateway to mysql shared-db relation data""" | ||
816 | 137 | unit = self.quantum_gateway_sentry | ||
817 | 138 | relation = ['shared-db', 'mysql:shared-db'] | ||
818 | 139 | expected = { | ||
819 | 140 | 'private-address': u.valid_ip, | ||
820 | 141 | 'database': 'nova', | ||
821 | 142 | 'username': 'nova', | ||
822 | 143 | 'hostname': u.valid_ip | ||
823 | 144 | } | ||
824 | 145 | |||
825 | 146 | ret = u.validate_relation_data(unit, relation, expected) | ||
826 | 147 | if ret: | ||
827 | 148 | message = u.relation_error('quantum-gateway shared-db', ret) | ||
828 | 149 | amulet.raise_status(amulet.FAIL, msg=message) | ||
829 | 150 | |||
830 | 151 | def test_mysql_shared_db_relation(self): | ||
831 | 152 | """Verify the mysql to quantum-gateway shared-db relation data""" | ||
832 | 153 | unit = self.mysql_sentry | ||
833 | 154 | relation = ['shared-db', 'quantum-gateway:shared-db'] | ||
834 | 155 | expected = { | ||
835 | 156 | 'private-address': u.valid_ip, | ||
836 | 157 | 'password': u.not_null, | ||
837 | 158 | 'db_host': u.valid_ip | ||
838 | 159 | } | ||
839 | 160 | |||
840 | 161 | ret = u.validate_relation_data(unit, relation, expected) | ||
841 | 162 | if ret: | ||
842 | 163 | message = u.relation_error('mysql shared-db', ret) | ||
843 | 164 | amulet.raise_status(amulet.FAIL, msg=message) | ||
844 | 165 | |||
845 | 166 | def test_quantum_gateway_amqp_relation(self): | ||
846 | 167 | """Verify the quantum-gateway to rabbitmq-server amqp relation data""" | ||
847 | 168 | unit = self.quantum_gateway_sentry | ||
848 | 169 | relation = ['amqp', 'rabbitmq-server:amqp'] | ||
849 | 170 | expected = { | ||
850 | 171 | 'username': 'neutron', | ||
851 | 172 | 'private-address': u.valid_ip, | ||
852 | 173 | 'vhost': 'openstack' | ||
853 | 174 | } | ||
854 | 175 | |||
855 | 176 | ret = u.validate_relation_data(unit, relation, expected) | ||
856 | 177 | if ret: | ||
857 | 178 | message = u.relation_error('quantum-gateway amqp', ret) | ||
858 | 179 | amulet.raise_status(amulet.FAIL, msg=message) | ||
859 | 180 | |||
860 | 181 | def test_rabbitmq_amqp_relation(self): | ||
861 | 182 | """Verify the rabbitmq-server to quantum-gateway amqp relation data""" | ||
862 | 183 | unit = self.rabbitmq_sentry | ||
863 | 184 | relation = ['amqp', 'quantum-gateway:amqp'] | ||
864 | 185 | expected = { | ||
865 | 186 | 'private-address': u.valid_ip, | ||
866 | 187 | 'password': u.not_null, | ||
867 | 188 | 'hostname': u.valid_ip | ||
868 | 189 | } | ||
869 | 190 | |||
870 | 191 | ret = u.validate_relation_data(unit, relation, expected) | ||
871 | 192 | if ret: | ||
872 | 193 | message = u.relation_error('rabbitmq amqp', ret) | ||
873 | 194 | amulet.raise_status(amulet.FAIL, msg=message) | ||
874 | 195 | |||
875 | 196 | def test_quantum_gateway_network_service_relation(self): | ||
876 | 197 | """Verify the quantum-gateway to nova-cc quantum-network-service | ||
877 | 198 | relation data""" | ||
878 | 199 | unit = self.quantum_gateway_sentry | ||
879 | 200 | relation = ['quantum-network-service', | ||
880 | 201 | 'nova-cloud-controller:quantum-network-service'] | ||
881 | 202 | expected = { | ||
882 | 203 | 'private-address': u.valid_ip | ||
883 | 204 | } | ||
884 | 205 | |||
885 | 206 | ret = u.validate_relation_data(unit, relation, expected) | ||
886 | 207 | if ret: | ||
887 | 208 | message = u.relation_error('quantum-gateway network-service', ret) | ||
888 | 209 | amulet.raise_status(amulet.FAIL, msg=message) | ||
889 | 210 | |||
890 | 211 | def test_nova_cc_network_service_relation(self): | ||
891 | 212 | """Verify the nova-cc to quantum-gateway quantum-network-service | ||
892 | 213 | relation data""" | ||
893 | 214 | unit = self.nova_cc_sentry | ||
894 | 215 | relation = ['quantum-network-service', | ||
895 | 216 | 'quantum-gateway:quantum-network-service'] | ||
896 | 217 | expected = { | ||
897 | 218 | 'service_protocol': 'http', | ||
898 | 219 | 'service_tenant': 'services', | ||
899 | 220 | 'quantum_url': u.valid_url, | ||
900 | 221 | 'quantum_port': '9696', | ||
901 | 222 | 'service_port': '5000', | ||
902 | 223 | 'region': 'RegionOne', | ||
903 | 224 | 'service_password': u.not_null, | ||
904 | 225 | 'quantum_host': u.valid_ip, | ||
905 | 226 | 'auth_port': '35357', | ||
906 | 227 | 'auth_protocol': 'http', | ||
907 | 228 | 'private-address': u.valid_ip, | ||
908 | 229 | 'keystone_host': u.valid_ip, | ||
909 | 230 | 'quantum_plugin': 'ovs', | ||
910 | 231 | 'auth_host': u.valid_ip, | ||
911 | 232 | 'service_username': 'quantum_s3_ec2_nova', | ||
912 | 233 | 'service_tenant_name': 'services' | ||
913 | 234 | } | ||
914 | 235 | |||
915 | 236 | ret = u.validate_relation_data(unit, relation, expected) | ||
916 | 237 | if ret: | ||
917 | 238 | message = u.relation_error('nova-cc network-service', ret) | ||
918 | 239 | amulet.raise_status(amulet.FAIL, msg=message) | ||
919 | 240 | |||
920 | 241 | def test_restart_on_config_change(self): | ||
921 | 242 | """Verify that the specified services are restarted when the config | ||
922 | 243 | is changed.""" | ||
923 | 244 | if self._get_openstack_release() >= self.precise_havana: | ||
924 | 245 | conf = '/etc/neutron/neutron.conf' | ||
925 | 246 | services = ['neutron-dhcp-agent', 'neutron-openvswitch-agent', | ||
926 | 247 | 'neutron-metering-agent', 'neutron-lbaas-agent', | ||
927 | 248 | 'neutron-metadata-agent'] | ||
928 | 249 | if self._get_openstack_release() == self.precise_havana: | ||
929 | 250 | services.append('neutron-l3-agent') | ||
930 | 251 | else: | ||
931 | 252 | services.append('neutron-vpn-agent') | ||
932 | 253 | else: | ||
933 | 254 | conf = '/etc/quantum/quantum.conf' | ||
934 | 255 | services = ['quantum-dhcp-agent', 'quantum-openvswitch-agent', | ||
935 | 256 | 'quantum-metadata-agent', 'quantum-l3-agent'] | ||
936 | 257 | |||
937 | 258 | self.d.configure('quantum-gateway', {'debug': 'True'}) | ||
938 | 259 | |||
939 | 260 | time = 20 | ||
940 | 261 | for s in services: | ||
941 | 262 | if not u.service_restarted(self.quantum_gateway_sentry, s, conf, | ||
942 | 263 | pgrep_full=True, sleep_time=time): | ||
943 | 264 | msg = "service {} didn't restart after config change".format(s) | ||
944 | 265 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
945 | 266 | time = 0 | ||
946 | 267 | |||
947 | 268 | self.d.configure('quantum-gateway', {'debug': 'False'}) | ||
948 | 269 | |||
949 | 270 | def test_neutron_config(self): | ||
950 | 271 | """Verify the data in the neutron config file.""" | ||
951 | 272 | unit = self.quantum_gateway_sentry | ||
952 | 273 | rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', | ||
953 | 274 | 'quantum-gateway:amqp') | ||
954 | 275 | |||
955 | 276 | if self._get_openstack_release() >= self.precise_havana: | ||
956 | 277 | conf = '/etc/neutron/neutron.conf' | ||
957 | 278 | expected = { | ||
958 | 279 | 'DEFAULT': { | ||
959 | 280 | 'verbose': 'False', | ||
960 | 281 | 'debug': 'False', | ||
961 | 282 | 'lock_path': '/var/lock/neutron', | ||
962 | 283 | 'rabbit_userid': 'neutron', | ||
963 | 284 | 'rabbit_virtual_host': 'openstack', | ||
964 | 285 | 'rabbit_password': rabbitmq_relation['password'], | ||
965 | 286 | 'rabbit_host': rabbitmq_relation['hostname'], | ||
966 | 287 | 'control_exchange': 'neutron', | ||
967 | 288 | 'notification_driver': 'neutron.openstack.common.notifier.' | ||
968 | 289 | 'list_notifier', | ||
969 | 290 | 'list_notifier_drivers': 'neutron.openstack.common.' | ||
970 | 291 | 'notifier.rabbit_notifier' | ||
971 | 292 | }, | ||
972 | 293 | 'agent': { | ||
973 | 294 | 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' | ||
974 | 295 | '/etc/neutron/rootwrap.conf' | ||
975 | 296 | } | ||
976 | 297 | } | ||
977 | 298 | else: | ||
978 | 299 | conf = '/etc/quantum/quantum.conf' | ||
979 | 300 | expected = { | ||
980 | 301 | 'DEFAULT': { | ||
981 | 302 | 'verbose': 'False', | ||
982 | 303 | 'debug': 'False', | ||
983 | 304 | 'lock_path': '/var/lock/quantum', | ||
984 | 305 | 'rabbit_userid': 'neutron', | ||
985 | 306 | 'rabbit_virtual_host': 'openstack', | ||
986 | 307 | 'rabbit_password': rabbitmq_relation['password'], | ||
987 | 308 | 'rabbit_host': rabbitmq_relation['hostname'], | ||
988 | 309 | 'control_exchange': 'quantum', | ||
989 | 310 | 'notification_driver': 'quantum.openstack.common.notifier.' | ||
990 | 311 | 'list_notifier', | ||
991 | 312 | 'list_notifier_drivers': 'quantum.openstack.common.' | ||
992 | 313 | 'notifier.rabbit_notifier' | ||
993 | 314 | }, | ||
994 | 315 | 'AGENT': { | ||
995 | 316 | 'root_helper': 'sudo /usr/bin/quantum-rootwrap ' | ||
996 | 317 | '/etc/quantum/rootwrap.conf' | ||
997 | 318 | } | ||
998 | 319 | } | ||
999 | 320 | |||
1000 | 321 | if self._get_openstack_release() >= self.precise_icehouse: | ||
1001 | 322 | expected['DEFAULT']['core_plugin'] = \ | ||
1002 | 323 | 'neutron.plugins.ml2.plugin.Ml2Plugin' | ||
1003 | 324 | elif self._get_openstack_release() >= self.precise_havana: | ||
1004 | 325 | expected['DEFAULT']['core_plugin'] = \ | ||
1005 | 326 | 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2' | ||
1006 | 327 | else: | ||
1007 | 328 | expected['DEFAULT']['core_plugin'] = \ | ||
1008 | 329 | 'quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2' | ||
1009 | 330 | |||
1010 | 331 | for section, pairs in expected.iteritems(): | ||
1011 | 332 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1012 | 333 | if ret: | ||
1013 | 334 | message = "neutron config error: {}".format(ret) | ||
1014 | 335 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1015 | 336 | |||
1016 | 337 | def test_ml2_config(self): | ||
1017 | 338 | """Verify the data in the ml2 config file. This is only available | ||
1018 | 339 | since icehouse.""" | ||
1019 | 340 | if self._get_openstack_release() < self.precise_icehouse: | ||
1020 | 341 | return | ||
1021 | 342 | |||
1022 | 343 | unit = self.quantum_gateway_sentry | ||
1023 | 344 | conf = '/etc/neutron/plugins/ml2/ml2_conf.ini' | ||
1024 | 345 | quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db') | ||
1025 | 346 | expected = { | ||
1026 | 347 | 'ml2': { | ||
1027 | 348 | 'type_drivers': 'gre,vxlan', | ||
1028 | 349 | 'tenant_network_types': 'gre,vxlan', | ||
1029 | 350 | 'mechanism_drivers': 'openvswitch' | ||
1030 | 351 | }, | ||
1031 | 352 | 'ml2_type_gre': { | ||
1032 | 353 | 'tunnel_id_ranges': '1:1000' | ||
1033 | 354 | }, | ||
1034 | 355 | 'ml2_type_vxlan': { | ||
1035 | 356 | 'vni_ranges': '1001:2000' | ||
1036 | 357 | }, | ||
1037 | 358 | 'ovs': { | ||
1038 | 359 | 'enable_tunneling': 'True', | ||
1039 | 360 | 'local_ip': quantum_gateway_relation['private-address'] | ||
1040 | 361 | }, | ||
1041 | 362 | 'agent': { | ||
1042 | 363 | 'tunnel_types': 'gre' | ||
1043 | 364 | }, | ||
1044 | 365 | 'securitygroup': { | ||
1045 | 366 | 'firewall_driver': 'neutron.agent.linux.iptables_firewall.' | ||
1046 | 367 | 'OVSHybridIptablesFirewallDriver' | ||
1047 | 368 | } | ||
1048 | 369 | } | ||
1049 | 370 | |||
1050 | 371 | for section, pairs in expected.iteritems(): | ||
1051 | 372 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1052 | 373 | if ret: | ||
1053 | 374 | message = "ml2 config error: {}".format(ret) | ||
1054 | 375 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1055 | 376 | |||
1056 | 377 | def test_api_paste_config(self): | ||
1057 | 378 | """Verify the data in the api paste config file.""" | ||
1058 | 379 | unit = self.quantum_gateway_sentry | ||
1059 | 380 | if self._get_openstack_release() >= self.precise_havana: | ||
1060 | 381 | conf = '/etc/neutron/api-paste.ini' | ||
1061 | 382 | expected = { | ||
1062 | 383 | 'composite:neutron': { | ||
1063 | 384 | 'use': 'egg:Paste#urlmap', | ||
1064 | 385 | '/': 'neutronversions', | ||
1065 | 386 | '/v2.0': 'neutronapi_v2_0' | ||
1066 | 387 | }, | ||
1067 | 388 | 'filter:keystonecontext': { | ||
1068 | 389 | 'paste.filter_factory': 'neutron.auth:' | ||
1069 | 390 | 'NeutronKeystoneContext.factory' | ||
1070 | 391 | }, | ||
1071 | 392 | 'filter:authtoken': { | ||
1072 | 393 | 'paste.filter_factory': 'keystoneclient.middleware.' | ||
1073 | 394 | 'auth_token:filter_factory' | ||
1074 | 395 | }, | ||
1075 | 396 | 'filter:extensions': { | ||
1076 | 397 | 'paste.filter_factory': 'neutron.api.extensions:' | ||
1077 | 398 | 'plugin_aware_extension_middleware_' | ||
1078 | 399 | 'factory' | ||
1079 | 400 | }, | ||
1080 | 401 | 'app:neutronversions': { | ||
1081 | 402 | 'paste.app_factory': 'neutron.api.versions:Versions.factory' | ||
1082 | 403 | }, | ||
1083 | 404 | 'app:neutronapiapp_v2_0': { | ||
1084 | 405 | 'paste.app_factory': 'neutron.api.v2.router:APIRouter.' | ||
1085 | 406 | 'factory' | ||
1086 | 407 | } | ||
1087 | 408 | } | ||
1088 | 409 | if self._get_openstack_release() == self.precise_havana: | ||
1089 | 410 | expected_additional = { | ||
1090 | 411 | 'composite:neutronapi_v2_0': { | ||
1091 | 412 | 'use': 'call:neutron.auth:pipeline_factory', | ||
1092 | 413 | 'noauth': 'extensions neutronapiapp_v2_0', | ||
1093 | 414 | 'keystone': 'authtoken keystonecontext extensions ' | ||
1094 | 415 | 'neutronapiapp_v2_0' | ||
1095 | 416 | } | ||
1096 | 417 | } | ||
1097 | 418 | else: | ||
1098 | 419 | expected_additional = { | ||
1099 | 420 | 'composite:neutronapi_v2_0': { | ||
1100 | 421 | 'use': 'call:neutron.auth:pipeline_factory', | ||
1101 | 422 | 'noauth': 'request_id catch_errors extensions ' | ||
1102 | 423 | 'neutronapiapp_v2_0', | ||
1103 | 424 | 'keystone': 'request_id catch_errors authtoken ' | ||
1104 | 425 | 'keystonecontext extensions ' | ||
1105 | 426 | 'neutronapiapp_v2_0' | ||
1106 | 427 | } | ||
1107 | 428 | } | ||
1108 | 429 | expected = dict(expected.items() + expected_additional.items()) | ||
1109 | 430 | else: | ||
1110 | 431 | conf = '/etc/quantum/api-paste.ini' | ||
1111 | 432 | expected = { | ||
1112 | 433 | 'composite:quantum': { | ||
1113 | 434 | 'use': 'egg:Paste#urlmap', | ||
1114 | 435 | '/': 'quantumversions', | ||
1115 | 436 | '/v2.0': 'quantumapi_v2_0' | ||
1116 | 437 | }, | ||
1117 | 438 | 'composite:quantumapi_v2_0': { | ||
1118 | 439 | 'use': 'call:quantum.auth:pipeline_factory', | ||
1119 | 440 | 'noauth': 'extensions quantumapiapp_v2_0', | ||
1120 | 441 | 'keystone': 'authtoken keystonecontext extensions ' | ||
1121 | 442 | 'quantumapiapp_v2_0', | ||
1122 | 443 | }, | ||
1123 | 444 | 'filter:keystonecontext': { | ||
1124 | 445 | 'paste.filter_factory': 'quantum.auth:' | ||
1125 | 446 | 'QuantumKeystoneContext.factory' | ||
1126 | 447 | }, | ||
1127 | 448 | 'filter:authtoken': { | ||
1128 | 449 | 'paste.filter_factory': 'keystoneclient.middleware.' | ||
1129 | 450 | 'auth_token:filter_factory' | ||
1130 | 451 | }, | ||
1131 | 452 | 'filter:extensions': { | ||
1132 | 453 | 'paste.filter_factory': 'quantum.api.extensions:' | ||
1133 | 454 | 'plugin_aware_extension_middleware_' | ||
1134 | 455 | 'factory' | ||
1135 | 456 | }, | ||
1136 | 457 | 'app:quantumversions': { | ||
1137 | 458 | 'paste.app_factory': 'quantum.api.versions:Versions.factory' | ||
1138 | 459 | }, | ||
1139 | 460 | 'app:quantumapiapp_v2_0': { | ||
1140 | 461 | 'paste.app_factory': 'quantum.api.v2.router:APIRouter.' | ||
1141 | 462 | 'factory' | ||
1142 | 463 | } | ||
1143 | 464 | } | ||
1144 | 465 | |||
1145 | 466 | for section, pairs in expected.iteritems(): | ||
1146 | 467 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1147 | 468 | if ret: | ||
1148 | 469 | message = "api paste config error: {}".format(ret) | ||
1149 | 470 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1150 | 471 | |||
1151 | 472 | def test_dhcp_agent_config(self): | ||
1152 | 473 | """Verify the data in the dhcp agent config file.""" | ||
1153 | 474 | unit = self.quantum_gateway_sentry | ||
1154 | 475 | if self._get_openstack_release() >= self.precise_havana: | ||
1155 | 476 | conf = '/etc/neutron/dhcp_agent.ini' | ||
1156 | 477 | expected = { | ||
1157 | 478 | 'state_path': '/var/lib/neutron', | ||
1158 | 479 | 'interface_driver': 'neutron.agent.linux.interface.' | ||
1159 | 480 | 'OVSInterfaceDriver', | ||
1160 | 481 | 'dhcp_driver': 'neutron.agent.linux.dhcp.Dnsmasq', | ||
1161 | 482 | 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' | ||
1162 | 483 | '/etc/neutron/rootwrap.conf', | ||
1163 | 484 | 'ovs_use_veth': 'True' | ||
1164 | 485 | } | ||
1165 | 486 | else: | ||
1166 | 487 | conf = '/etc/quantum/dhcp_agent.ini' | ||
1167 | 488 | expected = { | ||
1168 | 489 | 'state_path': '/var/lib/quantum', | ||
1169 | 490 | 'interface_driver': 'quantum.agent.linux.interface.' | ||
1170 | 491 | 'OVSInterfaceDriver', | ||
1171 | 492 | 'dhcp_driver': 'quantum.agent.linux.dhcp.Dnsmasq', | ||
1172 | 493 | 'root_helper': 'sudo /usr/bin/quantum-rootwrap ' | ||
1173 | 494 | '/etc/quantum/rootwrap.conf' | ||
1174 | 495 | } | ||
1175 | 496 | |||
1176 | 497 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) | ||
1177 | 498 | if ret: | ||
1178 | 499 | message = "dhcp agent config error: {}".format(ret) | ||
1179 | 500 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1180 | 501 | |||
1181 | 502 | def test_fwaas_driver_config(self): | ||
1182 | 503 | """Verify the data in the fwaas driver config file. This is only | ||
1183 | 504 | available since havana.""" | ||
1184 | 505 | if self._get_openstack_release() < self.precise_havana: | ||
1185 | 506 | return | ||
1186 | 507 | |||
1187 | 508 | unit = self.quantum_gateway_sentry | ||
1188 | 509 | conf = '/etc/neutron/fwaas_driver.ini' | ||
1189 | 510 | expected = { | ||
1190 | 511 | 'driver': 'neutron.services.firewall.drivers.linux.' | ||
1191 | 512 | 'iptables_fwaas.IptablesFwaasDriver', | ||
1192 | 513 | 'enabled': 'True' | ||
1193 | 514 | } | ||
1194 | 515 | |||
1195 | 516 | ret = u.validate_config_data(unit, conf, 'fwaas', expected) | ||
1196 | 517 | if ret: | ||
1197 | 518 | message = "fwaas driver config error: {}".format(ret) | ||
1198 | 519 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1199 | 520 | |||
1200 | 521 | def test_l3_agent_config(self): | ||
1201 | 522 | """Verify the data in the l3 agent config file.""" | ||
1202 | 523 | unit = self.quantum_gateway_sentry | ||
1203 | 524 | nova_cc_relation = self.nova_cc_sentry.relation(\ | ||
1204 | 525 | 'quantum-network-service', | ||
1205 | 526 | 'quantum-gateway:quantum-network-service') | ||
1206 | 527 | ep = self.keystone.service_catalog.url_for(service_type='identity', | ||
1207 | 528 | endpoint_type='publicURL') | ||
1208 | 529 | |||
1209 | 530 | if self._get_openstack_release() >= self.precise_havana: | ||
1210 | 531 | conf = '/etc/neutron/l3_agent.ini' | ||
1211 | 532 | expected = { | ||
1212 | 533 | 'interface_driver': 'neutron.agent.linux.interface.' | ||
1213 | 534 | 'OVSInterfaceDriver', | ||
1214 | 535 | 'auth_url': ep, | ||
1215 | 536 | 'auth_region': 'RegionOne', | ||
1216 | 537 | 'admin_tenant_name': 'services', | ||
1217 | 538 | 'admin_user': 'quantum_s3_ec2_nova', | ||
1218 | 539 | 'admin_password': nova_cc_relation['service_password'], | ||
1219 | 540 | 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' | ||
1220 | 541 | '/etc/neutron/rootwrap.conf', | ||
1221 | 542 | 'ovs_use_veth': 'True', | ||
1222 | 543 | 'handle_internal_only_routers': 'True' | ||
1223 | 544 | } | ||
1224 | 545 | else: | ||
1225 | 546 | conf = '/etc/quantum/l3_agent.ini' | ||
1226 | 547 | expected = { | ||
1227 | 548 | 'interface_driver': 'quantum.agent.linux.interface.' | ||
1228 | 549 | 'OVSInterfaceDriver', | ||
1229 | 550 | 'auth_url': ep, | ||
1230 | 551 | 'auth_region': 'RegionOne', | ||
1231 | 552 | 'admin_tenant_name': 'services', | ||
1232 | 553 | 'admin_user': 'quantum_s3_ec2_nova', | ||
1233 | 554 | 'admin_password': nova_cc_relation['service_password'], | ||
1234 | 555 | 'root_helper': 'sudo /usr/bin/quantum-rootwrap ' | ||
1235 | 556 | '/etc/quantum/rootwrap.conf' | ||
1236 | 557 | } | ||
1237 | 558 | |||
1238 | 559 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) | ||
1239 | 560 | if ret: | ||
1240 | 561 | message = "l3 agent config error: {}".format(ret) | ||
1241 | 562 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1242 | 563 | |||
1243 | 564 | def test_lbaas_agent_config(self): | ||
1244 | 565 | """Verify the data in the lbaas agent config file. This is only | ||
1245 | 566 | available since havana.""" | ||
1246 | 567 | if self._get_openstack_release() < self.precise_havana: | ||
1247 | 568 | return | ||
1248 | 569 | |||
1249 | 570 | unit = self.quantum_gateway_sentry | ||
1250 | 571 | conf = '/etc/neutron/lbaas_agent.ini' | ||
1251 | 572 | expected = { | ||
1252 | 573 | 'DEFAULT': { | ||
1253 | 574 | 'periodic_interval': '10', | ||
1254 | 575 | 'interface_driver': 'neutron.agent.linux.interface.' | ||
1255 | 576 | 'OVSInterfaceDriver', | ||
1256 | 577 | 'ovs_use_veth': 'False', | ||
1257 | 578 | 'device_driver': 'neutron.services.loadbalancer.drivers.' | ||
1258 | 579 | 'haproxy.namespace_driver.HaproxyNSDriver' | ||
1259 | 580 | }, | ||
1260 | 581 | 'haproxy': { | ||
1261 | 582 | 'loadbalancer_state_path': '$state_path/lbaas', | ||
1262 | 583 | 'user_group': 'nogroup' | ||
1263 | 584 | } | ||
1264 | 585 | } | ||
1265 | 586 | |||
1266 | 587 | for section, pairs in expected.iteritems(): | ||
1267 | 588 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1268 | 589 | if ret: | ||
1269 | 590 | message = "lbaas agent config error: {}".format(ret) | ||
1270 | 591 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1271 | 592 | |||
1272 | 593 | def test_metadata_agent_config(self): | ||
1273 | 594 | """Verify the data in the metadata agent config file.""" | ||
1274 | 595 | unit = self.quantum_gateway_sentry | ||
1275 | 596 | ep = self.keystone.service_catalog.url_for(service_type='identity', | ||
1276 | 597 | endpoint_type='publicURL') | ||
1277 | 598 | quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db') | ||
1278 | 599 | nova_cc_relation = self.nova_cc_sentry.relation(\ | ||
1279 | 600 | 'quantum-network-service', | ||
1280 | 601 | 'quantum-gateway:quantum-network-service') | ||
1281 | 602 | |||
1282 | 603 | if self._get_openstack_release() >= self.precise_havana: | ||
1283 | 604 | conf = '/etc/neutron/metadata_agent.ini' | ||
1284 | 605 | expected = { | ||
1285 | 606 | 'auth_url': ep, | ||
1286 | 607 | 'auth_region': 'RegionOne', | ||
1287 | 608 | 'admin_tenant_name': 'services', | ||
1288 | 609 | 'admin_user': 'quantum_s3_ec2_nova', | ||
1289 | 610 | 'admin_password': nova_cc_relation['service_password'], | ||
1290 | 611 | 'root_helper': 'sudo neutron-rootwrap ' | ||
1291 | 612 | '/etc/neutron/rootwrap.conf', | ||
1292 | 613 | 'state_path': '/var/lib/neutron', | ||
1293 | 614 | 'nova_metadata_ip': quantum_gateway_relation['private-address'], | ||
1294 | 615 | 'nova_metadata_port': '8775' | ||
1295 | 616 | } | ||
1296 | 617 | else: | ||
1297 | 618 | conf = '/etc/quantum/metadata_agent.ini' | ||
1298 | 619 | expected = { | ||
1299 | 620 | 'auth_url': ep, | ||
1300 | 621 | 'auth_region': 'RegionOne', | ||
1301 | 622 | 'admin_tenant_name': 'services', | ||
1302 | 623 | 'admin_user': 'quantum_s3_ec2_nova', | ||
1303 | 624 | 'admin_password': nova_cc_relation['service_password'], | ||
1304 | 625 | 'root_helper': 'sudo quantum-rootwrap ' | ||
1305 | 626 | '/etc/quantum/rootwrap.conf', | ||
1306 | 627 | 'state_path': '/var/lib/quantum', | ||
1307 | 628 | 'nova_metadata_ip': quantum_gateway_relation['private-address'], | ||
1308 | 629 | 'nova_metadata_port': '8775' | ||
1309 | 630 | } | ||
1310 | 631 | |||
1311 | 632 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) | ||
1312 | 633 | if ret: | ||
1313 | 634 | message = "metadata agent config error: {}".format(ret) | ||
1314 | 635 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1315 | 636 | |||
1316 | 637 | def test_metering_agent_config(self): | ||
1317 | 638 | """Verify the data in the metering agent config file. This is only | ||
1318 | 639 | available since havana.""" | ||
1319 | 640 | if self._get_openstack_release() < self.precise_havana: | ||
1320 | 641 | return | ||
1321 | 642 | |||
1322 | 643 | unit = self.quantum_gateway_sentry | ||
1323 | 644 | conf = '/etc/neutron/metering_agent.ini' | ||
1324 | 645 | expected = { | ||
1325 | 646 | 'driver': 'neutron.services.metering.drivers.iptables.' | ||
1326 | 647 | 'iptables_driver.IptablesMeteringDriver', | ||
1327 | 648 | 'measure_interval': '30', | ||
1328 | 649 | 'report_interval': '300', | ||
1329 | 650 | 'interface_driver': 'neutron.agent.linux.interface.' | ||
1330 | 651 | 'OVSInterfaceDriver', | ||
1331 | 652 | 'use_namespaces': 'True' | ||
1332 | 653 | } | ||
1333 | 654 | |||
1334 | 655 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) | ||
1335 | 656 | if ret: | ||
1336 | 657 | message = "metering agent config error: {}".format(ret) | ||
1337 | 658 | |||
1338 | 659 | def test_nova_config(self): | ||
1339 | 660 | """Verify the data in the nova config file.""" | ||
1340 | 661 | unit = self.quantum_gateway_sentry | ||
1341 | 662 | conf = '/etc/nova/nova.conf' | ||
1342 | 663 | mysql_relation = self.mysql_sentry.relation('shared-db', | ||
1343 | 664 | 'quantum-gateway:shared-db') | ||
1344 | 665 | db_uri = "mysql://{}:{}@{}/{}".format('nova', | ||
1345 | 666 | mysql_relation['password'], | ||
1346 | 667 | mysql_relation['db_host'], | ||
1347 | 668 | 'nova') | ||
1348 | 669 | rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', | ||
1349 | 670 | 'quantum-gateway:amqp') | ||
1350 | 671 | nova_cc_relation = self.nova_cc_sentry.relation(\ | ||
1351 | 672 | 'quantum-network-service', | ||
1352 | 673 | 'quantum-gateway:quantum-network-service') | ||
1353 | 674 | ep = self.keystone.service_catalog.url_for(service_type='identity', | ||
1354 | 675 | endpoint_type='publicURL') | ||
1355 | 676 | |||
1356 | 677 | if self._get_openstack_release() >= self.precise_havana: | ||
1357 | 678 | expected = { | ||
1358 | 679 | 'logdir': '/var/log/nova', | ||
1359 | 680 | 'state_path': '/var/lib/nova', | ||
1360 | 681 | 'lock_path': '/var/lock/nova', | ||
1361 | 682 | 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf', | ||
1362 | 683 | 'verbose': 'False', | ||
1363 | 684 | 'use_syslog': 'False', | ||
1364 | 685 | 'api_paste_config': '/etc/nova/api-paste.ini', | ||
1365 | 686 | 'enabled_apis': 'metadata', | ||
1366 | 687 | 'multi_host': 'True', | ||
1367 | 688 | 'sql_connection': db_uri, | ||
1368 | 689 | 'service_neutron_metadata_proxy': 'True', | ||
1369 | 690 | 'rabbit_userid': 'neutron', | ||
1370 | 691 | 'rabbit_virtual_host': 'openstack', | ||
1371 | 692 | 'rabbit_password': rabbitmq_relation['password'], | ||
1372 | 693 | 'rabbit_host': rabbitmq_relation['hostname'], | ||
1373 | 694 | 'network_api_class': 'nova.network.neutronv2.api.API', | ||
1374 | 695 | 'neutron_auth_strategy': 'keystone', | ||
1375 | 696 | 'neutron_url': nova_cc_relation['quantum_url'], | ||
1376 | 697 | 'neutron_admin_tenant_name': 'services', | ||
1377 | 698 | 'neutron_admin_username': 'quantum_s3_ec2_nova', | ||
1378 | 699 | 'neutron_admin_password': nova_cc_relation['service_password'], | ||
1379 | 700 | 'neutron_admin_auth_url': ep | ||
1380 | 701 | |||
1381 | 702 | } | ||
1382 | 703 | else: | ||
1383 | 704 | expected = { | ||
1384 | 705 | 'logdir': '/var/log/nova', | ||
1385 | 706 | 'state_path': '/var/lib/nova', | ||
1386 | 707 | 'lock_path': '/var/lock/nova', | ||
1387 | 708 | 'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf', | ||
1388 | 709 | 'verbose': 'True', | ||
1389 | 710 | 'api_paste_config': '/etc/nova/api-paste.ini', | ||
1390 | 711 | 'enabled_apis': 'metadata', | ||
1391 | 712 | 'multi_host': 'True', | ||
1392 | 713 | 'sql_connection': db_uri, | ||
1393 | 714 | 'service_quantum_metadata_proxy': 'True', | ||
1394 | 715 | 'rabbit_userid': 'neutron', | ||
1395 | 716 | 'rabbit_virtual_host': 'openstack', | ||
1396 | 717 | 'rabbit_password': rabbitmq_relation['password'], | ||
1397 | 718 | 'rabbit_host': rabbitmq_relation['hostname'], | ||
1398 | 719 | 'network_api_class': 'nova.network.quantumv2.api.API', | ||
1399 | 720 | 'quantum_auth_strategy': 'keystone', | ||
1400 | 721 | 'quantum_url': nova_cc_relation['quantum_url'], | ||
1401 | 722 | 'quantum_admin_tenant_name': 'services', | ||
1402 | 723 | 'quantum_admin_username': 'quantum_s3_ec2_nova', | ||
1403 | 724 | 'quantum_admin_password': nova_cc_relation['service_password'], | ||
1404 | 725 | 'quantum_admin_auth_url': ep | ||
1405 | 726 | } | ||
1406 | 727 | |||
1407 | 728 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) | ||
1408 | 729 | if ret: | ||
1409 | 730 | message = "nova config error: {}".format(ret) | ||
1410 | 731 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1411 | 732 | |||
1412 | 733 | def test_ovs_neutron_plugin_config(self): | ||
1413 | 734 | """Verify the data in the ovs neutron plugin config file. The ovs | ||
1414 | 735 | plugin is not used by default since icehouse.""" | ||
1415 | 736 | if self._get_openstack_release() >= self.precise_icehouse: | ||
1416 | 737 | return | ||
1417 | 738 | |||
1418 | 739 | unit = self.quantum_gateway_sentry | ||
1419 | 740 | quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db') | ||
1420 | 741 | |||
1421 | 742 | if self._get_openstack_release() >= self.precise_havana: | ||
1422 | 743 | conf = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini' | ||
1423 | 744 | expected = { | ||
1424 | 745 | 'ovs': { | ||
1425 | 746 | 'local_ip': quantum_gateway_relation['private-address'], | ||
1426 | 747 | 'tenant_network_type': 'gre', | ||
1427 | 748 | 'enable_tunneling': 'True', | ||
1428 | 749 | 'tunnel_id_ranges': '1:1000' | ||
1429 | 750 | } | ||
1430 | 751 | } | ||
1431 | 752 | if self._get_openstack_release() > self.precise_havana: | ||
1432 | 753 | expected_additional = { | ||
1433 | 754 | 'agent': { | ||
1434 | 755 | 'polling_interval': '10', | ||
1435 | 756 | 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' | ||
1436 | 757 | '/etc/neutron/rootwrap.conf' | ||
1437 | 758 | } | ||
1438 | 759 | } | ||
1439 | 760 | expected = dict(expected.items() + expected_additional.items()) | ||
1440 | 761 | else: | ||
1441 | 762 | conf = '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini' | ||
1442 | 763 | expected = { | ||
1443 | 764 | 'OVS': { | ||
1444 | 765 | 'local_ip': quantum_gateway_relation['private-address'], | ||
1445 | 766 | 'tenant_network_type': 'gre', | ||
1446 | 767 | 'enable_tunneling': 'True', | ||
1447 | 768 | 'tunnel_id_ranges': '1:1000' | ||
1448 | 769 | }, | ||
1449 | 770 | 'AGENT': { | ||
1450 | 771 | 'polling_interval': '10', | ||
1451 | 772 | 'root_helper': 'sudo /usr/bin/quantum-rootwrap ' | ||
1452 | 773 | '/etc/quantum/rootwrap.conf' | ||
1453 | 774 | } | ||
1454 | 775 | } | ||
1455 | 776 | |||
1456 | 777 | for section, pairs in expected.iteritems(): | ||
1457 | 778 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1458 | 779 | if ret: | ||
1459 | 780 | message = "ovs neutron plugin config error: {}".format(ret) | ||
1460 | 781 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1461 | 782 | |||
1462 | 783 | def test_vpn_agent_config(self): | ||
1463 | 784 | """Verify the data in the vpn agent config file. This isn't available | ||
1464 | 785 | prior to havana.""" | ||
1465 | 786 | if self._get_openstack_release() < self.precise_havana: | ||
1466 | 787 | return | ||
1467 | 788 | |||
1468 | 789 | unit = self.quantum_gateway_sentry | ||
1469 | 790 | conf = '/etc/neutron/vpn_agent.ini' | ||
1470 | 791 | expected = { | ||
1471 | 792 | 'vpnagent': { | ||
1472 | 793 | 'vpn_device_driver': 'neutron.services.vpn.device_drivers.' | ||
1473 | 794 | 'ipsec.OpenSwanDriver' | ||
1474 | 795 | }, | ||
1475 | 796 | 'ipsec': { | ||
1476 | 797 | 'ipsec_status_check_interval': '60' | ||
1477 | 798 | } | ||
1478 | 799 | } | ||
1479 | 800 | |||
1480 | 801 | for section, pairs in expected.iteritems(): | ||
1481 | 802 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1482 | 803 | if ret: | ||
1483 | 804 | message = "vpn agent config error: {}".format(ret) | ||
1484 | 805 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1485 | 806 | |||
1486 | 807 | def test_create_network(self): | ||
1487 | 808 | """Create a network, verify that it exists, and then delete it.""" | ||
1488 | 809 | self.neutron.format = 'json' | ||
1489 | 810 | net_name = 'ext_net' | ||
1490 | 811 | |||
1491 | 812 | #Verify that the network doesn't exist | ||
1492 | 813 | networks = self.neutron.list_networks(name=net_name) | ||
1493 | 814 | net_count = len(networks['networks']) | ||
1494 | 815 | if net_count != 0: | ||
1495 | 816 | msg = "Expected zero networks, found {}".format(net_count) | ||
1496 | 817 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1497 | 818 | |||
1498 | 819 | # Create a network and verify that it exists | ||
1499 | 820 | network = {'name': net_name} | ||
1500 | 821 | self.neutron.create_network({'network':network}) | ||
1501 | 822 | |||
1502 | 823 | networks = self.neutron.list_networks(name=net_name) | ||
1503 | 824 | net_len = len(networks['networks']) | ||
1504 | 825 | if net_len != 1: | ||
1505 | 826 | msg = "Expected 1 network, found {}".format(net_len) | ||
1506 | 827 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1507 | 828 | |||
1508 | 829 | network = networks['networks'][0] | ||
1509 | 830 | if network['name'] != net_name: | ||
1510 | 831 | amulet.raise_status(amulet.FAIL, msg="network ext_net not found") | ||
1511 | 832 | |||
1512 | 833 | #Cleanup | ||
1513 | 834 | self.neutron.delete_network(network['id']) | ||
1514 | 0 | 835 | ||
1515 | === added directory 'tests/charmhelpers' | |||
1516 | === added file 'tests/charmhelpers/__init__.py' | |||
1517 | === added directory 'tests/charmhelpers/contrib' | |||
1518 | === added file 'tests/charmhelpers/contrib/__init__.py' | |||
1519 | === added directory 'tests/charmhelpers/contrib/amulet' | |||
1520 | === added file 'tests/charmhelpers/contrib/amulet/__init__.py' | |||
1521 | === added file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
1522 | --- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
1523 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-17 15:16:47 +0000 | |||
1524 | @@ -0,0 +1,63 @@ | |||
1525 | 1 | import amulet | ||
1526 | 2 | import re | ||
1527 | 3 | |||
1528 | 4 | |||
class AmuletDeployment(object):
    """Provide generic Amulet deployment and test runner methods."""

    def __init__(self, series):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy (e.g. 'precise', 'trusty').
        """
        self.series = series
        self.d = amulet.Deployment(series=self.series)

    def _get_charm_name(self, service_name):
        """Return the charm name for a service name.

        Unique service names can be specified with a '-service#' suffix
        (e.g. mysql-service1); the suffix is stripped to recover the
        charm name.
        """
        # Raw strings for the regexes avoid invalid-escape warnings.
        if re.match(r"^.*-service\d{1,3}$", service_name):
            return re.sub(r'-service\d{1,3}$', '', service_name)
        return service_name

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        this_service is the local charm that we're focused on testing;
        other_services are the charms that come from the charm store.
        """
        name, units = range(2)

        # The local charm is deployed from the filesystem, so its charm
        # store name is not needed here (previously an unused lookup).
        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            charm_name = self._get_charm_name(svc[name])
            self.d.add(svc[name],
                       charm='cs:{}/{}'.format(self.series, charm_name),
                       units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.iteritems():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
        # Any other exception propagates to the caller unchanged
        # (the previous bare 'except: raise' was a no-op).

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
1588 | 0 | 64 | ||
1589 | === added file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
1590 | --- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
1591 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-17 15:16:47 +0000 | |||
1592 | @@ -0,0 +1,157 @@ | |||
1593 | 1 | import ConfigParser | ||
1594 | 2 | import io | ||
1595 | 3 | import logging | ||
1596 | 4 | import re | ||
1597 | 5 | import sys | ||
1598 | 6 | from time import sleep | ||
1599 | 7 | |||
1600 | 8 | |||
class AmuletUtils(object):
    """Common utility functions used by Amulet tests.

    Provides logging, config-file and relation-data validation, and
    process/file timestamp helpers for service restart checks.
    """

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        logger = logging.getLogger(name)
        fmt = logging.Formatter(
            "%(asctime)s %(funcName)s %(levelname)s: %(message)s")

        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip is a dotted-quad string.

        Note: octet values are not range-checked.
        """
        return bool(re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip))

    def valid_url(self, url):
        """Return True if url looks like an http(s)/ftp(s) URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        return bool(p.match(url))

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
        service units.

        commands maps a sentry unit to a list of status commands; returns
        an error string on the first failing command, or None.
        """
        for k, v in commands.iteritems():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section, expected):
        """Verify that the specified section of the config file contains
        the expected option key:value pairs.

        Returns an error string on the first mismatch, or None.
        """
        config = self._get_config(sentry_unit, config_file)

        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(section,
                    k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.

        The values in the 'expected' dictionary can be strings, bools,
        ints, longs, or can be a function that evaluates a variable and
        returns a bool.
        """
        for k, v in expected.iteritems():
            if k in actual:
                if isinstance(v, basestring) or \
                        isinstance(v, bool) or \
                        isinstance(v, (int, long)):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                elif not v(actual[k]):
                    # v is a predicate; it rejected the actual value.
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if string is not None.

        Fixed to use identity comparison ('is not None') per PEP 8;
        '!= None' could invoke a custom __ne__.
        """
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last modification
        time of the /proc/pid directory.

        If pgrep_full is True, the process name is matched against the
        full command line.
        """
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        # pgrep -o returns the oldest matching pid.
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted.
        """
        # Allow hooks time to restart the service before checking.
        sleep(sleep_time)
        return (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                self._get_file_mtime(sentry_unit, filename))

    def relation_error(self, name, data):
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        return 'unexpected endpoint data in {} - {}'.format(name, data)
1750 | 0 | 158 | ||
1751 | === added directory 'tests/charmhelpers/contrib/openstack' | |||
1752 | === added file 'tests/charmhelpers/contrib/openstack/__init__.py' | |||
1753 | === added directory 'tests/charmhelpers/contrib/openstack/amulet' | |||
1754 | === added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
1755 | === added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
1756 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
1757 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-17 15:16:47 +0000 | |||
1758 | @@ -0,0 +1,57 @@ | |||
1759 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
1760 | 2 | AmuletDeployment | ||
1761 | 3 | ) | ||
1762 | 4 | |||
1763 | 5 | |||
class OpenStackAmuletDeployment(AmuletDeployment):
    """Amulet deployment with additional support specifically for use by
    OpenStack charms."""

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        services = other_services
        services.append(this_service)
        # These charms take a 'source' option rather than
        # 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                if self._get_charm_name(svc[name]) not in use_source:
                    self.d.configure(svc[name],
                                     {'openstack-origin': self.openstack})

        if self.source:
            for svc in services:
                if self._get_charm_name(svc[name]) in use_source:
                    self.d.configure(svc[name], {'source': self.source})

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
        release."""
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse) = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
1816 | 0 | 58 | ||
1817 | === added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
1818 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
1819 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-17 15:16:47 +0000 | |||
1820 | @@ -0,0 +1,253 @@ | |||
1821 | 1 | import logging | ||
1822 | 2 | import os | ||
1823 | 3 | import time | ||
1824 | 4 | import urllib | ||
1825 | 5 | |||
1826 | 6 | import glanceclient.v1.client as glance_client | ||
1827 | 7 | import keystoneclient.v2_0 as keystone_client | ||
1828 | 8 | import novaclient.v1_1.client as nova_client | ||
1829 | 9 | |||
1830 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
1831 | 11 | AmuletUtils | ||
1832 | 12 | ) | ||
1833 | 13 | |||
1834 | 14 | DEBUG = logging.DEBUG | ||
1835 | 15 | ERROR = logging.ERROR | ||
1836 | 16 | |||
1837 | 17 | |||
class OpenStackAmuletUtils(AmuletUtils):
    """Amulet test helpers specific to OpenStack charms.

    Extends AmuletUtils with validators for keystone catalog data
    (endpoints, tenants, roles, users, flavors), authentication helpers
    for keystone/glance/nova, and guest image / instance lifecycle
    helpers used by charm functional tests.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment at the given log level."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data.

        The ports (passed as strings) are used to locate the matching
        endpoint via substring match against its admin/internal/public
        URLs.

        Returns None on success, otherwise an error-message string.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a dict of actual service catalog endpoints vs a dict of
        expected service catalog endpoints.

        Returns None on success, otherwise an error-message string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # Initialize so an empty 'expected' returns None instead of
        # raising NameError on the final 'return ret'.
        ret = None
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs a list of expected
        tenant data.

        Returns None on success, otherwise an error-message string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # Avoid NameError on 'return ret' when 'expected' is empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
        data.

        Returns None on success, otherwise an error-message string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # Avoid NameError on 'return ret' when 'expected' is empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
        data.

        Returns None on success, otherwise an error-message string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        # Avoid NameError on 'return ret' when 'expected' is empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavor
        names.

        Returns None on success, otherwise an error-message string.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if the named tenant exists in keystone."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint.

        The keystone unit's private address is read from its shared-db
        relation data; the admin endpoint listens on port 35357.
        """
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance via the keystone token."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Returns the active glance image on success, None on timeout.
        """
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        f.close()

        # Fix: build the download URL from the bare image file name; the
        # previous code appended the local 'tests/...' path, yielding a
        # broken .../{version}/tests/... URL on the mirror.
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        # Poll up to ~27s for the image to become active.
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image.

        Returns True when the image count drops by one, False on timeout.
        """
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Boot an instance from the named image and flavor.

        Returns the ACTIVE server on success, None on timeout (~3 min).
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Returns True when the server count drops by one, False on timeout.
        """
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
Not relevant for essex and folsom - I'd just drop the tests.