Merge lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next
- Trusty Tahr (14.04)
- amulet-basic
- Merge into next
Proposed by
Corey Bryant
| Field | Value |
|---|---|
| Status | Merged |
| Merged at revision | 84 |
| Proposed branch | lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic |
| Merge into | lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next |
Diff against target: 2296 lines (+1781/-86), 30 files modified
Makefile (+12/-4) charm-helpers-hooks.yaml (+10/-0) charm-helpers-tests.yaml (+5/-0) charm-helpers.yaml (+0/-10) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0) hooks/charmhelpers/contrib/openstack/context.py (+45/-13) hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0) hooks/charmhelpers/contrib/openstack/templating.py (+22/-23) hooks/charmhelpers/contrib/openstack/utils.py (+5/-2) hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1) hooks/charmhelpers/contrib/storage/linux/utils.py (+1/-0) hooks/charmhelpers/core/fstab.py (+116/-0) hooks/charmhelpers/core/hookenv.py (+5/-4) hooks/charmhelpers/core/host.py (+28/-12) hooks/charmhelpers/fetch/__init__.py (+24/-16) hooks/charmhelpers/fetch/bzrurl.py (+2/-1) tests/00-setup (+10/-0) tests/10-basic-precise-essex (+10/-0) tests/11-basic-precise-folsom (+18/-0) tests/12-basic-precise-grizzly (+12/-0) tests/13-basic-precise-havana (+12/-0) tests/14-basic-precise-icehouse (+12/-0) tests/15-basic-trusty-icehouse (+10/-0) tests/README (+47/-0) tests/basic_deployment.py (+520/-0) tests/charmhelpers/contrib/amulet/deployment.py (+63/-0) tests/charmhelpers/contrib/amulet/utils.py (+157/-0) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0) tests/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0) |
To merge this branch: | bzr merge lp:~corey.bryant/charms/trusty/nova-cloud-controller/amulet-basic |
Related bugs: none
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Liam Young (community) | Needs Fixing | | |
Review via email: (address elided)
Commit message
Description of the change
To post a comment you must log in.
Revision 84, by Corey Bryant:

    Add Amulet basic tests
Revision history for this message
Corey Bryant (corey.bryant) wrote:
Thanks for the review Liam. Good catch on the noqa issue. I'll fix that in the charm-helpers branch and will fix up any lint issues throughout the charm tests.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'Makefile' | |||
2 | --- Makefile 2014-05-21 10:14:28 +0000 | |||
3 | +++ Makefile 2014-07-11 17:34:59 +0000 | |||
4 | @@ -2,15 +2,23 @@ | |||
5 | 2 | PYTHON := /usr/bin/env python | 2 | PYTHON := /usr/bin/env python |
6 | 3 | 3 | ||
7 | 4 | lint: | 4 | lint: |
9 | 5 | @flake8 --exclude hooks/charmhelpers hooks unit_tests | 5 | @flake8 --exclude hooks/charmhelpers hooks unit_tests tests |
10 | 6 | @charm proof | 6 | @charm proof |
11 | 7 | 7 | ||
12 | 8 | unit_test: | ||
13 | 9 | @echo Starting unit tests... | ||
14 | 10 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests | ||
15 | 11 | |||
16 | 8 | test: | 12 | test: |
19 | 9 | @echo Starting tests... | 13 | @echo Starting Amulet tests... |
20 | 10 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests | 14 | # coreycb note: The -v should only be temporary until Amulet sends |
21 | 15 | # raise_status() messages to stderr: | ||
22 | 16 | # https://bugs.launchpad.net/amulet/+bug/1320357 | ||
23 | 17 | @juju test -v -p AMULET_HTTP_PROXY | ||
24 | 11 | 18 | ||
25 | 12 | sync: | 19 | sync: |
27 | 13 | @charm-helper-sync -c charm-helpers.yaml | 20 | @charm-helper-sync -c charm-helpers-hooks.yaml |
28 | 21 | @charm-helper-sync -c charm-helpers-tests.yaml | ||
29 | 14 | 22 | ||
30 | 15 | publish: lint test | 23 | publish: lint test |
31 | 16 | bzr push lp:charms/nova-cloud-controller | 24 | bzr push lp:charms/nova-cloud-controller |
32 | 17 | 25 | ||
33 | === added file 'charm-helpers-hooks.yaml' | |||
34 | --- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000 | |||
35 | +++ charm-helpers-hooks.yaml 2014-07-11 17:34:59 +0000 | |||
36 | @@ -0,0 +1,10 @@ | |||
37 | 1 | branch: lp:charm-helpers | ||
38 | 2 | destination: hooks/charmhelpers | ||
39 | 3 | include: | ||
40 | 4 | - core | ||
41 | 5 | - fetch | ||
42 | 6 | - contrib.openstack|inc=* | ||
43 | 7 | - contrib.storage | ||
44 | 8 | - contrib.hahelpers: | ||
45 | 9 | - apache | ||
46 | 10 | - payload.execd | ||
47 | 0 | 11 | ||
48 | === added file 'charm-helpers-tests.yaml' | |||
49 | --- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000 | |||
50 | +++ charm-helpers-tests.yaml 2014-07-11 17:34:59 +0000 | |||
51 | @@ -0,0 +1,5 @@ | |||
52 | 1 | branch: lp:charm-helpers | ||
53 | 2 | destination: tests/charmhelpers | ||
54 | 3 | include: | ||
55 | 4 | - contrib.amulet | ||
56 | 5 | - contrib.openstack.amulet | ||
57 | 0 | 6 | ||
58 | === removed file 'charm-helpers.yaml' | |||
59 | --- charm-helpers.yaml 2014-05-10 02:00:22 +0000 | |||
60 | +++ charm-helpers.yaml 1970-01-01 00:00:00 +0000 | |||
61 | @@ -1,10 +0,0 @@ | |||
62 | 1 | branch: lp:charm-helpers | ||
63 | 2 | destination: hooks/charmhelpers | ||
64 | 3 | include: | ||
65 | 4 | - core | ||
66 | 5 | - fetch | ||
67 | 6 | - contrib.openstack|inc=* | ||
68 | 7 | - contrib.storage | ||
69 | 8 | - contrib.hahelpers: | ||
70 | 9 | - apache | ||
71 | 10 | - payload.execd | ||
72 | 11 | 0 | ||
73 | === added directory 'hooks/charmhelpers/contrib/openstack/amulet' | |||
74 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
75 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
76 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
77 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 17:34:59 +0000 | |||
78 | @@ -0,0 +1,57 @@ | |||
79 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
80 | 2 | AmuletDeployment | ||
81 | 3 | ) | ||
82 | 4 | |||
83 | 5 | |||
84 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | ||
85 | 7 | """This class inherits from AmuletDeployment and has additional support | ||
86 | 8 | that is specifically for use by OpenStack charms.""" | ||
87 | 9 | |||
88 | 10 | def __init__(self, series, openstack=None, source=None): | ||
89 | 11 | """Initialize the deployment environment.""" | ||
90 | 12 | super(OpenStackAmuletDeployment, self).__init__(series) | ||
91 | 13 | self.openstack = openstack | ||
92 | 14 | self.source = source | ||
93 | 15 | |||
94 | 16 | def _add_services(self, this_service, other_services): | ||
95 | 17 | """Add services to the deployment and set openstack-origin.""" | ||
96 | 18 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
97 | 19 | other_services) | ||
98 | 20 | name = 0 | ||
99 | 21 | services = other_services | ||
100 | 22 | services.append(this_service) | ||
101 | 23 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | ||
102 | 24 | |||
103 | 25 | if self.openstack: | ||
104 | 26 | for svc in services: | ||
105 | 27 | charm_name = self._get_charm_name(svc[name]) | ||
106 | 28 | if charm_name not in use_source: | ||
107 | 29 | config = {'openstack-origin': self.openstack} | ||
108 | 30 | self.d.configure(svc[name], config) | ||
109 | 31 | |||
110 | 32 | if self.source: | ||
111 | 33 | for svc in services: | ||
112 | 34 | charm_name = self._get_charm_name(svc[name]) | ||
113 | 35 | if charm_name in use_source: | ||
114 | 36 | config = {'source': self.source} | ||
115 | 37 | self.d.configure(svc[name], config) | ||
116 | 38 | |||
117 | 39 | def _configure_services(self, configs): | ||
118 | 40 | """Configure all of the services.""" | ||
119 | 41 | for service, config in configs.iteritems(): | ||
120 | 42 | self.d.configure(service, config) | ||
121 | 43 | |||
122 | 44 | def _get_openstack_release(self): | ||
123 | 45 | """Return an integer representing the enum value of the openstack | ||
124 | 46 | release.""" | ||
125 | 47 | self.precise_essex, self.precise_folsom, self.precise_grizzly, \ | ||
126 | 48 | self.precise_havana, self.precise_icehouse, \ | ||
127 | 49 | self.trusty_icehouse = range(6) | ||
128 | 50 | releases = { | ||
129 | 51 | ('precise', None): self.precise_essex, | ||
130 | 52 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | ||
131 | 53 | ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, | ||
132 | 54 | ('precise', 'cloud:precise-havana'): self.precise_havana, | ||
133 | 55 | ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, | ||
134 | 56 | ('trusty', None): self.trusty_icehouse} | ||
135 | 57 | return releases[(self.series, self.openstack)] | ||
136 | 0 | 58 | ||
137 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
138 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
139 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 17:34:59 +0000 | |||
140 | @@ -0,0 +1,253 @@ | |||
141 | 1 | import logging | ||
142 | 2 | import os | ||
143 | 3 | import time | ||
144 | 4 | import urllib | ||
145 | 5 | |||
146 | 6 | import glanceclient.v1.client as glance_client | ||
147 | 7 | import keystoneclient.v2_0 as keystone_client | ||
148 | 8 | import novaclient.v1_1.client as nova_client | ||
149 | 9 | |||
150 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
151 | 11 | AmuletUtils | ||
152 | 12 | ) | ||
153 | 13 | |||
154 | 14 | DEBUG = logging.DEBUG | ||
155 | 15 | ERROR = logging.ERROR | ||
156 | 16 | |||
157 | 17 | |||
158 | 18 | class OpenStackAmuletUtils(AmuletUtils): | ||
159 | 19 | """This class inherits from AmuletUtils and has additional support | ||
160 | 20 | that is specifically for use by OpenStack charms.""" | ||
161 | 21 | |||
162 | 22 | def __init__(self, log_level=ERROR): | ||
163 | 23 | """Initialize the deployment environment.""" | ||
164 | 24 | super(OpenStackAmuletUtils, self).__init__(log_level) | ||
165 | 25 | |||
166 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | ||
167 | 27 | public_port, expected): | ||
168 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | ||
169 | 29 | are used to find the matching endpoint.""" | ||
170 | 30 | found = False | ||
171 | 31 | for ep in endpoints: | ||
172 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | ||
173 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | ||
174 | 34 | and public_port in ep.publicurl: | ||
175 | 35 | found = True | ||
176 | 36 | actual = {'id': ep.id, | ||
177 | 37 | 'region': ep.region, | ||
178 | 38 | 'adminurl': ep.adminurl, | ||
179 | 39 | 'internalurl': ep.internalurl, | ||
180 | 40 | 'publicurl': ep.publicurl, | ||
181 | 41 | 'service_id': ep.service_id} | ||
182 | 42 | ret = self._validate_dict_data(expected, actual) | ||
183 | 43 | if ret: | ||
184 | 44 | return 'unexpected endpoint data - {}'.format(ret) | ||
185 | 45 | |||
186 | 46 | if not found: | ||
187 | 47 | return 'endpoint not found' | ||
188 | 48 | |||
189 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | ||
190 | 50 | """Validate a list of actual service catalog endpoints vs a list of | ||
191 | 51 | expected service catalog endpoints.""" | ||
192 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | ||
193 | 53 | for k, v in expected.iteritems(): | ||
194 | 54 | if k in actual: | ||
195 | 55 | ret = self._validate_dict_data(expected[k][0], actual[k][0]) | ||
196 | 56 | if ret: | ||
197 | 57 | return self.endpoint_error(k, ret) | ||
198 | 58 | else: | ||
199 | 59 | return "endpoint {} does not exist".format(k) | ||
200 | 60 | return ret | ||
201 | 61 | |||
202 | 62 | def validate_tenant_data(self, expected, actual): | ||
203 | 63 | """Validate a list of actual tenant data vs list of expected tenant | ||
204 | 64 | data.""" | ||
205 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | ||
206 | 66 | for e in expected: | ||
207 | 67 | found = False | ||
208 | 68 | for act in actual: | ||
209 | 69 | a = {'enabled': act.enabled, 'description': act.description, | ||
210 | 70 | 'name': act.name, 'id': act.id} | ||
211 | 71 | if e['name'] == a['name']: | ||
212 | 72 | found = True | ||
213 | 73 | ret = self._validate_dict_data(e, a) | ||
214 | 74 | if ret: | ||
215 | 75 | return "unexpected tenant data - {}".format(ret) | ||
216 | 76 | if not found: | ||
217 | 77 | return "tenant {} does not exist".format(e['name']) | ||
218 | 78 | return ret | ||
219 | 79 | |||
220 | 80 | def validate_role_data(self, expected, actual): | ||
221 | 81 | """Validate a list of actual role data vs a list of expected role | ||
222 | 82 | data.""" | ||
223 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | ||
224 | 84 | for e in expected: | ||
225 | 85 | found = False | ||
226 | 86 | for act in actual: | ||
227 | 87 | a = {'name': act.name, 'id': act.id} | ||
228 | 88 | if e['name'] == a['name']: | ||
229 | 89 | found = True | ||
230 | 90 | ret = self._validate_dict_data(e, a) | ||
231 | 91 | if ret: | ||
232 | 92 | return "unexpected role data - {}".format(ret) | ||
233 | 93 | if not found: | ||
234 | 94 | return "role {} does not exist".format(e['name']) | ||
235 | 95 | return ret | ||
236 | 96 | |||
237 | 97 | def validate_user_data(self, expected, actual): | ||
238 | 98 | """Validate a list of actual user data vs a list of expected user | ||
239 | 99 | data.""" | ||
240 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | ||
241 | 101 | for e in expected: | ||
242 | 102 | found = False | ||
243 | 103 | for act in actual: | ||
244 | 104 | a = {'enabled': act.enabled, 'name': act.name, | ||
245 | 105 | 'email': act.email, 'tenantId': act.tenantId, | ||
246 | 106 | 'id': act.id} | ||
247 | 107 | if e['name'] == a['name']: | ||
248 | 108 | found = True | ||
249 | 109 | ret = self._validate_dict_data(e, a) | ||
250 | 110 | if ret: | ||
251 | 111 | return "unexpected user data - {}".format(ret) | ||
252 | 112 | if not found: | ||
253 | 113 | return "user {} does not exist".format(e['name']) | ||
254 | 114 | return ret | ||
255 | 115 | |||
256 | 116 | def validate_flavor_data(self, expected, actual): | ||
257 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | ||
258 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | ||
259 | 119 | act = [a.name for a in actual] | ||
260 | 120 | return self._validate_list_data(expected, act) | ||
261 | 121 | |||
262 | 122 | def tenant_exists(self, keystone, tenant): | ||
263 | 123 | """Return True if tenant exists""" | ||
264 | 124 | return tenant in [t.name for t in keystone.tenants.list()] | ||
265 | 125 | |||
266 | 126 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | ||
267 | 127 | tenant): | ||
268 | 128 | """Authenticates admin user with the keystone admin endpoint.""" | ||
269 | 129 | service_ip = \ | ||
270 | 130 | keystone_sentry.relation('shared-db', | ||
271 | 131 | 'mysql:shared-db')['private-address'] | ||
272 | 132 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | ||
273 | 133 | return keystone_client.Client(username=user, password=password, | ||
274 | 134 | tenant_name=tenant, auth_url=ep) | ||
275 | 135 | |||
276 | 136 | def authenticate_keystone_user(self, keystone, user, password, tenant): | ||
277 | 137 | """Authenticates a regular user with the keystone public endpoint.""" | ||
278 | 138 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
279 | 139 | endpoint_type='publicURL') | ||
280 | 140 | return keystone_client.Client(username=user, password=password, | ||
281 | 141 | tenant_name=tenant, auth_url=ep) | ||
282 | 142 | |||
283 | 143 | def authenticate_glance_admin(self, keystone): | ||
284 | 144 | """Authenticates admin user with glance.""" | ||
285 | 145 | ep = keystone.service_catalog.url_for(service_type='image', | ||
286 | 146 | endpoint_type='adminURL') | ||
287 | 147 | return glance_client.Client(ep, token=keystone.auth_token) | ||
288 | 148 | |||
289 | 149 | def authenticate_nova_user(self, keystone, user, password, tenant): | ||
290 | 150 | """Authenticates a regular user with nova-api.""" | ||
291 | 151 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
292 | 152 | endpoint_type='publicURL') | ||
293 | 153 | return nova_client.Client(username=user, api_key=password, | ||
294 | 154 | project_id=tenant, auth_url=ep) | ||
295 | 155 | |||
296 | 156 | def create_cirros_image(self, glance, image_name): | ||
297 | 157 | """Download the latest cirros image and upload it to glance.""" | ||
298 | 158 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | ||
299 | 159 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | ||
300 | 160 | if http_proxy: | ||
301 | 161 | proxies = {'http': http_proxy} | ||
302 | 162 | opener = urllib.FancyURLopener(proxies) | ||
303 | 163 | else: | ||
304 | 164 | opener = urllib.FancyURLopener() | ||
305 | 165 | |||
306 | 166 | f = opener.open("http://download.cirros-cloud.net/version/released") | ||
307 | 167 | version = f.read().strip() | ||
308 | 168 | cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) | ||
309 | 169 | |||
310 | 170 | if not os.path.exists(cirros_img): | ||
311 | 171 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | ||
312 | 172 | version, cirros_img) | ||
313 | 173 | opener.retrieve(cirros_url, cirros_img) | ||
314 | 174 | f.close() | ||
315 | 175 | |||
316 | 176 | with open(cirros_img) as f: | ||
317 | 177 | image = glance.images.create(name=image_name, is_public=True, | ||
318 | 178 | disk_format='qcow2', | ||
319 | 179 | container_format='bare', data=f) | ||
320 | 180 | count = 1 | ||
321 | 181 | status = image.status | ||
322 | 182 | while status != 'active' and count < 10: | ||
323 | 183 | time.sleep(3) | ||
324 | 184 | image = glance.images.get(image.id) | ||
325 | 185 | status = image.status | ||
326 | 186 | self.log.debug('image status: {}'.format(status)) | ||
327 | 187 | count += 1 | ||
328 | 188 | |||
329 | 189 | if status != 'active': | ||
330 | 190 | self.log.error('image creation timed out') | ||
331 | 191 | return None | ||
332 | 192 | |||
333 | 193 | return image | ||
334 | 194 | |||
335 | 195 | def delete_image(self, glance, image): | ||
336 | 196 | """Delete the specified image.""" | ||
337 | 197 | num_before = len(list(glance.images.list())) | ||
338 | 198 | glance.images.delete(image) | ||
339 | 199 | |||
340 | 200 | count = 1 | ||
341 | 201 | num_after = len(list(glance.images.list())) | ||
342 | 202 | while num_after != (num_before - 1) and count < 10: | ||
343 | 203 | time.sleep(3) | ||
344 | 204 | num_after = len(list(glance.images.list())) | ||
345 | 205 | self.log.debug('number of images: {}'.format(num_after)) | ||
346 | 206 | count += 1 | ||
347 | 207 | |||
348 | 208 | if num_after != (num_before - 1): | ||
349 | 209 | self.log.error('image deletion timed out') | ||
350 | 210 | return False | ||
351 | 211 | |||
352 | 212 | return True | ||
353 | 213 | |||
354 | 214 | def create_instance(self, nova, image_name, instance_name, flavor): | ||
355 | 215 | """Create the specified instance.""" | ||
356 | 216 | image = nova.images.find(name=image_name) | ||
357 | 217 | flavor = nova.flavors.find(name=flavor) | ||
358 | 218 | instance = nova.servers.create(name=instance_name, image=image, | ||
359 | 219 | flavor=flavor) | ||
360 | 220 | |||
361 | 221 | count = 1 | ||
362 | 222 | status = instance.status | ||
363 | 223 | while status != 'ACTIVE' and count < 60: | ||
364 | 224 | time.sleep(3) | ||
365 | 225 | instance = nova.servers.get(instance.id) | ||
366 | 226 | status = instance.status | ||
367 | 227 | self.log.debug('instance status: {}'.format(status)) | ||
368 | 228 | count += 1 | ||
369 | 229 | |||
370 | 230 | if status != 'ACTIVE': | ||
371 | 231 | self.log.error('instance creation timed out') | ||
372 | 232 | return None | ||
373 | 233 | |||
374 | 234 | return instance | ||
375 | 235 | |||
376 | 236 | def delete_instance(self, nova, instance): | ||
377 | 237 | """Delete the specified instance.""" | ||
378 | 238 | num_before = len(list(nova.servers.list())) | ||
379 | 239 | nova.servers.delete(instance) | ||
380 | 240 | |||
381 | 241 | count = 1 | ||
382 | 242 | num_after = len(list(nova.servers.list())) | ||
383 | 243 | while num_after != (num_before - 1) and count < 10: | ||
384 | 244 | time.sleep(3) | ||
385 | 245 | num_after = len(list(nova.servers.list())) | ||
386 | 246 | self.log.debug('number of instances: {}'.format(num_after)) | ||
387 | 247 | count += 1 | ||
388 | 248 | |||
389 | 249 | if num_after != (num_before - 1): | ||
390 | 250 | self.log.error('instance deletion timed out') | ||
391 | 251 | return False | ||
392 | 252 | |||
393 | 253 | return True | ||
394 | 0 | 254 | ||
395 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
396 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-05-21 10:28:14 +0000 | |||
397 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-11 17:34:59 +0000 | |||
398 | @@ -243,23 +243,31 @@ | |||
399 | 243 | 243 | ||
400 | 244 | 244 | ||
401 | 245 | class AMQPContext(OSContextGenerator): | 245 | class AMQPContext(OSContextGenerator): |
402 | 246 | interfaces = ['amqp'] | ||
403 | 247 | 246 | ||
405 | 248 | def __init__(self, ssl_dir=None): | 247 | def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): |
406 | 249 | self.ssl_dir = ssl_dir | 248 | self.ssl_dir = ssl_dir |
407 | 249 | self.rel_name = rel_name | ||
408 | 250 | self.relation_prefix = relation_prefix | ||
409 | 251 | self.interfaces = [rel_name] | ||
410 | 250 | 252 | ||
411 | 251 | def __call__(self): | 253 | def __call__(self): |
412 | 252 | log('Generating template context for amqp') | 254 | log('Generating template context for amqp') |
413 | 253 | conf = config() | 255 | conf = config() |
414 | 256 | user_setting = 'rabbit-user' | ||
415 | 257 | vhost_setting = 'rabbit-vhost' | ||
416 | 258 | if self.relation_prefix: | ||
417 | 259 | user_setting = self.relation_prefix + '-rabbit-user' | ||
418 | 260 | vhost_setting = self.relation_prefix + '-rabbit-vhost' | ||
419 | 261 | |||
420 | 254 | try: | 262 | try: |
423 | 255 | username = conf['rabbit-user'] | 263 | username = conf[user_setting] |
424 | 256 | vhost = conf['rabbit-vhost'] | 264 | vhost = conf[vhost_setting] |
425 | 257 | except KeyError as e: | 265 | except KeyError as e: |
426 | 258 | log('Could not generate shared_db context. ' | 266 | log('Could not generate shared_db context. ' |
427 | 259 | 'Missing required charm config options: %s.' % e) | 267 | 'Missing required charm config options: %s.' % e) |
428 | 260 | raise OSContextError | 268 | raise OSContextError |
429 | 261 | ctxt = {} | 269 | ctxt = {} |
431 | 262 | for rid in relation_ids('amqp'): | 270 | for rid in relation_ids(self.rel_name): |
432 | 263 | ha_vip_only = False | 271 | ha_vip_only = False |
433 | 264 | for unit in related_units(rid): | 272 | for unit in related_units(rid): |
434 | 265 | if relation_get('clustered', rid=rid, unit=unit): | 273 | if relation_get('clustered', rid=rid, unit=unit): |
435 | @@ -418,12 +426,13 @@ | |||
436 | 418 | """ | 426 | """ |
437 | 419 | Generates a context for an apache vhost configuration that configures | 427 | Generates a context for an apache vhost configuration that configures |
438 | 420 | HTTPS reverse proxying for one or many endpoints. Generated context | 428 | HTTPS reverse proxying for one or many endpoints. Generated context |
445 | 421 | looks something like: | 429 | looks something like:: |
446 | 422 | { | 430 | |
447 | 423 | 'namespace': 'cinder', | 431 | { |
448 | 424 | 'private_address': 'iscsi.mycinderhost.com', | 432 | 'namespace': 'cinder', |
449 | 425 | 'endpoints': [(8776, 8766), (8777, 8767)] | 433 | 'private_address': 'iscsi.mycinderhost.com', |
450 | 426 | } | 434 | 'endpoints': [(8776, 8766), (8777, 8767)] |
451 | 435 | } | ||
452 | 427 | 436 | ||
453 | 428 | The endpoints list consists of a tuples mapping external ports | 437 | The endpoints list consists of a tuples mapping external ports |
454 | 429 | to internal ports. | 438 | to internal ports. |
455 | @@ -541,6 +550,26 @@ | |||
456 | 541 | 550 | ||
457 | 542 | return nvp_ctxt | 551 | return nvp_ctxt |
458 | 543 | 552 | ||
459 | 553 | def n1kv_ctxt(self): | ||
460 | 554 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
461 | 555 | self.network_manager) | ||
462 | 556 | n1kv_config = neutron_plugin_attribute(self.plugin, 'config', | ||
463 | 557 | self.network_manager) | ||
464 | 558 | n1kv_ctxt = { | ||
465 | 559 | 'core_plugin': driver, | ||
466 | 560 | 'neutron_plugin': 'n1kv', | ||
467 | 561 | 'neutron_security_groups': self.neutron_security_groups, | ||
468 | 562 | 'local_ip': unit_private_ip(), | ||
469 | 563 | 'config': n1kv_config, | ||
470 | 564 | 'vsm_ip': config('n1kv-vsm-ip'), | ||
471 | 565 | 'vsm_username': config('n1kv-vsm-username'), | ||
472 | 566 | 'vsm_password': config('n1kv-vsm-password'), | ||
473 | 567 | 'restrict_policy_profiles': config( | ||
474 | 568 | 'n1kv_restrict_policy_profiles'), | ||
475 | 569 | } | ||
476 | 570 | |||
477 | 571 | return n1kv_ctxt | ||
478 | 572 | |||
479 | 544 | def neutron_ctxt(self): | 573 | def neutron_ctxt(self): |
480 | 545 | if https(): | 574 | if https(): |
481 | 546 | proto = 'https' | 575 | proto = 'https' |
482 | @@ -572,6 +601,8 @@ | |||
483 | 572 | ctxt.update(self.ovs_ctxt()) | 601 | ctxt.update(self.ovs_ctxt()) |
484 | 573 | elif self.plugin in ['nvp', 'nsx']: | 602 | elif self.plugin in ['nvp', 'nsx']: |
485 | 574 | ctxt.update(self.nvp_ctxt()) | 603 | ctxt.update(self.nvp_ctxt()) |
486 | 604 | elif self.plugin == 'n1kv': | ||
487 | 605 | ctxt.update(self.n1kv_ctxt()) | ||
488 | 575 | 606 | ||
489 | 576 | alchemy_flags = config('neutron-alchemy-flags') | 607 | alchemy_flags = config('neutron-alchemy-flags') |
490 | 577 | if alchemy_flags: | 608 | if alchemy_flags: |
491 | @@ -611,7 +642,7 @@ | |||
492 | 611 | The subordinate interface allows subordinates to export their | 642 | The subordinate interface allows subordinates to export their |
493 | 612 | configuration requirements to the principle for multiple config | 643 | configuration requirements to the principle for multiple config |
494 | 613 | files and multiple serivces. Ie, a subordinate that has interfaces | 644 | files and multiple serivces. Ie, a subordinate that has interfaces |
496 | 614 | to both glance and nova may export to following yaml blob as json: | 645 | to both glance and nova may export to following yaml blob as json:: |
497 | 615 | 646 | ||
498 | 616 | glance: | 647 | glance: |
499 | 617 | /etc/glance/glance-api.conf: | 648 | /etc/glance/glance-api.conf: |
500 | @@ -630,7 +661,8 @@ | |||
501 | 630 | 661 | ||
502 | 631 | It is then up to the principle charms to subscribe this context to | 662 | It is then up to the principle charms to subscribe this context to |
503 | 632 | the service+config file it is interestd in. Configuration data will | 663 | the service+config file it is interestd in. Configuration data will |
505 | 633 | be available in the template context, in glance's case, as: | 664 | be available in the template context, in glance's case, as:: |
506 | 665 | |||
507 | 634 | ctxt = { | 666 | ctxt = { |
508 | 635 | ... other context ... | 667 | ... other context ... |
509 | 636 | 'subordinate_config': { | 668 | 'subordinate_config': { |
510 | 637 | 669 | ||
511 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
512 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-21 10:28:14 +0000 | |||
513 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-11 17:34:59 +0000 | |||
514 | @@ -128,6 +128,20 @@ | |||
515 | 128 | 'server_packages': ['neutron-server', | 128 | 'server_packages': ['neutron-server', |
516 | 129 | 'neutron-plugin-vmware'], | 129 | 'neutron-plugin-vmware'], |
517 | 130 | 'server_services': ['neutron-server'] | 130 | 'server_services': ['neutron-server'] |
518 | 131 | }, | ||
519 | 132 | 'n1kv': { | ||
520 | 133 | 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', | ||
521 | 134 | 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', | ||
522 | 135 | 'contexts': [ | ||
523 | 136 | context.SharedDBContext(user=config('neutron-database-user'), | ||
524 | 137 | database=config('neutron-database'), | ||
525 | 138 | relation_prefix='neutron', | ||
526 | 139 | ssl_dir=NEUTRON_CONF_DIR)], | ||
527 | 140 | 'services': [], | ||
528 | 141 | 'packages': [['neutron-plugin-cisco']], | ||
529 | 142 | 'server_packages': ['neutron-server', | ||
530 | 143 | 'neutron-plugin-cisco'], | ||
531 | 144 | 'server_services': ['neutron-server'] | ||
532 | 131 | } | 145 | } |
533 | 132 | } | 146 | } |
534 | 133 | if release >= 'icehouse': | 147 | if release >= 'icehouse': |
535 | 134 | 148 | ||
536 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
537 | --- hooks/charmhelpers/contrib/openstack/templating.py 2014-02-24 19:31:57 +0000 | |||
538 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-11 17:34:59 +0000 | |||
539 | @@ -30,17 +30,17 @@ | |||
540 | 30 | loading dir. | 30 | loading dir. |
541 | 31 | 31 | ||
542 | 32 | A charm may also ship a templates dir with this module | 32 | A charm may also ship a templates dir with this module |
554 | 33 | and it will be appended to the bottom of the search list, eg: | 33 | and it will be appended to the bottom of the search list, eg:: |
555 | 34 | hooks/charmhelpers/contrib/openstack/templates. | 34 | |
556 | 35 | 35 | hooks/charmhelpers/contrib/openstack/templates | |
557 | 36 | :param templates_dir: str: Base template directory containing release | 36 | |
558 | 37 | sub-directories. | 37 | :param templates_dir (str): Base template directory containing release |
559 | 38 | :param os_release : str: OpenStack release codename to construct template | 38 | sub-directories. |
560 | 39 | loader. | 39 | :param os_release (str): OpenStack release codename to construct template |
561 | 40 | 40 | loader. | |
562 | 41 | :returns : jinja2.ChoiceLoader constructed with a list of | 41 | :returns: jinja2.ChoiceLoader constructed with a list of |
563 | 42 | jinja2.FilesystemLoaders, ordered in descending | 42 | jinja2.FilesystemLoaders, ordered in descending |
564 | 43 | order by OpenStack release. | 43 | order by OpenStack release. |
565 | 44 | """ | 44 | """ |
566 | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
567 | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] |
568 | @@ -111,7 +111,8 @@ | |||
569 | 111 | and ease the burden of managing config templates across multiple OpenStack | 111 | and ease the burden of managing config templates across multiple OpenStack |
570 | 112 | releases. | 112 | releases. |
571 | 113 | 113 | ||
573 | 114 | Basic usage: | 114 | Basic usage:: |
574 | 115 | |||
575 | 115 | # import some common context generates from charmhelpers | 116 | # import some common context generates from charmhelpers |
576 | 116 | from charmhelpers.contrib.openstack import context | 117 | from charmhelpers.contrib.openstack import context |
577 | 117 | 118 | ||
578 | @@ -131,21 +132,19 @@ | |||
579 | 131 | # write out all registered configs | 132 | # write out all registered configs |
580 | 132 | configs.write_all() | 133 | configs.write_all() |
581 | 133 | 134 | ||
583 | 134 | Details: | 135 | **OpenStack Releases and template loading** |
584 | 135 | 136 | ||
585 | 136 | OpenStack Releases and template loading | ||
586 | 137 | --------------------------------------- | ||
587 | 138 | When the object is instantiated, it is associated with a specific OS | 137 | When the object is instantiated, it is associated with a specific OS |
588 | 139 | release. This dictates how the template loader will be constructed. | 138 | release. This dictates how the template loader will be constructed. |
589 | 140 | 139 | ||
590 | 141 | The constructed loader attempts to load the template from several places | 140 | The constructed loader attempts to load the template from several places |
591 | 142 | in the following order: | 141 | in the following order: |
598 | 143 | - from the most recent OS release-specific template dir (if one exists) | 142 | - from the most recent OS release-specific template dir (if one exists) |
599 | 144 | - the base templates_dir | 143 | - the base templates_dir |
600 | 145 | - a template directory shipped in the charm with this helper file. | 144 | - a template directory shipped in the charm with this helper file. |
601 | 146 | 145 | ||
602 | 147 | 146 | For the example above, '/tmp/templates' contains the following structure:: | |
603 | 148 | For the example above, '/tmp/templates' contains the following structure: | 147 | |
604 | 149 | /tmp/templates/nova.conf | 148 | /tmp/templates/nova.conf |
605 | 150 | /tmp/templates/api-paste.ini | 149 | /tmp/templates/api-paste.ini |
606 | 151 | /tmp/templates/grizzly/api-paste.ini | 150 | /tmp/templates/grizzly/api-paste.ini |
607 | @@ -169,8 +168,8 @@ | |||
608 | 169 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows | 168 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows |
609 | 170 | us to ship common templates (haproxy, apache) with the helpers. | 169 | us to ship common templates (haproxy, apache) with the helpers. |
610 | 171 | 170 | ||
613 | 172 | Context generators | 171 | **Context generators** |
614 | 173 | --------------------------------------- | 172 | |
615 | 174 | Context generators are used to generate template contexts during hook | 173 | Context generators are used to generate template contexts during hook |
616 | 175 | execution. Doing so may require inspecting service relations, charm | 174 | execution. Doing so may require inspecting service relations, charm |
617 | 176 | config, etc. When registered, a config file is associated with a list | 175 | config, etc. When registered, a config file is associated with a list |
618 | 177 | 176 | ||
619 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
620 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-06-16 14:47:23 +0000 | |||
621 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-07-11 17:34:59 +0000 | |||
622 | @@ -3,7 +3,6 @@ | |||
623 | 3 | # Common python helper functions used for OpenStack charms. | 3 | # Common python helper functions used for OpenStack charms. |
624 | 4 | from collections import OrderedDict | 4 | from collections import OrderedDict |
625 | 5 | 5 | ||
626 | 6 | import apt_pkg as apt | ||
627 | 7 | import subprocess | 6 | import subprocess |
628 | 8 | import os | 7 | import os |
629 | 9 | import socket | 8 | import socket |
630 | @@ -85,6 +84,8 @@ | |||
631 | 85 | '''Derive OpenStack release codename from a given installation source.''' | 84 | '''Derive OpenStack release codename from a given installation source.''' |
632 | 86 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | 85 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
633 | 87 | rel = '' | 86 | rel = '' |
634 | 87 | if src is None: | ||
635 | 88 | return rel | ||
636 | 88 | if src in ['distro', 'distro-proposed']: | 89 | if src in ['distro', 'distro-proposed']: |
637 | 89 | try: | 90 | try: |
638 | 90 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] | 91 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] |
639 | @@ -132,6 +133,7 @@ | |||
640 | 132 | 133 | ||
641 | 133 | def get_os_codename_package(package, fatal=True): | 134 | def get_os_codename_package(package, fatal=True): |
642 | 134 | '''Derive OpenStack release codename from an installed package.''' | 135 | '''Derive OpenStack release codename from an installed package.''' |
643 | 136 | import apt_pkg as apt | ||
644 | 135 | apt.init() | 137 | apt.init() |
645 | 136 | 138 | ||
646 | 137 | # Tell apt to build an in-memory cache to prevent race conditions (if | 139 | # Tell apt to build an in-memory cache to prevent race conditions (if |
647 | @@ -189,7 +191,7 @@ | |||
648 | 189 | for version, cname in vers_map.iteritems(): | 191 | for version, cname in vers_map.iteritems(): |
649 | 190 | if cname == codename: | 192 | if cname == codename: |
650 | 191 | return version | 193 | return version |
652 | 192 | #e = "Could not determine OpenStack version for package: %s" % pkg | 194 | # e = "Could not determine OpenStack version for package: %s" % pkg |
653 | 193 | # error_out(e) | 195 | # error_out(e) |
654 | 194 | 196 | ||
655 | 195 | 197 | ||
656 | @@ -325,6 +327,7 @@ | |||
657 | 325 | 327 | ||
658 | 326 | """ | 328 | """ |
659 | 327 | 329 | ||
660 | 330 | import apt_pkg as apt | ||
661 | 328 | src = config('openstack-origin') | 331 | src = config('openstack-origin') |
662 | 329 | cur_vers = get_os_version_package(package) | 332 | cur_vers = get_os_version_package(package) |
663 | 330 | available_vers = get_os_version_install_source(src) | 333 | available_vers = get_os_version_install_source(src) |
664 | 331 | 334 | ||
665 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
666 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-03-27 11:02:24 +0000 | |||
667 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-11 17:34:59 +0000 | |||
668 | @@ -303,7 +303,7 @@ | |||
669 | 303 | blk_device, fstype, system_services=[]): | 303 | blk_device, fstype, system_services=[]): |
670 | 304 | """ | 304 | """ |
671 | 305 | NOTE: This function must only be called from a single service unit for | 305 | NOTE: This function must only be called from a single service unit for |
673 | 306 | the same rbd_img otherwise data loss will occur. | 306 | the same rbd_img otherwise data loss will occur. |
674 | 307 | 307 | ||
675 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | 308 | Ensures given pool and RBD image exists, is mapped to a block device, |
676 | 309 | and the device is formatted and mounted at the given mount_point. | 309 | and the device is formatted and mounted at the given mount_point. |
677 | 310 | 310 | ||
678 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
679 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-21 10:28:14 +0000 | |||
680 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-11 17:34:59 +0000 | |||
681 | @@ -37,6 +37,7 @@ | |||
682 | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), |
683 | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) |
684 | 39 | 39 | ||
685 | 40 | |||
686 | 40 | def is_device_mounted(device): | 41 | def is_device_mounted(device): |
687 | 41 | '''Given a device path, return True if that device is mounted, and False | 42 | '''Given a device path, return True if that device is mounted, and False |
688 | 42 | if it isn't. | 43 | if it isn't. |
689 | 43 | 44 | ||
690 | === added file 'hooks/charmhelpers/core/fstab.py' | |||
691 | --- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000 | |||
692 | +++ hooks/charmhelpers/core/fstab.py 2014-07-11 17:34:59 +0000 | |||
693 | @@ -0,0 +1,116 @@ | |||
694 | 1 | #!/usr/bin/env python | ||
695 | 2 | # -*- coding: utf-8 -*- | ||
696 | 3 | |||
697 | 4 | __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' | ||
698 | 5 | |||
699 | 6 | import os | ||
700 | 7 | |||
701 | 8 | |||
702 | 9 | class Fstab(file): | ||
703 | 10 | """This class extends file in order to implement a file reader/writer | ||
704 | 11 | for file `/etc/fstab` | ||
705 | 12 | """ | ||
706 | 13 | |||
707 | 14 | class Entry(object): | ||
708 | 15 | """Entry class represents a non-comment line on the `/etc/fstab` file | ||
709 | 16 | """ | ||
710 | 17 | def __init__(self, device, mountpoint, filesystem, | ||
711 | 18 | options, d=0, p=0): | ||
712 | 19 | self.device = device | ||
713 | 20 | self.mountpoint = mountpoint | ||
714 | 21 | self.filesystem = filesystem | ||
715 | 22 | |||
716 | 23 | if not options: | ||
717 | 24 | options = "defaults" | ||
718 | 25 | |||
719 | 26 | self.options = options | ||
720 | 27 | self.d = d | ||
721 | 28 | self.p = p | ||
722 | 29 | |||
723 | 30 | def __eq__(self, o): | ||
724 | 31 | return str(self) == str(o) | ||
725 | 32 | |||
726 | 33 | def __str__(self): | ||
727 | 34 | return "{} {} {} {} {} {}".format(self.device, | ||
728 | 35 | self.mountpoint, | ||
729 | 36 | self.filesystem, | ||
730 | 37 | self.options, | ||
731 | 38 | self.d, | ||
732 | 39 | self.p) | ||
733 | 40 | |||
734 | 41 | DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') | ||
735 | 42 | |||
736 | 43 | def __init__(self, path=None): | ||
737 | 44 | if path: | ||
738 | 45 | self._path = path | ||
739 | 46 | else: | ||
740 | 47 | self._path = self.DEFAULT_PATH | ||
741 | 48 | file.__init__(self, self._path, 'r+') | ||
742 | 49 | |||
743 | 50 | def _hydrate_entry(self, line): | ||
744 | 51 | # NOTE: use split with no arguments to split on any | ||
745 | 52 | # whitespace including tabs | ||
746 | 53 | return Fstab.Entry(*filter( | ||
747 | 54 | lambda x: x not in ('', None), | ||
748 | 55 | line.strip("\n").split())) | ||
749 | 56 | |||
750 | 57 | @property | ||
751 | 58 | def entries(self): | ||
752 | 59 | self.seek(0) | ||
753 | 60 | for line in self.readlines(): | ||
754 | 61 | try: | ||
755 | 62 | if not line.startswith("#"): | ||
756 | 63 | yield self._hydrate_entry(line) | ||
757 | 64 | except ValueError: | ||
758 | 65 | pass | ||
759 | 66 | |||
760 | 67 | def get_entry_by_attr(self, attr, value): | ||
761 | 68 | for entry in self.entries: | ||
762 | 69 | e_attr = getattr(entry, attr) | ||
763 | 70 | if e_attr == value: | ||
764 | 71 | return entry | ||
765 | 72 | return None | ||
766 | 73 | |||
767 | 74 | def add_entry(self, entry): | ||
768 | 75 | if self.get_entry_by_attr('device', entry.device): | ||
769 | 76 | return False | ||
770 | 77 | |||
771 | 78 | self.write(str(entry) + '\n') | ||
772 | 79 | self.truncate() | ||
773 | 80 | return entry | ||
774 | 81 | |||
775 | 82 | def remove_entry(self, entry): | ||
776 | 83 | self.seek(0) | ||
777 | 84 | |||
778 | 85 | lines = self.readlines() | ||
779 | 86 | |||
780 | 87 | found = False | ||
781 | 88 | for index, line in enumerate(lines): | ||
782 | 89 | if not line.startswith("#"): | ||
783 | 90 | if self._hydrate_entry(line) == entry: | ||
784 | 91 | found = True | ||
785 | 92 | break | ||
786 | 93 | |||
787 | 94 | if not found: | ||
788 | 95 | return False | ||
789 | 96 | |||
790 | 97 | lines.remove(line) | ||
791 | 98 | |||
792 | 99 | self.seek(0) | ||
793 | 100 | self.write(''.join(lines)) | ||
794 | 101 | self.truncate() | ||
795 | 102 | return True | ||
796 | 103 | |||
797 | 104 | @classmethod | ||
798 | 105 | def remove_by_mountpoint(cls, mountpoint, path=None): | ||
799 | 106 | fstab = cls(path=path) | ||
800 | 107 | entry = fstab.get_entry_by_attr('mountpoint', mountpoint) | ||
801 | 108 | if entry: | ||
802 | 109 | return fstab.remove_entry(entry) | ||
803 | 110 | return False | ||
804 | 111 | |||
805 | 112 | @classmethod | ||
806 | 113 | def add(cls, device, mountpoint, filesystem, options=None, path=None): | ||
807 | 114 | return cls(path=path).add_entry(Fstab.Entry(device, | ||
808 | 115 | mountpoint, filesystem, | ||
809 | 116 | options=options)) | ||
810 | 0 | 117 | ||
811 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
812 | --- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:38:09 +0000 | |||
813 | +++ hooks/charmhelpers/core/hookenv.py 2014-07-11 17:34:59 +0000 | |||
814 | @@ -25,7 +25,7 @@ | |||
815 | 25 | def cached(func): | 25 | def cached(func): |
816 | 26 | """Cache return values for multiple executions of func + args | 26 | """Cache return values for multiple executions of func + args |
817 | 27 | 27 | ||
819 | 28 | For example: | 28 | For example:: |
820 | 29 | 29 | ||
821 | 30 | @cached | 30 | @cached |
822 | 31 | def unit_get(attribute): | 31 | def unit_get(attribute): |
823 | @@ -445,18 +445,19 @@ | |||
824 | 445 | class Hooks(object): | 445 | class Hooks(object): |
825 | 446 | """A convenient handler for hook functions. | 446 | """A convenient handler for hook functions. |
826 | 447 | 447 | ||
828 | 448 | Example: | 448 | Example:: |
829 | 449 | |||
830 | 449 | hooks = Hooks() | 450 | hooks = Hooks() |
831 | 450 | 451 | ||
832 | 451 | # register a hook, taking its name from the function name | 452 | # register a hook, taking its name from the function name |
833 | 452 | @hooks.hook() | 453 | @hooks.hook() |
834 | 453 | def install(): | 454 | def install(): |
836 | 454 | ... | 455 | pass # your code here |
837 | 455 | 456 | ||
838 | 456 | # register a hook, providing a custom hook name | 457 | # register a hook, providing a custom hook name |
839 | 457 | @hooks.hook("config-changed") | 458 | @hooks.hook("config-changed") |
840 | 458 | def config_changed(): | 459 | def config_changed(): |
842 | 459 | ... | 460 | pass # your code here |
843 | 460 | 461 | ||
844 | 461 | if __name__ == "__main__": | 462 | if __name__ == "__main__": |
845 | 462 | # execute a hook based on the name the program is called by | 463 | # execute a hook based on the name the program is called by |
846 | 463 | 464 | ||
847 | === modified file 'hooks/charmhelpers/core/host.py' | |||
848 | --- hooks/charmhelpers/core/host.py 2014-05-19 11:38:09 +0000 | |||
849 | +++ hooks/charmhelpers/core/host.py 2014-07-11 17:34:59 +0000 | |||
850 | @@ -12,11 +12,11 @@ | |||
851 | 12 | import string | 12 | import string |
852 | 13 | import subprocess | 13 | import subprocess |
853 | 14 | import hashlib | 14 | import hashlib |
854 | 15 | import apt_pkg | ||
855 | 16 | 15 | ||
856 | 17 | from collections import OrderedDict | 16 | from collections import OrderedDict |
857 | 18 | 17 | ||
858 | 19 | from hookenv import log | 18 | from hookenv import log |
859 | 19 | from fstab import Fstab | ||
860 | 20 | 20 | ||
861 | 21 | 21 | ||
862 | 22 | def service_start(service_name): | 22 | def service_start(service_name): |
863 | @@ -35,7 +35,8 @@ | |||
864 | 35 | 35 | ||
865 | 36 | 36 | ||
866 | 37 | def service_reload(service_name, restart_on_failure=False): | 37 | def service_reload(service_name, restart_on_failure=False): |
868 | 38 | """Reload a system service, optionally falling back to restart if reload fails""" | 38 | """Reload a system service, optionally falling back to restart if |
869 | 39 | reload fails""" | ||
870 | 39 | service_result = service('reload', service_name) | 40 | service_result = service('reload', service_name) |
871 | 40 | if not service_result and restart_on_failure: | 41 | if not service_result and restart_on_failure: |
872 | 41 | service_result = service('restart', service_name) | 42 | service_result = service('restart', service_name) |
873 | @@ -144,7 +145,19 @@ | |||
874 | 144 | target.write(content) | 145 | target.write(content) |
875 | 145 | 146 | ||
876 | 146 | 147 | ||
878 | 147 | def mount(device, mountpoint, options=None, persist=False): | 148 | def fstab_remove(mp): |
879 | 149 | """Remove the given mountpoint entry from /etc/fstab | ||
880 | 150 | """ | ||
881 | 151 | return Fstab.remove_by_mountpoint(mp) | ||
882 | 152 | |||
883 | 153 | |||
884 | 154 | def fstab_add(dev, mp, fs, options=None): | ||
885 | 155 | """Adds the given device entry to the /etc/fstab file | ||
886 | 156 | """ | ||
887 | 157 | return Fstab.add(dev, mp, fs, options=options) | ||
888 | 158 | |||
889 | 159 | |||
890 | 160 | def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): | ||
891 | 148 | """Mount a filesystem at a particular mountpoint""" | 161 | """Mount a filesystem at a particular mountpoint""" |
892 | 149 | cmd_args = ['mount'] | 162 | cmd_args = ['mount'] |
893 | 150 | if options is not None: | 163 | if options is not None: |
894 | @@ -155,9 +168,9 @@ | |||
895 | 155 | except subprocess.CalledProcessError, e: | 168 | except subprocess.CalledProcessError, e: |
896 | 156 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) | 169 | log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) |
897 | 157 | return False | 170 | return False |
898 | 171 | |||
899 | 158 | if persist: | 172 | if persist: |
902 | 159 | # TODO: update fstab | 173 | return fstab_add(device, mountpoint, filesystem, options=options) |
901 | 160 | pass | ||
903 | 161 | return True | 174 | return True |
904 | 162 | 175 | ||
905 | 163 | 176 | ||
906 | @@ -169,9 +182,9 @@ | |||
907 | 169 | except subprocess.CalledProcessError, e: | 182 | except subprocess.CalledProcessError, e: |
908 | 170 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) | 183 | log('Error unmounting {}\n{}'.format(mountpoint, e.output)) |
909 | 171 | return False | 184 | return False |
910 | 185 | |||
911 | 172 | if persist: | 186 | if persist: |
914 | 173 | # TODO: update fstab | 187 | return fstab_remove(mountpoint) |
913 | 174 | pass | ||
915 | 175 | return True | 188 | return True |
916 | 176 | 189 | ||
917 | 177 | 190 | ||
918 | @@ -198,13 +211,13 @@ | |||
919 | 198 | def restart_on_change(restart_map, stopstart=False): | 211 | def restart_on_change(restart_map, stopstart=False): |
920 | 199 | """Restart services based on configuration files changing | 212 | """Restart services based on configuration files changing |
921 | 200 | 213 | ||
923 | 201 | This function is used as a decorator, for example | 214 | This function is used as a decorator, for example:: |
924 | 202 | 215 | ||
925 | 203 | @restart_on_change({ | 216 | @restart_on_change({ |
926 | 204 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 217 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
927 | 205 | }) | 218 | }) |
928 | 206 | def ceph_client_changed(): | 219 | def ceph_client_changed(): |
930 | 207 | ... | 220 | pass # your code here |
931 | 208 | 221 | ||
932 | 209 | In this example, the cinder-api and cinder-volume services | 222 | In this example, the cinder-api and cinder-volume services |
933 | 210 | would be restarted if /etc/ceph/ceph.conf is changed by the | 223 | would be restarted if /etc/ceph/ceph.conf is changed by the |
934 | @@ -300,10 +313,13 @@ | |||
935 | 300 | 313 | ||
936 | 301 | def cmp_pkgrevno(package, revno, pkgcache=None): | 314 | def cmp_pkgrevno(package, revno, pkgcache=None): |
937 | 302 | '''Compare supplied revno with the revno of the installed package | 315 | '''Compare supplied revno with the revno of the installed package |
941 | 303 | 1 => Installed revno is greater than supplied arg | 316 | |
942 | 304 | 0 => Installed revno is the same as supplied arg | 317 | * 1 => Installed revno is greater than supplied arg |
943 | 305 | -1 => Installed revno is less than supplied arg | 318 | * 0 => Installed revno is the same as supplied arg |
944 | 319 | * -1 => Installed revno is less than supplied arg | ||
945 | 320 | |||
946 | 306 | ''' | 321 | ''' |
947 | 322 | import apt_pkg | ||
948 | 307 | if not pkgcache: | 323 | if not pkgcache: |
949 | 308 | apt_pkg.init() | 324 | apt_pkg.init() |
950 | 309 | pkgcache = apt_pkg.Cache() | 325 | pkgcache = apt_pkg.Cache() |
951 | 310 | 326 | ||
952 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
953 | --- hooks/charmhelpers/fetch/__init__.py 2014-06-04 13:06:13 +0000 | |||
954 | +++ hooks/charmhelpers/fetch/__init__.py 2014-07-11 17:34:59 +0000 | |||
955 | @@ -13,7 +13,6 @@ | |||
956 | 13 | config, | 13 | config, |
957 | 14 | log, | 14 | log, |
958 | 15 | ) | 15 | ) |
959 | 16 | import apt_pkg | ||
960 | 17 | import os | 16 | import os |
961 | 18 | 17 | ||
962 | 19 | 18 | ||
963 | @@ -117,6 +116,7 @@ | |||
964 | 117 | 116 | ||
965 | 118 | def filter_installed_packages(packages): | 117 | def filter_installed_packages(packages): |
966 | 119 | """Returns a list of packages that require installation""" | 118 | """Returns a list of packages that require installation""" |
967 | 119 | import apt_pkg | ||
968 | 120 | apt_pkg.init() | 120 | apt_pkg.init() |
969 | 121 | 121 | ||
970 | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if | 122 | # Tell apt to build an in-memory cache to prevent race conditions (if |
971 | @@ -235,31 +235,39 @@ | |||
972 | 235 | sources_var='install_sources', | 235 | sources_var='install_sources', |
973 | 236 | keys_var='install_keys'): | 236 | keys_var='install_keys'): |
974 | 237 | """ | 237 | """ |
976 | 238 | Configure multiple sources from charm configuration | 238 | Configure multiple sources from charm configuration. |
977 | 239 | |||
978 | 240 | The lists are encoded as yaml fragments in the configuration. | ||
979 | 241 | The fragment needs to be included as a string. | ||
980 | 239 | 242 | ||
981 | 240 | Example config: | 243 | Example config: |
983 | 241 | install_sources: | 244 | install_sources: | |
984 | 242 | - "ppa:foo" | 245 | - "ppa:foo" |
985 | 243 | - "http://example.com/repo precise main" | 246 | - "http://example.com/repo precise main" |
987 | 244 | install_keys: | 247 | install_keys: | |
988 | 245 | - null | 248 | - null |
989 | 246 | - "a1b2c3d4" | 249 | - "a1b2c3d4" |
990 | 247 | 250 | ||
991 | 248 | Note that 'null' (a.k.a. None) should not be quoted. | 251 | Note that 'null' (a.k.a. None) should not be quoted. |
992 | 249 | """ | 252 | """ |
1000 | 250 | sources = safe_load(config(sources_var)) | 253 | sources = safe_load((config(sources_var) or '').strip()) or [] |
1001 | 251 | keys = config(keys_var) | 254 | keys = safe_load((config(keys_var) or '').strip()) or None |
1002 | 252 | if keys is not None: | 255 | |
1003 | 253 | keys = safe_load(keys) | 256 | if isinstance(sources, basestring): |
1004 | 254 | if isinstance(sources, basestring) and ( | 257 | sources = [sources] |
1005 | 255 | keys is None or isinstance(keys, basestring)): | 258 | |
1006 | 256 | add_source(sources, keys) | 259 | if keys is None: |
1007 | 260 | for source in sources: | ||
1008 | 261 | add_source(source, None) | ||
1009 | 257 | else: | 262 | else: |
1015 | 258 | if not len(sources) == len(keys): | 263 | if isinstance(keys, basestring): |
1016 | 259 | msg = 'Install sources and keys lists are different lengths' | 264 | keys = [keys] |
1017 | 260 | raise SourceConfigError(msg) | 265 | |
1018 | 261 | for src_num in range(len(sources)): | 266 | if len(sources) != len(keys): |
1019 | 262 | add_source(sources[src_num], keys[src_num]) | 267 | raise SourceConfigError( |
1020 | 268 | 'Install sources and keys lists are different lengths') | ||
1021 | 269 | for source, key in zip(sources, keys): | ||
1022 | 270 | add_source(source, key) | ||
1023 | 263 | if update: | 271 | if update: |
1024 | 264 | apt_update(fatal=True) | 272 | apt_update(fatal=True) |
1025 | 265 | 273 | ||
1026 | 266 | 274 | ||
1027 | === modified file 'hooks/charmhelpers/fetch/bzrurl.py' | |||
1028 | --- hooks/charmhelpers/fetch/bzrurl.py 2013-11-06 03:48:26 +0000 | |||
1029 | +++ hooks/charmhelpers/fetch/bzrurl.py 2014-07-11 17:34:59 +0000 | |||
1030 | @@ -39,7 +39,8 @@ | |||
1031 | 39 | def install(self, source): | 39 | def install(self, source): |
1032 | 40 | url_parts = self.parse_url(source) | 40 | url_parts = self.parse_url(source) |
1033 | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] | 41 | branch_name = url_parts.path.strip("/").split("/")[-1] |
1035 | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name) | 42 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", |
1036 | 43 | branch_name) | ||
1037 | 43 | if not os.path.exists(dest_dir): | 44 | if not os.path.exists(dest_dir): |
1038 | 44 | mkdir(dest_dir, perms=0755) | 45 | mkdir(dest_dir, perms=0755) |
1039 | 45 | try: | 46 | try: |
1040 | 46 | 47 | ||
1041 | === added directory 'tests' | |||
1042 | === added file 'tests/00-setup' | |||
1043 | --- tests/00-setup 1970-01-01 00:00:00 +0000 | |||
1044 | +++ tests/00-setup 2014-07-11 17:34:59 +0000 | |||
1045 | @@ -0,0 +1,10 @@ | |||
1046 | 1 | #!/bin/bash | ||
1047 | 2 | |||
1048 | 3 | set -ex | ||
1049 | 4 | |||
1050 | 5 | sudo add-apt-repository --yes ppa:juju/stable | ||
1051 | 6 | sudo apt-get update --yes | ||
1052 | 7 | sudo apt-get install --yes python-amulet | ||
1053 | 8 | sudo apt-get install --yes python-glanceclient | ||
1054 | 9 | sudo apt-get install --yes python-keystoneclient | ||
1055 | 10 | sudo apt-get install --yes python-novaclient | ||
1056 | 0 | 11 | ||
1057 | === added file 'tests/10-basic-precise-essex' | |||
1058 | --- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000 | |||
1059 | +++ tests/10-basic-precise-essex 2014-07-11 17:34:59 +0000 | |||
1060 | @@ -0,0 +1,10 @@ | |||
1061 | 1 | #!/usr/bin/python | ||
1062 | 2 | |||
1063 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
1064 | 4 | precise-essex.""" | ||
1065 | 5 | |||
1066 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
1067 | 7 | |||
1068 | 8 | if __name__ == '__main__': | ||
1069 | 9 | deployment = NovaCCBasicDeployment(series='precise') | ||
1070 | 10 | deployment.run_tests() | ||
1071 | 0 | 11 | ||
1072 | === added file 'tests/11-basic-precise-folsom' | |||
1073 | --- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000 | |||
1074 | +++ tests/11-basic-precise-folsom 2014-07-11 17:34:59 +0000 | |||
1075 | @@ -0,0 +1,18 @@ | |||
1076 | 1 | #!/usr/bin/python | ||
1077 | 2 | |||
1078 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
1079 | 4 | precise-folsom.""" | ||
1080 | 5 | |||
1081 | 6 | import amulet | ||
1082 | 7 | from basic_deployment import NovaCCBasicDeployment | ||
1083 | 8 | |||
1084 | 9 | if __name__ == '__main__': | ||
1085 | 10 | # NOTE(coreycb): Skipping failing test until resolved. 'nova-manage db sync' | ||
1086 | 11 | # fails in shared-db-relation-changed (only fails on folsom) | ||
1087 | 12 | message = "Skipping failing test until resolved" | ||
1088 | 13 | amulet.raise_status(amulet.SKIP, msg=message) | ||
1089 | 14 | |||
1090 | 15 | deployment = NovaCCBasicDeployment(series='precise', | ||
1091 | 16 | openstack='cloud:precise-folsom', | ||
1092 | 17 | source='cloud:precise-updates/folsom') | ||
1093 | 18 | deployment.run_tests() | ||
1094 | 0 | 19 | ||
1095 | === added file 'tests/12-basic-precise-grizzly' | |||
1096 | --- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000 | |||
1097 | +++ tests/12-basic-precise-grizzly 2014-07-11 17:34:59 +0000 | |||
1098 | @@ -0,0 +1,12 @@ | |||
1099 | 1 | #!/usr/bin/python | ||
1100 | 2 | |||
1101 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
1102 | 4 | precise-grizzly.""" | ||
1103 | 5 | |||
1104 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
1105 | 7 | |||
1106 | 8 | if __name__ == '__main__': | ||
1107 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
1108 | 10 | openstack='cloud:precise-grizzly', | ||
1109 | 11 | source='cloud:precise-updates/grizzly') | ||
1110 | 12 | deployment.run_tests() | ||
1111 | 0 | 13 | ||
1112 | === added file 'tests/13-basic-precise-havana' | |||
1113 | --- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000 | |||
1114 | +++ tests/13-basic-precise-havana 2014-07-11 17:34:59 +0000 | |||
1115 | @@ -0,0 +1,12 @@ | |||
1116 | 1 | #!/usr/bin/python | ||
1117 | 2 | |||
1118 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
1119 | 4 | precise-havana.""" | ||
1120 | 5 | |||
1121 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
1122 | 7 | |||
1123 | 8 | if __name__ == '__main__': | ||
1124 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
1125 | 10 | openstack='cloud:precise-havana', | ||
1126 | 11 | source='cloud:precise-updates/havana') | ||
1127 | 12 | deployment.run_tests() | ||
1128 | 0 | 13 | ||
1129 | === added file 'tests/14-basic-precise-icehouse' | |||
1130 | --- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000 | |||
1131 | +++ tests/14-basic-precise-icehouse 2014-07-11 17:34:59 +0000 | |||
1132 | @@ -0,0 +1,12 @@ | |||
1133 | 1 | #!/usr/bin/python | ||
1134 | 2 | |||
1135 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
1136 | 4 | precise-icehouse.""" | ||
1137 | 5 | |||
1138 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
1139 | 7 | |||
1140 | 8 | if __name__ == '__main__': | ||
1141 | 9 | deployment = NovaCCBasicDeployment(series='precise', | ||
1142 | 10 | openstack='cloud:precise-icehouse', | ||
1143 | 11 | source='cloud:precise-updates/icehouse') | ||
1144 | 12 | deployment.run_tests() | ||
1145 | 0 | 13 | ||
1146 | === added file 'tests/15-basic-trusty-icehouse' | |||
1147 | --- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000 | |||
1148 | +++ tests/15-basic-trusty-icehouse 2014-07-11 17:34:59 +0000 | |||
1149 | @@ -0,0 +1,10 @@ | |||
1150 | 1 | #!/usr/bin/python | ||
1151 | 2 | |||
1152 | 3 | """Amulet tests on a basic nova cloud controller deployment on | ||
1153 | 4 | trusty-icehouse.""" | ||
1154 | 5 | |||
1155 | 6 | from basic_deployment import NovaCCBasicDeployment | ||
1156 | 7 | |||
1157 | 8 | if __name__ == '__main__': | ||
1158 | 9 | deployment = NovaCCBasicDeployment(series='trusty') | ||
1159 | 10 | deployment.run_tests() | ||
1160 | 0 | 11 | ||
1161 | === added file 'tests/README' | |||
1162 | --- tests/README 1970-01-01 00:00:00 +0000 | |||
1163 | +++ tests/README 2014-07-11 17:34:59 +0000 | |||
1164 | @@ -0,0 +1,47 @@ | |||
1165 | 1 | This directory provides Amulet tests that focus on verification of Nova Cloud | ||
1166 | 2 | Controller deployments. | ||
1167 | 3 | |||
1168 | 4 | If you use a web proxy server to access the web, you'll need to set the | ||
1169 | 5 | AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. | ||
1170 | 6 | |||
1171 | 7 | The following examples demonstrate different ways that tests can be executed. | ||
1172 | 8 | All examples are run from the charm's root directory. | ||
1173 | 9 | |||
1174 | 10 | * To run all tests (starting with 00-setup): | ||
1175 | 11 | |||
1176 | 12 | make test | ||
1177 | 13 | |||
1178 | 14 | * To run a specific test module (or modules): | ||
1179 | 15 | |||
1180 | 16 | juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
1181 | 17 | |||
1182 | 18 | * To run a specific test module (or modules), and keep the environment | ||
1183 | 19 | deployed after a failure: | ||
1184 | 20 | |||
1185 | 21 | juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
1186 | 22 | |||
1187 | 23 | * To re-run a test module against an already deployed environment (one | ||
1188 | 24 | that was deployed by a previous call to 'juju test --set-e'): | ||
1189 | 25 | |||
1190 | 26 | ./tests/15-basic-trusty-icehouse | ||
1191 | 27 | |||
1192 | 28 | For debugging and test development purposes, all code should be idempotent. | ||
1193 | 29 | In other words, the code should have the ability to be re-run without changing | ||
1194 | 30 | the results beyond the initial run. This enables editing and re-running of a | ||
1195 | 31 | test module against an already deployed environment, as described above. | ||
1196 | 32 | |||
1197 | 33 | Manual debugging tips: | ||
1198 | 34 | |||
1199 | 35 | * Set the following env vars before using the OpenStack CLI as admin: | ||
1200 | 36 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
1201 | 37 | export OS_TENANT_NAME=admin | ||
1202 | 38 | export OS_USERNAME=admin | ||
1203 | 39 | export OS_PASSWORD=openstack | ||
1204 | 40 | export OS_REGION_NAME=RegionOne | ||
1205 | 41 | |||
1206 | 42 | * Set the following env vars before using the OpenStack CLI as demoUser: | ||
1207 | 43 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
1208 | 44 | export OS_TENANT_NAME=demoTenant | ||
1209 | 45 | export OS_USERNAME=demoUser | ||
1210 | 46 | export OS_PASSWORD=password | ||
1211 | 47 | export OS_REGION_NAME=RegionOne | ||
1212 | 0 | 48 | ||
1213 | === added file 'tests/basic_deployment.py' | |||
1214 | --- tests/basic_deployment.py 1970-01-01 00:00:00 +0000 | |||
1215 | +++ tests/basic_deployment.py 2014-07-11 17:34:59 +0000 | |||
1216 | @@ -0,0 +1,520 @@ | |||
1217 | 1 | #!/usr/bin/python | ||
1218 | 2 | |||
1219 | 3 | import amulet | ||
1220 | 4 | |||
1221 | 5 | from charmhelpers.contrib.openstack.amulet.deployment import ( | ||
1222 | 6 | OpenStackAmuletDeployment | ||
1223 | 7 | ) | ||
1224 | 8 | |||
1225 | 9 | from charmhelpers.contrib.openstack.amulet.utils import ( | ||
1226 | 10 | OpenStackAmuletUtils, | ||
1227 | 11 | DEBUG, # flake8: noqa | ||
1228 | 12 | ERROR | ||
1229 | 13 | ) | ||
1230 | 14 | |||
1231 | 15 | # Use DEBUG to turn on debug logging | ||
1232 | 16 | u = OpenStackAmuletUtils(ERROR) | ||
1233 | 17 | |||
1234 | 18 | |||
class NovaCCBasicDeployment(OpenStackAmuletDeployment):
    """Amulet tests on a basic nova cloud controller deployment."""

    def __init__(self, series=None, openstack=None, source=None):
        """Deploy the entire test environment.

        :param series: Ubuntu series to deploy (e.g. 'precise', 'trusty')
        :param openstack: forwarded to the parent deployment; presumably the
            openstack-origin to configure -- confirm in
            OpenStackAmuletDeployment
        :param source: forwarded to the parent deployment alongside openstack
        """
        super(NovaCCBasicDeployment, self).__init__(series, openstack, source)
        # Order matters: services must exist before relations and config can
        # be applied, and the deployment must be up before sentries and
        # clients are initialized.
        self._add_services()
        self._add_relations()
        self._configure_services()
        self._deploy()
        self._initialize_tests()
1246 | 30 | |||
1247 | 31 | def _add_services(self): | ||
1248 | 32 | """Add the service that we're testing, including the number of units, | ||
1249 | 33 | where nova-cloud-controller is local, and the other charms are from | ||
1250 | 34 | the charm store.""" | ||
1251 | 35 | this_service = ('nova-cloud-controller', 1) | ||
1252 | 36 | other_services = [('mysql', 1), ('rabbitmq-server', 1), | ||
1253 | 37 | ('nova-compute', 2), ('keystone', 1), ('glance', 1)] | ||
1254 | 38 | super(NovaCCBasicDeployment, self)._add_services(this_service, | ||
1255 | 39 | other_services) | ||
1256 | 40 | |||
1257 | 41 | def _add_relations(self): | ||
1258 | 42 | """Add all of the relations for the services.""" | ||
1259 | 43 | relations = { | ||
1260 | 44 | 'nova-cloud-controller:shared-db': 'mysql:shared-db', | ||
1261 | 45 | 'nova-cloud-controller:identity-service': 'keystone:identity-service', | ||
1262 | 46 | 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp', | ||
1263 | 47 | 'nova-cloud-controller:cloud-compute': 'nova-compute:cloud-compute', | ||
1264 | 48 | 'nova-cloud-controller:image-service': 'glance:image-service', | ||
1265 | 49 | 'nova-compute:image-service': 'glance:image-service', | ||
1266 | 50 | 'nova-compute:shared-db': 'mysql:shared-db', | ||
1267 | 51 | 'nova-compute:amqp': 'rabbitmq-server:amqp', | ||
1268 | 52 | 'keystone:shared-db': 'mysql:shared-db', | ||
1269 | 53 | 'glance:identity-service': 'keystone:identity-service', | ||
1270 | 54 | 'glance:shared-db': 'mysql:shared-db', | ||
1271 | 55 | 'glance:amqp': 'rabbitmq-server:amqp' | ||
1272 | 56 | } | ||
1273 | 57 | super(NovaCCBasicDeployment, self)._add_relations(relations) | ||
1274 | 58 | |||
1275 | 59 | def _configure_services(self): | ||
1276 | 60 | """Configure all of the services.""" | ||
1277 | 61 | keystone_config = {'admin-password': 'openstack', | ||
1278 | 62 | 'admin-token': 'ubuntutesting'} | ||
1279 | 63 | configs = {'keystone': keystone_config} | ||
1280 | 64 | super(NovaCCBasicDeployment, self)._configure_services(configs) | ||
1281 | 65 | |||
    def _initialize_tests(self):
        """Perform final initialization before tests get run.

        Caches sentry units for each deployed service, authenticates the
        admin user against keystone and glance, ensures a demo
        tenant/role/user exist, and authenticates the demo user against
        keystone and nova.
        """
        # Access the sentries for inspecting service units
        self.mysql_sentry = self.d.sentry.unit['mysql/0']
        self.keystone_sentry = self.d.sentry.unit['keystone/0']
        self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0']
        self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0']
        self.nova_compute_sentry = self.d.sentry.unit['nova-compute/0']
        self.glance_sentry = self.d.sentry.unit['glance/0']

        # Authenticate admin with keystone
        self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
                                                      user='admin',
                                                      password='openstack',
                                                      tenant='admin')

        # Authenticate admin with glance endpoint
        self.glance = u.authenticate_glance_admin(self.keystone)

        # Create a demo tenant/role/user
        # Guarded by tenant_exists so re-runs against an already deployed
        # environment stay idempotent (see tests/README).
        self.demo_tenant = 'demoTenant'
        self.demo_role = 'demoRole'
        self.demo_user = 'demoUser'
        if not u.tenant_exists(self.keystone, self.demo_tenant):
            tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
                                                  description='demo tenant',
                                                  enabled=True)
            self.keystone.roles.create(name=self.demo_role)
            self.keystone.users.create(name=self.demo_user,
                                       password='password',
                                       tenant_id=tenant.id,
                                       email='demo@demo.com')

        # Authenticate demo user with keystone
        self.keystone_demo = \
            u.authenticate_keystone_user(self.keystone, user=self.demo_user,
                                         password='password',
                                         tenant=self.demo_tenant)

        # Authenticate demo user with nova-api
        self.nova_demo = u.authenticate_nova_user(self.keystone,
                                                  user=self.demo_user,
                                                  password='password',
                                                  tenant=self.demo_tenant)
1326 | 110 | |||
1327 | 111 | def test_services(self): | ||
1328 | 112 | """Verify the expected services are running on the corresponding | ||
1329 | 113 | service units.""" | ||
1330 | 114 | commands = { | ||
1331 | 115 | self.mysql_sentry: ['status mysql'], | ||
1332 | 116 | self.rabbitmq_sentry: ['sudo service rabbitmq-server status'], | ||
1333 | 117 | self.nova_cc_sentry: ['status nova-api-ec2', | ||
1334 | 118 | 'status nova-api-os-compute', | ||
1335 | 119 | 'status nova-objectstore', | ||
1336 | 120 | 'status nova-cert', | ||
1337 | 121 | 'status nova-scheduler'], | ||
1338 | 122 | self.nova_compute_sentry: ['status nova-compute', | ||
1339 | 123 | 'status nova-network', | ||
1340 | 124 | 'status nova-api'], | ||
1341 | 125 | self.keystone_sentry: ['status keystone'], | ||
1342 | 126 | self.glance_sentry: ['status glance-registry', 'status glance-api'] | ||
1343 | 127 | } | ||
1344 | 128 | if self._get_openstack_release() >= self.precise_grizzly: | ||
1345 | 129 | commands[self.nova_cc_sentry] = ['status nova-conductor'] | ||
1346 | 130 | |||
1347 | 131 | ret = u.validate_services(commands) | ||
1348 | 132 | if ret: | ||
1349 | 133 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
1350 | 134 | |||
1351 | 135 | def test_service_catalog(self): | ||
1352 | 136 | """Verify that the service catalog endpoint data is valid.""" | ||
1353 | 137 | endpoint_vol = {'adminURL': u.valid_url, | ||
1354 | 138 | 'region': 'RegionOne', | ||
1355 | 139 | 'publicURL': u.valid_url, | ||
1356 | 140 | 'internalURL': u.valid_url} | ||
1357 | 141 | endpoint_id = {'adminURL': u.valid_url, | ||
1358 | 142 | 'region': 'RegionOne', | ||
1359 | 143 | 'publicURL': u.valid_url, | ||
1360 | 144 | 'internalURL': u.valid_url} | ||
1361 | 145 | if self._get_openstack_release() >= self.precise_folsom: | ||
1362 | 146 | endpoint_vol['id'] = u.not_null | ||
1363 | 147 | endpoint_id['id'] = u.not_null | ||
1364 | 148 | expected = {'s3': [endpoint_vol], 'compute': [endpoint_vol], | ||
1365 | 149 | 'ec2': [endpoint_vol], 'identity': [endpoint_id]} | ||
1366 | 150 | actual = self.keystone_demo.service_catalog.get_endpoints() | ||
1367 | 151 | |||
1368 | 152 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) | ||
1369 | 153 | if ret: | ||
1370 | 154 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
1371 | 155 | |||
1372 | 156 | def test_openstack_compute_api_endpoint(self): | ||
1373 | 157 | """Verify the openstack compute api (osapi) endpoint data.""" | ||
1374 | 158 | endpoints = self.keystone.endpoints.list() | ||
1375 | 159 | admin_port = internal_port = public_port = '8774' | ||
1376 | 160 | expected = {'id': u.not_null, | ||
1377 | 161 | 'region': 'RegionOne', | ||
1378 | 162 | 'adminurl': u.valid_url, | ||
1379 | 163 | 'internalurl': u.valid_url, | ||
1380 | 164 | 'publicurl': u.valid_url, | ||
1381 | 165 | 'service_id': u.not_null} | ||
1382 | 166 | |||
1383 | 167 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
1384 | 168 | public_port, expected) | ||
1385 | 169 | if ret: | ||
1386 | 170 | message = 'osapi endpoint: {}'.format(ret) | ||
1387 | 171 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1388 | 172 | |||
1389 | 173 | def test_ec2_api_endpoint(self): | ||
1390 | 174 | """Verify the EC2 api endpoint data.""" | ||
1391 | 175 | endpoints = self.keystone.endpoints.list() | ||
1392 | 176 | admin_port = internal_port = public_port = '8773' | ||
1393 | 177 | expected = {'id': u.not_null, | ||
1394 | 178 | 'region': 'RegionOne', | ||
1395 | 179 | 'adminurl': u.valid_url, | ||
1396 | 180 | 'internalurl': u.valid_url, | ||
1397 | 181 | 'publicurl': u.valid_url, | ||
1398 | 182 | 'service_id': u.not_null} | ||
1399 | 183 | |||
1400 | 184 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
1401 | 185 | public_port, expected) | ||
1402 | 186 | if ret: | ||
1403 | 187 | message = 'EC2 endpoint: {}'.format(ret) | ||
1404 | 188 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1405 | 189 | |||
1406 | 190 | def test_s3_api_endpoint(self): | ||
1407 | 191 | """Verify the S3 api endpoint data.""" | ||
1408 | 192 | endpoints = self.keystone.endpoints.list() | ||
1409 | 193 | admin_port = internal_port = public_port = '3333' | ||
1410 | 194 | expected = {'id': u.not_null, | ||
1411 | 195 | 'region': 'RegionOne', | ||
1412 | 196 | 'adminurl': u.valid_url, | ||
1413 | 197 | 'internalurl': u.valid_url, | ||
1414 | 198 | 'publicurl': u.valid_url, | ||
1415 | 199 | 'service_id': u.not_null} | ||
1416 | 200 | |||
1417 | 201 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
1418 | 202 | public_port, expected) | ||
1419 | 203 | if ret: | ||
1420 | 204 | message = 'S3 endpoint: {}'.format(ret) | ||
1421 | 205 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1422 | 206 | |||
1423 | 207 | def test_nova_cc_shared_db_relation(self): | ||
1424 | 208 | """Verify the nova-cc to mysql shared-db relation data""" | ||
1425 | 209 | unit = self.nova_cc_sentry | ||
1426 | 210 | relation = ['shared-db', 'mysql:shared-db'] | ||
1427 | 211 | expected = { | ||
1428 | 212 | 'private-address': u.valid_ip, | ||
1429 | 213 | 'nova_database': 'nova', | ||
1430 | 214 | 'nova_username': 'nova', | ||
1431 | 215 | 'nova_hostname': u.valid_ip | ||
1432 | 216 | } | ||
1433 | 217 | |||
1434 | 218 | ret = u.validate_relation_data(unit, relation, expected) | ||
1435 | 219 | if ret: | ||
1436 | 220 | message = u.relation_error('nova-cc shared-db', ret) | ||
1437 | 221 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1438 | 222 | |||
1439 | 223 | def test_mysql_shared_db_relation(self): | ||
1440 | 224 | """Verify the mysql to nova-cc shared-db relation data""" | ||
1441 | 225 | unit = self.mysql_sentry | ||
1442 | 226 | relation = ['shared-db', 'nova-cloud-controller:shared-db'] | ||
1443 | 227 | expected = { | ||
1444 | 228 | 'private-address': u.valid_ip, | ||
1445 | 229 | 'nova_password': u.not_null, | ||
1446 | 230 | 'db_host': u.valid_ip | ||
1447 | 231 | } | ||
1448 | 232 | |||
1449 | 233 | ret = u.validate_relation_data(unit, relation, expected) | ||
1450 | 234 | if ret: | ||
1451 | 235 | message = u.relation_error('mysql shared-db', ret) | ||
1452 | 236 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1453 | 237 | |||
1454 | 238 | def test_nova_cc_identity_service_relation(self): | ||
1455 | 239 | """Verify the nova-cc to keystone identity-service relation data""" | ||
1456 | 240 | unit = self.nova_cc_sentry | ||
1457 | 241 | relation = ['identity-service', 'keystone:identity-service'] | ||
1458 | 242 | expected = { | ||
1459 | 243 | 'nova_internal_url': u.valid_url, | ||
1460 | 244 | 'nova_public_url': u.valid_url, | ||
1461 | 245 | 's3_public_url': u.valid_url, | ||
1462 | 246 | 's3_service': 's3', | ||
1463 | 247 | 'ec2_admin_url': u.valid_url, | ||
1464 | 248 | 'ec2_internal_url': u.valid_url, | ||
1465 | 249 | 'nova_service': 'nova', | ||
1466 | 250 | 's3_region': 'RegionOne', | ||
1467 | 251 | 'private-address': u.valid_ip, | ||
1468 | 252 | 'nova_region': 'RegionOne', | ||
1469 | 253 | 'ec2_public_url': u.valid_url, | ||
1470 | 254 | 'ec2_region': 'RegionOne', | ||
1471 | 255 | 's3_internal_url': u.valid_url, | ||
1472 | 256 | 's3_admin_url': u.valid_url, | ||
1473 | 257 | 'nova_admin_url': u.valid_url, | ||
1474 | 258 | 'ec2_service': 'ec2' | ||
1475 | 259 | } | ||
1476 | 260 | |||
1477 | 261 | ret = u.validate_relation_data(unit, relation, expected) | ||
1478 | 262 | if ret: | ||
1479 | 263 | message = u.relation_error('nova-cc identity-service', ret) | ||
1480 | 264 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1481 | 265 | |||
1482 | 266 | def test_keystone_identity_service_relation(self): | ||
1483 | 267 | """Verify the keystone to nova-cc identity-service relation data""" | ||
1484 | 268 | unit = self.keystone_sentry | ||
1485 | 269 | relation = ['identity-service', | ||
1486 | 270 | 'nova-cloud-controller:identity-service'] | ||
1487 | 271 | expected = { | ||
1488 | 272 | 'service_protocol': 'http', | ||
1489 | 273 | 'service_tenant': 'services', | ||
1490 | 274 | 'admin_token': 'ubuntutesting', | ||
1491 | 275 | 'service_password': u.not_null, | ||
1492 | 276 | 'service_port': '5000', | ||
1493 | 277 | 'auth_port': '35357', | ||
1494 | 278 | 'auth_protocol': 'http', | ||
1495 | 279 | 'private-address': u.valid_ip, | ||
1496 | 280 | 'https_keystone': 'False', | ||
1497 | 281 | 'auth_host': u.valid_ip, | ||
1498 | 282 | 'service_username': 's3_ec2_nova', | ||
1499 | 283 | 'service_tenant_id': u.not_null, | ||
1500 | 284 | 'service_host': u.valid_ip | ||
1501 | 285 | } | ||
1502 | 286 | |||
1503 | 287 | ret = u.validate_relation_data(unit, relation, expected) | ||
1504 | 288 | if ret: | ||
1505 | 289 | message = u.relation_error('keystone identity-service', ret) | ||
1506 | 290 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1507 | 291 | |||
1508 | 292 | def test_nova_cc_amqp_relation(self): | ||
1509 | 293 | """Verify the nova-cc to rabbitmq-server amqp relation data""" | ||
1510 | 294 | unit = self.nova_cc_sentry | ||
1511 | 295 | relation = ['amqp', 'rabbitmq-server:amqp'] | ||
1512 | 296 | expected = { | ||
1513 | 297 | 'username': 'nova', | ||
1514 | 298 | 'private-address': u.valid_ip, | ||
1515 | 299 | 'vhost': 'openstack' | ||
1516 | 300 | } | ||
1517 | 301 | |||
1518 | 302 | ret = u.validate_relation_data(unit, relation, expected) | ||
1519 | 303 | if ret: | ||
1520 | 304 | message = u.relation_error('nova-cc amqp', ret) | ||
1521 | 305 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1522 | 306 | |||
1523 | 307 | def test_rabbitmq_amqp_relation(self): | ||
1524 | 308 | """Verify the rabbitmq-server to nova-cc amqp relation data""" | ||
1525 | 309 | unit = self.rabbitmq_sentry | ||
1526 | 310 | relation = ['amqp', 'nova-cloud-controller:amqp'] | ||
1527 | 311 | expected = { | ||
1528 | 312 | 'private-address': u.valid_ip, | ||
1529 | 313 | 'password': u.not_null, | ||
1530 | 314 | 'hostname': u.valid_ip | ||
1531 | 315 | } | ||
1532 | 316 | |||
1533 | 317 | ret = u.validate_relation_data(unit, relation, expected) | ||
1534 | 318 | if ret: | ||
1535 | 319 | message = u.relation_error('rabbitmq amqp', ret) | ||
1536 | 320 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1537 | 321 | |||
1538 | 322 | def test_nova_cc_cloud_compute_relation(self): | ||
1539 | 323 | """Verify the nova-cc to nova-compute cloud-compute relation data""" | ||
1540 | 324 | unit = self.nova_cc_sentry | ||
1541 | 325 | relation = ['cloud-compute', 'nova-compute:cloud-compute'] | ||
1542 | 326 | expected = { | ||
1543 | 327 | 'volume_service': 'cinder', | ||
1544 | 328 | 'network_manager': 'flatdhcpmanager', | ||
1545 | 329 | 'ec2_host': u.valid_ip, | ||
1546 | 330 | 'private-address': u.valid_ip, | ||
1547 | 331 | 'restart_trigger': u.not_null | ||
1548 | 332 | } | ||
1549 | 333 | if self._get_openstack_release() == self.precise_essex: | ||
1550 | 334 | expected['volume_service'] = 'nova-volume' | ||
1551 | 335 | |||
1552 | 336 | ret = u.validate_relation_data(unit, relation, expected) | ||
1553 | 337 | if ret: | ||
1554 | 338 | message = u.relation_error('nova-cc cloud-compute', ret) | ||
1555 | 339 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1556 | 340 | |||
1557 | 341 | def test_nova_cloud_compute_relation(self): | ||
1558 | 342 | """Verify the nova-compute to nova-cc cloud-compute relation data""" | ||
1559 | 343 | unit = self.nova_compute_sentry | ||
1560 | 344 | relation = ['cloud-compute', 'nova-cloud-controller:cloud-compute'] | ||
1561 | 345 | expected = { | ||
1562 | 346 | 'private-address': u.valid_ip, | ||
1563 | 347 | } | ||
1564 | 348 | |||
1565 | 349 | ret = u.validate_relation_data(unit, relation, expected) | ||
1566 | 350 | if ret: | ||
1567 | 351 | message = u.relation_error('nova-compute cloud-compute', ret) | ||
1568 | 352 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1569 | 353 | |||
1570 | 354 | def test_nova_cc_image_service_relation(self): | ||
1571 | 355 | """Verify the nova-cc to glance image-service relation data""" | ||
1572 | 356 | unit = self.nova_cc_sentry | ||
1573 | 357 | relation = ['image-service', 'glance:image-service'] | ||
1574 | 358 | expected = { | ||
1575 | 359 | 'private-address': u.valid_ip, | ||
1576 | 360 | } | ||
1577 | 361 | |||
1578 | 362 | ret = u.validate_relation_data(unit, relation, expected) | ||
1579 | 363 | if ret: | ||
1580 | 364 | message = u.relation_error('nova-cc image-service', ret) | ||
1581 | 365 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1582 | 366 | |||
1583 | 367 | def test_glance_image_service_relation(self): | ||
1584 | 368 | """Verify the glance to nova-cc image-service relation data""" | ||
1585 | 369 | unit = self.glance_sentry | ||
1586 | 370 | relation = ['image-service', 'nova-cloud-controller:image-service'] | ||
1587 | 371 | expected = { | ||
1588 | 372 | 'private-address': u.valid_ip, | ||
1589 | 373 | 'glance-api-server': u.valid_url | ||
1590 | 374 | } | ||
1591 | 375 | |||
1592 | 376 | ret = u.validate_relation_data(unit, relation, expected) | ||
1593 | 377 | if ret: | ||
1594 | 378 | message = u.relation_error('glance image-service', ret) | ||
1595 | 379 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1596 | 380 | |||
1597 | 381 | def test_restart_on_config_change(self): | ||
1598 | 382 | """Verify that the specified services are restarted when the config | ||
1599 | 383 | is changed.""" | ||
1600 | 384 | # NOTE(coreycb): Skipping failing test on essex until resolved. | ||
1601 | 385 | # config-flags don't take effect on essex. | ||
1602 | 386 | if self._get_openstack_release() == self.precise_essex: | ||
1603 | 387 | u.log.error("Skipping failing test until resolved") | ||
1604 | 388 | return | ||
1605 | 389 | |||
1606 | 390 | services = ['nova-api-ec2', 'nova-api-os-compute', 'nova-objectstore', | ||
1607 | 391 | 'nova-cert', 'nova-scheduler', 'nova-conductor'] | ||
1608 | 392 | self.d.configure('nova-cloud-controller', | ||
1609 | 393 | {'config-flags': 'quota_cores=20,quota_instances=40,quota_ram=102400'}) | ||
1610 | 394 | pgrep_full = True | ||
1611 | 395 | |||
1612 | 396 | time = 20 | ||
1613 | 397 | conf = '/etc/nova/nova.conf' | ||
1614 | 398 | for s in services: | ||
1615 | 399 | if not u.service_restarted(self.nova_cc_sentry, s, conf, | ||
1616 | 400 | pgrep_full=True, sleep_time=time): | ||
1617 | 401 | msg = "service {} didn't restart after config change".format(s) | ||
1618 | 402 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1619 | 403 | time = 0 | ||
1620 | 404 | |||
    def test_nova_default_config(self):
        """Verify the data in the nova config file's default section.

        Expected values are built from the live relation data (rabbitmq,
        glance, mysql, keystone) so the test tracks whatever credentials and
        addresses the deployment actually negotiated.
        """
        # NOTE(coreycb): Currently no way to test on essex because config file
        # has no section headers.
        if self._get_openstack_release() == self.precise_essex:
            return

        unit = self.nova_cc_sentry
        conf = '/etc/nova/nova.conf'
        rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
                                                          'nova-cloud-controller:amqp')
        glance_relation = self.glance_sentry.relation('image-service',
                                                      'nova-cloud-controller:image-service')
        mysql_relation = self.mysql_sentry.relation('shared-db',
                                                    'nova-cloud-controller:shared-db')
        # Reconstruct the SQLAlchemy URI nova should have rendered from the
        # shared-db relation data.
        db_uri = "mysql://{}:{}@{}/{}".format('nova',
                                              mysql_relation['nova_password'],
                                              mysql_relation['db_host'],
                                              'nova')
        # Resolve keystone's public endpoint from the demo user's catalog.
        keystone_ep = self.keystone_demo.service_catalog.url_for(\
                          service_type='identity',
                          endpoint_type='publicURL')
        keystone_ec2 = "{}/ec2tokens".format(keystone_ep)

        expected = {'dhcpbridge_flagfile': '/etc/nova/nova.conf',
                    'dhcpbridge': '/usr/bin/nova-dhcpbridge',
                    'logdir': '/var/log/nova',
                    'state_path': '/var/lib/nova',
                    'lock_path': '/var/lock/nova',
                    'force_dhcp_release': 'True',
                    'iscsi_helper': 'tgtadm',
                    'libvirt_use_virtio_for_bridges': 'True',
                    'connection_type': 'libvirt',
                    'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
                    'verbose': 'True',
                    'ec2_private_dns_show_ip': 'True',
                    'api_paste_config': '/etc/nova/api-paste.ini',
                    'volumes_path': '/var/lib/nova/volumes',
                    'enabled_apis': 'ec2,osapi_compute,metadata',
                    'auth_strategy': 'keystone',
                    'compute_driver': 'libvirt.LibvirtDriver',
                    'keystone_ec2_url': keystone_ec2,
                    'sql_connection': db_uri,
                    'rabbit_userid': 'nova',
                    'rabbit_virtual_host': 'openstack',
                    'rabbit_password': rabbitmq_relation['password'],
                    'rabbit_host': rabbitmq_relation['hostname'],
                    'glance_api_servers': glance_relation['glance-api-server'],
                    'network_manager': 'nova.network.manager.FlatDHCPManager',
                    's3_listen_port': '3333',
                    'osapi_compute_listen_port': '8774',
                    'ec2_listen_port': '8773'}

        ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
        if ret:
            message = "nova config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)
1678 | 462 | |||
1679 | 463 | |||
1680 | 464 | def test_nova_keystone_authtoken_config(self): | ||
1681 | 465 | """Verify the data in the nova config file's keystone_authtoken | ||
1682 | 466 | section. This data only exists since icehouse.""" | ||
1683 | 467 | if self._get_openstack_release() < self.precise_icehouse: | ||
1684 | 468 | return | ||
1685 | 469 | |||
1686 | 470 | unit = self.nova_cc_sentry | ||
1687 | 471 | conf = '/etc/nova/nova.conf' | ||
1688 | 472 | keystone_relation = self.keystone_sentry.relation('identity-service', | ||
1689 | 473 | 'nova-cloud-controller:identity-service') | ||
1690 | 474 | keystone_uri = "http://{}:{}/".format(keystone_relation['service_host'], | ||
1691 | 475 | keystone_relation['service_port']) | ||
1692 | 476 | expected = {'auth_uri': keystone_uri, | ||
1693 | 477 | 'auth_host': keystone_relation['service_host'], | ||
1694 | 478 | 'auth_port': keystone_relation['auth_port'], | ||
1695 | 479 | 'auth_protocol': keystone_relation['auth_protocol'], | ||
1696 | 480 | 'admin_tenant_name': keystone_relation['service_tenant'], | ||
1697 | 481 | 'admin_user': keystone_relation['service_username'], | ||
1698 | 482 | 'admin_password': keystone_relation['service_password']} | ||
1699 | 483 | |||
1700 | 484 | ret = u.validate_config_data(unit, conf, 'keystone_authtoken', expected) | ||
1701 | 485 | if ret: | ||
1702 | 486 | message = "nova config error: {}".format(ret) | ||
1703 | 487 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1704 | 488 | |||
1705 | 489 | def test_image_instance_create(self): | ||
1706 | 490 | """Create an image/instance, verify they exist, and delete them.""" | ||
1707 | 491 | # NOTE(coreycb): Skipping failing test on essex until resolved. essex | ||
1708 | 492 | # nova API calls are getting "Malformed request url (HTTP | ||
1709 | 493 | # 400)". | ||
1710 | 494 | if self._get_openstack_release() == self.precise_essex: | ||
1711 | 495 | u.log.error("Skipping failing test until resolved") | ||
1712 | 496 | return | ||
1713 | 497 | |||
1714 | 498 | image = u.create_cirros_image(self.glance, "cirros-image") | ||
1715 | 499 | if not image: | ||
1716 | 500 | amulet.raise_status(amulet.FAIL, msg="Image create failed") | ||
1717 | 501 | |||
1718 | 502 | instance = u.create_instance(self.nova_demo, "cirros-image", "cirros", | ||
1719 | 503 | "m1.tiny") | ||
1720 | 504 | if not instance: | ||
1721 | 505 | amulet.raise_status(amulet.FAIL, msg="Instance create failed") | ||
1722 | 506 | |||
1723 | 507 | found = False | ||
1724 | 508 | for instance in self.nova_demo.servers.list(): | ||
1725 | 509 | if instance.name == 'cirros': | ||
1726 | 510 | found = True | ||
1727 | 511 | if instance.status != 'ACTIVE': | ||
1728 | 512 | msg = "cirros instance is not active" | ||
1729 | 513 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1730 | 514 | |||
1731 | 515 | if not found: | ||
1732 | 516 | message = "nova cirros instance does not exist" | ||
1733 | 517 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1734 | 518 | |||
1735 | 519 | u.delete_image(self.glance, image) | ||
1736 | 520 | u.delete_instance(self.nova_demo, instance) | ||
1737 | 0 | 521 | ||
1738 | === added directory 'tests/charmhelpers' | |||
1739 | === added file 'tests/charmhelpers/__init__.py' | |||
1740 | === added directory 'tests/charmhelpers/contrib' | |||
1741 | === added file 'tests/charmhelpers/contrib/__init__.py' | |||
1742 | === added directory 'tests/charmhelpers/contrib/amulet' | |||
1743 | === added file 'tests/charmhelpers/contrib/amulet/__init__.py' | |||
1744 | === added file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
1745 | --- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
1746 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-11 17:34:59 +0000 | |||
1747 | @@ -0,0 +1,63 @@ | |||
1748 | 1 | import amulet | ||
1749 | 2 | import re | ||
1750 | 3 | |||
1751 | 4 | |||
class AmuletDeployment(object):
    """This class provides generic Amulet deployment and test runner
    methods."""

    def __init__(self, series):
        """Initialize the deployment environment.

        :param series: the Ubuntu series to deploy with (e.g. 'trusty')
        """
        self.series = series
        self.d = amulet.Deployment(series=self.series)

    def _get_charm_name(self, service_name):
        """Gets the charm name from the service name. Unique service names can
        be specified with a '-service#' suffix (e.g. mysql-service1)."""
        # Raw strings avoid the invalid '\-' escape in the original pattern.
        if re.match(r"^.*-service\d{1,3}$", service_name):
            charm_name = re.sub(r'-service\d{1,3}$', '', service_name)
        else:
            charm_name = service_name
        return charm_name

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        this_service is the local charm that we're focused on testing;
        other_services come from the charm store.  Both are
        (service_name, num_units) tuples.
        """
        svc, units = this_service
        # The local charm is resolved by service name alone; the charm name
        # computed here in the original was never used (dead code, removed).
        self.d.add(svc, units=units)

        for svc, units in other_services:
            charm_name = self._get_charm_name(svc)
            self.d.add(svc,
                       charm='cs:{}/{}'.format(self.series, charm_name),
                       units=units)

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.iteritems():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
        # Other exceptions propagate naturally; the original's bare
        # 'except: raise' was a no-op and has been removed.

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
1811 | 0 | 64 | ||
1812 | === added file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
1813 | --- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
1814 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-11 17:34:59 +0000 | |||
1815 | @@ -0,0 +1,157 @@ | |||
1816 | 1 | import ConfigParser | ||
1817 | 2 | import io | ||
1818 | 3 | import logging | ||
1819 | 4 | import re | ||
1820 | 5 | import sys | ||
1821 | 6 | from time import sleep | ||
1822 | 7 | |||
1823 | 8 | |||
1824 | 9 | class AmuletUtils(object): | ||
1825 | 10 | """This class provides common utility functions that are used by Amulet | ||
1826 | 11 | tests.""" | ||
1827 | 12 | |||
    def __init__(self, log_level=logging.ERROR):
        # Default to ERROR so test runs stay quiet unless debugging is
        # explicitly requested (callers pass DEBUG).
        self.log = self.get_logger(level=log_level)
1830 | 15 | |||
1831 | 16 | def get_logger(self, name="amulet-logger", level=logging.DEBUG): | ||
1832 | 17 | """Get a logger object that will log to stdout.""" | ||
1833 | 18 | log = logging | ||
1834 | 19 | logger = log.getLogger(name) | ||
1835 | 20 | fmt = \ | ||
1836 | 21 | log.Formatter("%(asctime)s %(funcName)s %(levelname)s: %(message)s") | ||
1837 | 22 | |||
1838 | 23 | handler = log.StreamHandler(stream=sys.stdout) | ||
1839 | 24 | handler.setLevel(level) | ||
1840 | 25 | handler.setFormatter(fmt) | ||
1841 | 26 | |||
1842 | 27 | logger.addHandler(handler) | ||
1843 | 28 | logger.setLevel(level) | ||
1844 | 29 | |||
1845 | 30 | return logger | ||
1846 | 31 | |||
1847 | 32 | def valid_ip(self, ip): | ||
1848 | 33 | if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): | ||
1849 | 34 | return True | ||
1850 | 35 | else: | ||
1851 | 36 | return False | ||
1852 | 37 | |||
1853 | 38 | def valid_url(self, url): | ||
1854 | 39 | p = re.compile( | ||
1855 | 40 | r'^(?:http|ftp)s?://' | ||
1856 | 41 | r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # flake8: noqa | ||
1857 | 42 | r'localhost|' | ||
1858 | 43 | r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' | ||
1859 | 44 | r'(?::\d+)?' | ||
1860 | 45 | r'(?:/?|[/?]\S+)$', | ||
1861 | 46 | re.IGNORECASE) | ||
1862 | 47 | if p.match(url): | ||
1863 | 48 | return True | ||
1864 | 49 | else: | ||
1865 | 50 | return False | ||
1866 | 51 | |||
1867 | 52 | def validate_services(self, commands): | ||
1868 | 53 | """Verify the specified services are running on the corresponding | ||
1869 | 54 | service units.""" | ||
1870 | 55 | for k, v in commands.iteritems(): | ||
1871 | 56 | for cmd in v: | ||
1872 | 57 | output, code = k.run(cmd) | ||
1873 | 58 | if code != 0: | ||
1874 | 59 | return "command `{}` returned {}".format(cmd, str(code)) | ||
1875 | 60 | return None | ||
1876 | 61 | |||
1877 | 62 | def _get_config(self, unit, filename): | ||
1878 | 63 | """Get a ConfigParser object for parsing a unit's config file.""" | ||
1879 | 64 | file_contents = unit.file_contents(filename) | ||
1880 | 65 | config = ConfigParser.ConfigParser() | ||
1881 | 66 | config.readfp(io.StringIO(file_contents)) | ||
1882 | 67 | return config | ||
1883 | 68 | |||
1884 | 69 | def validate_config_data(self, sentry_unit, config_file, section, expected): | ||
1885 | 70 | """Verify that the specified section of the config file contains | ||
1886 | 71 | the expected option key:value pairs.""" | ||
1887 | 72 | config = self._get_config(sentry_unit, config_file) | ||
1888 | 73 | |||
1889 | 74 | if section != 'DEFAULT' and not config.has_section(section): | ||
1890 | 75 | return "section [{}] does not exist".format(section) | ||
1891 | 76 | |||
1892 | 77 | for k in expected.keys(): | ||
1893 | 78 | if not config.has_option(section, k): | ||
1894 | 79 | return "section [{}] is missing option {}".format(section, k) | ||
1895 | 80 | if config.get(section, k) != expected[k]: | ||
1896 | 81 | return "section [{}] {}:{} != expected {}:{}".format(section, | ||
1897 | 82 | k, config.get(section, k), k, expected[k]) | ||
1898 | 83 | return None | ||
1899 | 84 | |||
1900 | 85 | def _validate_dict_data(self, expected, actual): | ||
1901 | 86 | """Compare expected dictionary data vs actual dictionary data. | ||
1902 | 87 | The values in the 'expected' dictionary can be strings, bools, ints, | ||
1903 | 88 | longs, or can be a function that evaluate a variable and returns a | ||
1904 | 89 | bool.""" | ||
1905 | 90 | for k, v in expected.iteritems(): | ||
1906 | 91 | if k in actual: | ||
1907 | 92 | if isinstance(v, basestring) or \ | ||
1908 | 93 | isinstance(v, bool) or \ | ||
1909 | 94 | isinstance(v, (int, long)): | ||
1910 | 95 | if v != actual[k]: | ||
1911 | 96 | return "{}:{}".format(k, actual[k]) | ||
1912 | 97 | elif not v(actual[k]): | ||
1913 | 98 | return "{}:{}".format(k, actual[k]) | ||
1914 | 99 | else: | ||
1915 | 100 | return "key '{}' does not exist".format(k) | ||
1916 | 101 | return None | ||
1917 | 102 | |||
1918 | 103 | def validate_relation_data(self, sentry_unit, relation, expected): | ||
1919 | 104 | """Validate actual relation data based on expected relation data.""" | ||
1920 | 105 | actual = sentry_unit.relation(relation[0], relation[1]) | ||
1921 | 106 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1922 | 107 | return self._validate_dict_data(expected, actual) | ||
1923 | 108 | |||
1924 | 109 | def _validate_list_data(self, expected, actual): | ||
1925 | 110 | """Compare expected list vs actual list data.""" | ||
1926 | 111 | for e in expected: | ||
1927 | 112 | if e not in actual: | ||
1928 | 113 | return "expected item {} not found in actual list".format(e) | ||
1929 | 114 | return None | ||
1930 | 115 | |||
1931 | 116 | def not_null(self, string): | ||
1932 | 117 | if string != None: | ||
1933 | 118 | return True | ||
1934 | 119 | else: | ||
1935 | 120 | return False | ||
1936 | 121 | |||
1937 | 122 | def _get_file_mtime(self, sentry_unit, filename): | ||
1938 | 123 | """Get last modification time of file.""" | ||
1939 | 124 | return sentry_unit.file_stat(filename)['mtime'] | ||
1940 | 125 | |||
1941 | 126 | def _get_dir_mtime(self, sentry_unit, directory): | ||
1942 | 127 | """Get last modification time of directory.""" | ||
1943 | 128 | return sentry_unit.directory_stat(directory)['mtime'] | ||
1944 | 129 | |||
1945 | 130 | def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): | ||
1946 | 131 | """Determine start time of the process based on the last modification | ||
1947 | 132 | time of the /proc/pid directory. If pgrep_full is True, the process | ||
1948 | 133 | name is matched against the full command line.""" | ||
1949 | 134 | if pgrep_full: | ||
1950 | 135 | cmd = 'pgrep -o -f {}'.format(service) | ||
1951 | 136 | else: | ||
1952 | 137 | cmd = 'pgrep -o {}'.format(service) | ||
1953 | 138 | proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip()) | ||
1954 | 139 | return self._get_dir_mtime(sentry_unit, proc_dir) | ||
1955 | 140 | |||
1956 | 141 | def service_restarted(self, sentry_unit, service, filename, | ||
1957 | 142 | pgrep_full=False, sleep_time=20): | ||
1958 | 143 | """Compare a service's start time vs a file's last modification time | ||
1959 | 144 | (such as a config file for that service) to determine if the service | ||
1960 | 145 | has been restarted.""" | ||
1961 | 146 | sleep(sleep_time) | ||
1962 | 147 | if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \ | ||
1963 | 148 | self._get_file_mtime(sentry_unit, filename): | ||
1964 | 149 | return True | ||
1965 | 150 | else: | ||
1966 | 151 | return False | ||
1967 | 152 | |||
1968 | 153 | def relation_error(self, name, data): | ||
1969 | 154 | return 'unexpected relation data in {} - {}'.format(name, data) | ||
1970 | 155 | |||
1971 | 156 | def endpoint_error(self, name, data): | ||
1972 | 157 | return 'unexpected endpoint data in {} - {}'.format(name, data) | ||
1973 | 0 | 158 | ||
1974 | === added directory 'tests/charmhelpers/contrib/openstack' | |||
1975 | === added file 'tests/charmhelpers/contrib/openstack/__init__.py' | |||
1976 | === added directory 'tests/charmhelpers/contrib/openstack/amulet' | |||
1977 | === added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
1978 | === added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
1979 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
1980 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 17:34:59 +0000 | |||
1981 | @@ -0,0 +1,57 @@ | |||
1982 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
1983 | 2 | AmuletDeployment | ||
1984 | 3 | ) | ||
1985 | 4 | |||
1986 | 5 | |||
class OpenStackAmuletDeployment(AmuletDeployment):
    """This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms."""

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment.

        :param series: Ubuntu series (e.g. 'precise', 'trusty').
        :param openstack: openstack-origin value for OpenStack charms,
            e.g. 'cloud:precise-icehouse' (or None for distro default).
        :param source: 'source' config value for support charms (or None).
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin.

        Each service is a sequence whose first element is the service
        name. Support charms (mysql, mongodb, rabbitmq-server, ceph)
        take a 'source' option instead of 'openstack-origin'.
        """
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        # Build a new list rather than appending to other_services, so
        # the caller's list is not mutated as a side effect.
        services = other_services + [this_service]
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                charm_name = self._get_charm_name(svc[0])
                if charm_name not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[0], config)

        if self.source:
            for svc in services:
                charm_name = self._get_charm_name(svc[0])
                if charm_name in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[0], config)

    def _configure_services(self, configs):
        """Configure all of the services.

        :param configs: dict mapping service name -> config dict.
        """
        for service, config in configs.iteritems():
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
        release.

        Also sets self.<series>_<release> attributes so callers can
        compare against the returned value symbolically.
        """
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
2039 | 0 | 58 | ||
2040 | === added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
2041 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
2042 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 17:34:59 +0000 | |||
2043 | @@ -0,0 +1,253 @@ | |||
2044 | 1 | import logging | ||
2045 | 2 | import os | ||
2046 | 3 | import time | ||
2047 | 4 | import urllib | ||
2048 | 5 | |||
2049 | 6 | import glanceclient.v1.client as glance_client | ||
2050 | 7 | import keystoneclient.v2_0 as keystone_client | ||
2051 | 8 | import novaclient.v1_1.client as nova_client | ||
2052 | 9 | |||
2053 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
2054 | 11 | AmuletUtils | ||
2055 | 12 | ) | ||
2056 | 13 | |||
2057 | 14 | DEBUG = logging.DEBUG | ||
2058 | 15 | ERROR = logging.ERROR | ||
2059 | 16 | |||
2060 | 17 | |||
class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms."""

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data.

        The ports are matched as substrings of the endpoint URLs to find
        the endpoint under test.

        :returns: error string on mismatch or if no endpoint matches the
            given ports; None (implicitly) on success.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.

        :returns: error string on mismatch, None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        # Fix: previously 'return ret', which raised UnboundLocalError
        # for an empty 'expected' dict.
        return None

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
        data.

        Tenants are matched by name; each expected tenant must exist.

        :returns: error string on mismatch, None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        # Fix: previously 'return ret' (unbound when 'expected' is empty).
        return None

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
        data.

        :returns: error string on mismatch, None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        # Fix: previously 'return ret' (unbound when 'expected' is empty).
        return None

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
        data.

        :returns: error string on mismatch, None on success.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        # Fix: previously 'return ret' (unbound when 'expected' is empty).
        return None

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint.

        The keystone unit's private address is read from its shared-db
        relation with mysql.
        """
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        The image is cached locally under tests/ between runs.

        :returns: the active glance image, or None on timeout.
        """
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            # Fix: the download URL must not include the local 'tests/'
            # cache directory; the server layout is <version>/<image>.
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        # Fix: open the image in binary mode; text mode would corrupt
        # the upload on platforms with newline translation.
        with open(local_path, 'rb') as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        # Poll until the image goes active (up to ~27s).
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image.

        Deletion is confirmed by polling the image count, so this is
        only reliable when no other test is creating/deleting images
        concurrently.

        :returns: True on success, False on timeout.
        """
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance and wait for it to go ACTIVE
        (up to ~3 minutes).

        :returns: the active instance, or None on timeout.
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Deletion is confirmed by polling the server count, so this is
        only reliable when no other test is creating/deleting servers
        concurrently.

        :returns: True on success, False on timeout.
        """
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
Looks good, but some lint fixes are needed.