Merge lp:~corey.bryant/charms/trusty/swift-storage/amulet-basic into lp:~openstack-charmers-archive/charms/trusty/swift-storage/next
- Trusty Tahr (14.04)
- amulet-basic
- Merge into next
Proposed by
Corey Bryant
Status: | Merged |
---|---|
Merged at revision: | 33 |
Proposed branch: | lp:~corey.bryant/charms/trusty/swift-storage/amulet-basic |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/swift-storage/next |
Diff against target: |
1989 lines (+1565/-77) 29 files modified
Makefile (+12/-4) charm-helpers-hooks.yaml (+11/-0) charm-helpers-tests.yaml (+5/-0) charm-helpers.yaml (+0/-11) hooks/charmhelpers/contrib/hahelpers/cluster.py (+1/-0) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0) hooks/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0) hooks/charmhelpers/contrib/openstack/context.py (+45/-13) hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0) hooks/charmhelpers/contrib/openstack/templating.py (+22/-23) hooks/charmhelpers/contrib/openstack/utils.py (+3/-1) hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1) hooks/charmhelpers/contrib/storage/linux/utils.py (+1/-0) hooks/charmhelpers/core/hookenv.py (+5/-4) hooks/charmhelpers/core/host.py (+7/-5) hooks/charmhelpers/fetch/__init__.py (+23/-15) tests/00-setup (+11/-0) tests/10-basic-precise-essex (+9/-0) tests/11-basic-precise-folsom (+11/-0) tests/12-basic-precise-grizzly (+11/-0) tests/13-basic-precise-havana (+11/-0) tests/14-basic-precise-icehouse (+11/-0) tests/15-basic-trusty-icehouse (+9/-0) tests/README (+52/-0) tests/basic_deployment.py (+450/-0) tests/charmhelpers/contrib/amulet/deployment.py (+63/-0) tests/charmhelpers/contrib/amulet/utils.py (+157/-0) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+57/-0) tests/charmhelpers/contrib/openstack/amulet/utils.py (+253/-0) |
To merge this branch: | bzr merge lp:~corey.bryant/charms/trusty/swift-storage/amulet-basic |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
James Page | Approve | ||
Review via email: mp+226492@code.launchpad.net |
Commit message
Description of the change
To post a comment you must log in.
Revision history for this message
James Page (james-page) : | # |
review:
Approve
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'Makefile' | |||
2 | --- Makefile 2014-05-21 10:08:22 +0000 | |||
3 | +++ Makefile 2014-07-11 16:43:39 +0000 | |||
4 | @@ -3,15 +3,23 @@ | |||
5 | 3 | 3 | ||
6 | 4 | lint: | 4 | lint: |
7 | 5 | @flake8 --exclude hooks/charmhelpers hooks | 5 | @flake8 --exclude hooks/charmhelpers hooks |
9 | 6 | @flake8 --exclude hooks/charmhelpers unit_tests | 6 | @flake8 --exclude hooks/charmhelpers unit_tests tests |
10 | 7 | @charm proof | 7 | @charm proof |
11 | 8 | 8 | ||
12 | 9 | unit_test: | ||
13 | 10 | @echo Starting unit tests... | ||
14 | 11 | @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests | ||
15 | 12 | |||
16 | 9 | test: | 13 | test: |
19 | 10 | @echo Starting tests... | 14 | @echo Starting Amulet tests... |
20 | 11 | @$(PYTHON) /usr/bin/nosetests -v --nologcapture --with-coverage unit_tests | 15 | # coreycb note: The -v should only be temporary until Amulet sends |
21 | 16 | # raise_status() messages to stderr: | ||
22 | 17 | # https://bugs.launchpad.net/amulet/+bug/1320357 | ||
23 | 18 | @juju test -v -p AMULET_HTTP_PROXY | ||
24 | 12 | 19 | ||
25 | 13 | sync: | 20 | sync: |
27 | 14 | @charm-helper-sync -c charm-helpers.yaml | 21 | @charm-helper-sync -c charm-helpers-hooks.yaml |
28 | 22 | @charm-helper-sync -c charm-helpers-tests.yaml | ||
29 | 15 | 23 | ||
30 | 16 | publish: lint test | 24 | publish: lint test |
31 | 17 | bzr push lp:charms/swift-storage | 25 | bzr push lp:charms/swift-storage |
32 | 18 | 26 | ||
33 | === added file 'charm-helpers-hooks.yaml' | |||
34 | --- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000 | |||
35 | +++ charm-helpers-hooks.yaml 2014-07-11 16:43:39 +0000 | |||
36 | @@ -0,0 +1,11 @@ | |||
37 | 1 | branch: lp:charm-helpers | ||
38 | 2 | destination: hooks/charmhelpers | ||
39 | 3 | include: | ||
40 | 4 | - core | ||
41 | 5 | - contrib.openstack|inc=* | ||
42 | 6 | - contrib.storage | ||
43 | 7 | - fetch | ||
44 | 8 | - contrib.hahelpers: | ||
45 | 9 | - apache | ||
46 | 10 | - cluster | ||
47 | 11 | - payload.execd | ||
48 | 0 | 12 | ||
49 | === added file 'charm-helpers-tests.yaml' | |||
50 | --- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000 | |||
51 | +++ charm-helpers-tests.yaml 2014-07-11 16:43:39 +0000 | |||
52 | @@ -0,0 +1,5 @@ | |||
53 | 1 | branch: lp:charm-helpers | ||
54 | 2 | destination: tests/charmhelpers | ||
55 | 3 | include: | ||
56 | 4 | - contrib.amulet | ||
57 | 5 | - contrib.openstack.amulet | ||
58 | 0 | 6 | ||
59 | === removed file 'charm-helpers.yaml' | |||
60 | --- charm-helpers.yaml 2014-03-25 17:05:07 +0000 | |||
61 | +++ charm-helpers.yaml 1970-01-01 00:00:00 +0000 | |||
62 | @@ -1,11 +0,0 @@ | |||
63 | 1 | branch: lp:charm-helpers | ||
64 | 2 | destination: hooks/charmhelpers | ||
65 | 3 | include: | ||
66 | 4 | - core | ||
67 | 5 | - contrib.openstack|inc=* | ||
68 | 6 | - contrib.storage | ||
69 | 7 | - fetch | ||
70 | 8 | - contrib.hahelpers: | ||
71 | 9 | - apache | ||
72 | 10 | - cluster | ||
73 | 11 | - payload.execd | ||
74 | 12 | 0 | ||
75 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
76 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-02-24 17:52:34 +0000 | |||
77 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-11 16:43:39 +0000 | |||
78 | @@ -170,6 +170,7 @@ | |||
79 | 170 | 170 | ||
80 | 171 | :configs : OSTemplateRenderer: A config tempating object to inspect for | 171 | :configs : OSTemplateRenderer: A config tempating object to inspect for |
81 | 172 | a complete https context. | 172 | a complete https context. |
82 | 173 | |||
83 | 173 | :vip_setting: str: Setting in charm config that specifies | 174 | :vip_setting: str: Setting in charm config that specifies |
84 | 174 | VIP address. | 175 | VIP address. |
85 | 175 | ''' | 176 | ''' |
86 | 176 | 177 | ||
87 | === added directory 'hooks/charmhelpers/contrib/openstack/amulet' | |||
88 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
89 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
90 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
91 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 16:43:39 +0000 | |||
92 | @@ -0,0 +1,57 @@ | |||
93 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
94 | 2 | AmuletDeployment | ||
95 | 3 | ) | ||
96 | 4 | |||
97 | 5 | |||
98 | 6 | class OpenStackAmuletDeployment(AmuletDeployment): | ||
99 | 7 | """This class inherits from AmuletDeployment and has additional support | ||
100 | 8 | that is specifically for use by OpenStack charms.""" | ||
101 | 9 | |||
102 | 10 | def __init__(self, series, openstack=None, source=None): | ||
103 | 11 | """Initialize the deployment environment.""" | ||
104 | 12 | super(OpenStackAmuletDeployment, self).__init__(series) | ||
105 | 13 | self.openstack = openstack | ||
106 | 14 | self.source = source | ||
107 | 15 | |||
108 | 16 | def _add_services(self, this_service, other_services): | ||
109 | 17 | """Add services to the deployment and set openstack-origin.""" | ||
110 | 18 | super(OpenStackAmuletDeployment, self)._add_services(this_service, | ||
111 | 19 | other_services) | ||
112 | 20 | name = 0 | ||
113 | 21 | services = other_services | ||
114 | 22 | services.append(this_service) | ||
115 | 23 | use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] | ||
116 | 24 | |||
117 | 25 | if self.openstack: | ||
118 | 26 | for svc in services: | ||
119 | 27 | charm_name = self._get_charm_name(svc[name]) | ||
120 | 28 | if charm_name not in use_source: | ||
121 | 29 | config = {'openstack-origin': self.openstack} | ||
122 | 30 | self.d.configure(svc[name], config) | ||
123 | 31 | |||
124 | 32 | if self.source: | ||
125 | 33 | for svc in services: | ||
126 | 34 | charm_name = self._get_charm_name(svc[name]) | ||
127 | 35 | if charm_name in use_source: | ||
128 | 36 | config = {'source': self.source} | ||
129 | 37 | self.d.configure(svc[name], config) | ||
130 | 38 | |||
131 | 39 | def _configure_services(self, configs): | ||
132 | 40 | """Configure all of the services.""" | ||
133 | 41 | for service, config in configs.iteritems(): | ||
134 | 42 | self.d.configure(service, config) | ||
135 | 43 | |||
136 | 44 | def _get_openstack_release(self): | ||
137 | 45 | """Return an integer representing the enum value of the openstack | ||
138 | 46 | release.""" | ||
139 | 47 | self.precise_essex, self.precise_folsom, self.precise_grizzly, \ | ||
140 | 48 | self.precise_havana, self.precise_icehouse, \ | ||
141 | 49 | self.trusty_icehouse = range(6) | ||
142 | 50 | releases = { | ||
143 | 51 | ('precise', None): self.precise_essex, | ||
144 | 52 | ('precise', 'cloud:precise-folsom'): self.precise_folsom, | ||
145 | 53 | ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, | ||
146 | 54 | ('precise', 'cloud:precise-havana'): self.precise_havana, | ||
147 | 55 | ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, | ||
148 | 56 | ('trusty', None): self.trusty_icehouse} | ||
149 | 57 | return releases[(self.series, self.openstack)] | ||
150 | 0 | 58 | ||
151 | === added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py' | |||
152 | --- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
153 | +++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 16:43:39 +0000 | |||
154 | @@ -0,0 +1,253 @@ | |||
155 | 1 | import logging | ||
156 | 2 | import os | ||
157 | 3 | import time | ||
158 | 4 | import urllib | ||
159 | 5 | |||
160 | 6 | import glanceclient.v1.client as glance_client | ||
161 | 7 | import keystoneclient.v2_0 as keystone_client | ||
162 | 8 | import novaclient.v1_1.client as nova_client | ||
163 | 9 | |||
164 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
165 | 11 | AmuletUtils | ||
166 | 12 | ) | ||
167 | 13 | |||
168 | 14 | DEBUG = logging.DEBUG | ||
169 | 15 | ERROR = logging.ERROR | ||
170 | 16 | |||
171 | 17 | |||
172 | 18 | class OpenStackAmuletUtils(AmuletUtils): | ||
173 | 19 | """This class inherits from AmuletUtils and has additional support | ||
174 | 20 | that is specifically for use by OpenStack charms.""" | ||
175 | 21 | |||
176 | 22 | def __init__(self, log_level=ERROR): | ||
177 | 23 | """Initialize the deployment environment.""" | ||
178 | 24 | super(OpenStackAmuletUtils, self).__init__(log_level) | ||
179 | 25 | |||
180 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | ||
181 | 27 | public_port, expected): | ||
182 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | ||
183 | 29 | are used to find the matching endpoint.""" | ||
184 | 30 | found = False | ||
185 | 31 | for ep in endpoints: | ||
186 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | ||
187 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | ||
188 | 34 | and public_port in ep.publicurl: | ||
189 | 35 | found = True | ||
190 | 36 | actual = {'id': ep.id, | ||
191 | 37 | 'region': ep.region, | ||
192 | 38 | 'adminurl': ep.adminurl, | ||
193 | 39 | 'internalurl': ep.internalurl, | ||
194 | 40 | 'publicurl': ep.publicurl, | ||
195 | 41 | 'service_id': ep.service_id} | ||
196 | 42 | ret = self._validate_dict_data(expected, actual) | ||
197 | 43 | if ret: | ||
198 | 44 | return 'unexpected endpoint data - {}'.format(ret) | ||
199 | 45 | |||
200 | 46 | if not found: | ||
201 | 47 | return 'endpoint not found' | ||
202 | 48 | |||
203 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | ||
204 | 50 | """Validate a list of actual service catalog endpoints vs a list of | ||
205 | 51 | expected service catalog endpoints.""" | ||
206 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | ||
207 | 53 | for k, v in expected.iteritems(): | ||
208 | 54 | if k in actual: | ||
209 | 55 | ret = self._validate_dict_data(expected[k][0], actual[k][0]) | ||
210 | 56 | if ret: | ||
211 | 57 | return self.endpoint_error(k, ret) | ||
212 | 58 | else: | ||
213 | 59 | return "endpoint {} does not exist".format(k) | ||
214 | 60 | return ret | ||
215 | 61 | |||
216 | 62 | def validate_tenant_data(self, expected, actual): | ||
217 | 63 | """Validate a list of actual tenant data vs list of expected tenant | ||
218 | 64 | data.""" | ||
219 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | ||
220 | 66 | for e in expected: | ||
221 | 67 | found = False | ||
222 | 68 | for act in actual: | ||
223 | 69 | a = {'enabled': act.enabled, 'description': act.description, | ||
224 | 70 | 'name': act.name, 'id': act.id} | ||
225 | 71 | if e['name'] == a['name']: | ||
226 | 72 | found = True | ||
227 | 73 | ret = self._validate_dict_data(e, a) | ||
228 | 74 | if ret: | ||
229 | 75 | return "unexpected tenant data - {}".format(ret) | ||
230 | 76 | if not found: | ||
231 | 77 | return "tenant {} does not exist".format(e['name']) | ||
232 | 78 | return ret | ||
233 | 79 | |||
234 | 80 | def validate_role_data(self, expected, actual): | ||
235 | 81 | """Validate a list of actual role data vs a list of expected role | ||
236 | 82 | data.""" | ||
237 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | ||
238 | 84 | for e in expected: | ||
239 | 85 | found = False | ||
240 | 86 | for act in actual: | ||
241 | 87 | a = {'name': act.name, 'id': act.id} | ||
242 | 88 | if e['name'] == a['name']: | ||
243 | 89 | found = True | ||
244 | 90 | ret = self._validate_dict_data(e, a) | ||
245 | 91 | if ret: | ||
246 | 92 | return "unexpected role data - {}".format(ret) | ||
247 | 93 | if not found: | ||
248 | 94 | return "role {} does not exist".format(e['name']) | ||
249 | 95 | return ret | ||
250 | 96 | |||
251 | 97 | def validate_user_data(self, expected, actual): | ||
252 | 98 | """Validate a list of actual user data vs a list of expected user | ||
253 | 99 | data.""" | ||
254 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | ||
255 | 101 | for e in expected: | ||
256 | 102 | found = False | ||
257 | 103 | for act in actual: | ||
258 | 104 | a = {'enabled': act.enabled, 'name': act.name, | ||
259 | 105 | 'email': act.email, 'tenantId': act.tenantId, | ||
260 | 106 | 'id': act.id} | ||
261 | 107 | if e['name'] == a['name']: | ||
262 | 108 | found = True | ||
263 | 109 | ret = self._validate_dict_data(e, a) | ||
264 | 110 | if ret: | ||
265 | 111 | return "unexpected user data - {}".format(ret) | ||
266 | 112 | if not found: | ||
267 | 113 | return "user {} does not exist".format(e['name']) | ||
268 | 114 | return ret | ||
269 | 115 | |||
270 | 116 | def validate_flavor_data(self, expected, actual): | ||
271 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | ||
272 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | ||
273 | 119 | act = [a.name for a in actual] | ||
274 | 120 | return self._validate_list_data(expected, act) | ||
275 | 121 | |||
276 | 122 | def tenant_exists(self, keystone, tenant): | ||
277 | 123 | """Return True if tenant exists""" | ||
278 | 124 | return tenant in [t.name for t in keystone.tenants.list()] | ||
279 | 125 | |||
280 | 126 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | ||
281 | 127 | tenant): | ||
282 | 128 | """Authenticates admin user with the keystone admin endpoint.""" | ||
283 | 129 | service_ip = \ | ||
284 | 130 | keystone_sentry.relation('shared-db', | ||
285 | 131 | 'mysql:shared-db')['private-address'] | ||
286 | 132 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | ||
287 | 133 | return keystone_client.Client(username=user, password=password, | ||
288 | 134 | tenant_name=tenant, auth_url=ep) | ||
289 | 135 | |||
290 | 136 | def authenticate_keystone_user(self, keystone, user, password, tenant): | ||
291 | 137 | """Authenticates a regular user with the keystone public endpoint.""" | ||
292 | 138 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
293 | 139 | endpoint_type='publicURL') | ||
294 | 140 | return keystone_client.Client(username=user, password=password, | ||
295 | 141 | tenant_name=tenant, auth_url=ep) | ||
296 | 142 | |||
297 | 143 | def authenticate_glance_admin(self, keystone): | ||
298 | 144 | """Authenticates admin user with glance.""" | ||
299 | 145 | ep = keystone.service_catalog.url_for(service_type='image', | ||
300 | 146 | endpoint_type='adminURL') | ||
301 | 147 | return glance_client.Client(ep, token=keystone.auth_token) | ||
302 | 148 | |||
303 | 149 | def authenticate_nova_user(self, keystone, user, password, tenant): | ||
304 | 150 | """Authenticates a regular user with nova-api.""" | ||
305 | 151 | ep = keystone.service_catalog.url_for(service_type='identity', | ||
306 | 152 | endpoint_type='publicURL') | ||
307 | 153 | return nova_client.Client(username=user, api_key=password, | ||
308 | 154 | project_id=tenant, auth_url=ep) | ||
309 | 155 | |||
310 | 156 | def create_cirros_image(self, glance, image_name): | ||
311 | 157 | """Download the latest cirros image and upload it to glance.""" | ||
312 | 158 | http_proxy = os.getenv('AMULET_HTTP_PROXY') | ||
313 | 159 | self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) | ||
314 | 160 | if http_proxy: | ||
315 | 161 | proxies = {'http': http_proxy} | ||
316 | 162 | opener = urllib.FancyURLopener(proxies) | ||
317 | 163 | else: | ||
318 | 164 | opener = urllib.FancyURLopener() | ||
319 | 165 | |||
320 | 166 | f = opener.open("http://download.cirros-cloud.net/version/released") | ||
321 | 167 | version = f.read().strip() | ||
322 | 168 | cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version) | ||
323 | 169 | |||
324 | 170 | if not os.path.exists(cirros_img): | ||
325 | 171 | cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", | ||
326 | 172 | version, cirros_img) | ||
327 | 173 | opener.retrieve(cirros_url, cirros_img) | ||
328 | 174 | f.close() | ||
329 | 175 | |||
330 | 176 | with open(cirros_img) as f: | ||
331 | 177 | image = glance.images.create(name=image_name, is_public=True, | ||
332 | 178 | disk_format='qcow2', | ||
333 | 179 | container_format='bare', data=f) | ||
334 | 180 | count = 1 | ||
335 | 181 | status = image.status | ||
336 | 182 | while status != 'active' and count < 10: | ||
337 | 183 | time.sleep(3) | ||
338 | 184 | image = glance.images.get(image.id) | ||
339 | 185 | status = image.status | ||
340 | 186 | self.log.debug('image status: {}'.format(status)) | ||
341 | 187 | count += 1 | ||
342 | 188 | |||
343 | 189 | if status != 'active': | ||
344 | 190 | self.log.error('image creation timed out') | ||
345 | 191 | return None | ||
346 | 192 | |||
347 | 193 | return image | ||
348 | 194 | |||
349 | 195 | def delete_image(self, glance, image): | ||
350 | 196 | """Delete the specified image.""" | ||
351 | 197 | num_before = len(list(glance.images.list())) | ||
352 | 198 | glance.images.delete(image) | ||
353 | 199 | |||
354 | 200 | count = 1 | ||
355 | 201 | num_after = len(list(glance.images.list())) | ||
356 | 202 | while num_after != (num_before - 1) and count < 10: | ||
357 | 203 | time.sleep(3) | ||
358 | 204 | num_after = len(list(glance.images.list())) | ||
359 | 205 | self.log.debug('number of images: {}'.format(num_after)) | ||
360 | 206 | count += 1 | ||
361 | 207 | |||
362 | 208 | if num_after != (num_before - 1): | ||
363 | 209 | self.log.error('image deletion timed out') | ||
364 | 210 | return False | ||
365 | 211 | |||
366 | 212 | return True | ||
367 | 213 | |||
368 | 214 | def create_instance(self, nova, image_name, instance_name, flavor): | ||
369 | 215 | """Create the specified instance.""" | ||
370 | 216 | image = nova.images.find(name=image_name) | ||
371 | 217 | flavor = nova.flavors.find(name=flavor) | ||
372 | 218 | instance = nova.servers.create(name=instance_name, image=image, | ||
373 | 219 | flavor=flavor) | ||
374 | 220 | |||
375 | 221 | count = 1 | ||
376 | 222 | status = instance.status | ||
377 | 223 | while status != 'ACTIVE' and count < 60: | ||
378 | 224 | time.sleep(3) | ||
379 | 225 | instance = nova.servers.get(instance.id) | ||
380 | 226 | status = instance.status | ||
381 | 227 | self.log.debug('instance status: {}'.format(status)) | ||
382 | 228 | count += 1 | ||
383 | 229 | |||
384 | 230 | if status != 'ACTIVE': | ||
385 | 231 | self.log.error('instance creation timed out') | ||
386 | 232 | return None | ||
387 | 233 | |||
388 | 234 | return instance | ||
389 | 235 | |||
390 | 236 | def delete_instance(self, nova, instance): | ||
391 | 237 | """Delete the specified instance.""" | ||
392 | 238 | num_before = len(list(nova.servers.list())) | ||
393 | 239 | nova.servers.delete(instance) | ||
394 | 240 | |||
395 | 241 | count = 1 | ||
396 | 242 | num_after = len(list(nova.servers.list())) | ||
397 | 243 | while num_after != (num_before - 1) and count < 10: | ||
398 | 244 | time.sleep(3) | ||
399 | 245 | num_after = len(list(nova.servers.list())) | ||
400 | 246 | self.log.debug('number of instances: {}'.format(num_after)) | ||
401 | 247 | count += 1 | ||
402 | 248 | |||
403 | 249 | if num_after != (num_before - 1): | ||
404 | 250 | self.log.error('instance deletion timed out') | ||
405 | 251 | return False | ||
406 | 252 | |||
407 | 253 | return True | ||
408 | 0 | 254 | ||
409 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
410 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-05-19 11:41:35 +0000 | |||
411 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-11 16:43:39 +0000 | |||
412 | @@ -243,23 +243,31 @@ | |||
413 | 243 | 243 | ||
414 | 244 | 244 | ||
415 | 245 | class AMQPContext(OSContextGenerator): | 245 | class AMQPContext(OSContextGenerator): |
416 | 246 | interfaces = ['amqp'] | ||
417 | 247 | 246 | ||
419 | 248 | def __init__(self, ssl_dir=None): | 247 | def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): |
420 | 249 | self.ssl_dir = ssl_dir | 248 | self.ssl_dir = ssl_dir |
421 | 249 | self.rel_name = rel_name | ||
422 | 250 | self.relation_prefix = relation_prefix | ||
423 | 251 | self.interfaces = [rel_name] | ||
424 | 250 | 252 | ||
425 | 251 | def __call__(self): | 253 | def __call__(self): |
426 | 252 | log('Generating template context for amqp') | 254 | log('Generating template context for amqp') |
427 | 253 | conf = config() | 255 | conf = config() |
428 | 256 | user_setting = 'rabbit-user' | ||
429 | 257 | vhost_setting = 'rabbit-vhost' | ||
430 | 258 | if self.relation_prefix: | ||
431 | 259 | user_setting = self.relation_prefix + '-rabbit-user' | ||
432 | 260 | vhost_setting = self.relation_prefix + '-rabbit-vhost' | ||
433 | 261 | |||
434 | 254 | try: | 262 | try: |
437 | 255 | username = conf['rabbit-user'] | 263 | username = conf[user_setting] |
438 | 256 | vhost = conf['rabbit-vhost'] | 264 | vhost = conf[vhost_setting] |
439 | 257 | except KeyError as e: | 265 | except KeyError as e: |
440 | 258 | log('Could not generate shared_db context. ' | 266 | log('Could not generate shared_db context. ' |
441 | 259 | 'Missing required charm config options: %s.' % e) | 267 | 'Missing required charm config options: %s.' % e) |
442 | 260 | raise OSContextError | 268 | raise OSContextError |
443 | 261 | ctxt = {} | 269 | ctxt = {} |
445 | 262 | for rid in relation_ids('amqp'): | 270 | for rid in relation_ids(self.rel_name): |
446 | 263 | ha_vip_only = False | 271 | ha_vip_only = False |
447 | 264 | for unit in related_units(rid): | 272 | for unit in related_units(rid): |
448 | 265 | if relation_get('clustered', rid=rid, unit=unit): | 273 | if relation_get('clustered', rid=rid, unit=unit): |
449 | @@ -418,12 +426,13 @@ | |||
450 | 418 | """ | 426 | """ |
451 | 419 | Generates a context for an apache vhost configuration that configures | 427 | Generates a context for an apache vhost configuration that configures |
452 | 420 | HTTPS reverse proxying for one or many endpoints. Generated context | 428 | HTTPS reverse proxying for one or many endpoints. Generated context |
459 | 421 | looks something like: | 429 | looks something like:: |
460 | 422 | { | 430 | |
461 | 423 | 'namespace': 'cinder', | 431 | { |
462 | 424 | 'private_address': 'iscsi.mycinderhost.com', | 432 | 'namespace': 'cinder', |
463 | 425 | 'endpoints': [(8776, 8766), (8777, 8767)] | 433 | 'private_address': 'iscsi.mycinderhost.com', |
464 | 426 | } | 434 | 'endpoints': [(8776, 8766), (8777, 8767)] |
465 | 435 | } | ||
466 | 427 | 436 | ||
467 | 428 | The endpoints list consists of a tuples mapping external ports | 437 | The endpoints list consists of a tuples mapping external ports |
468 | 429 | to internal ports. | 438 | to internal ports. |
469 | @@ -541,6 +550,26 @@ | |||
470 | 541 | 550 | ||
471 | 542 | return nvp_ctxt | 551 | return nvp_ctxt |
472 | 543 | 552 | ||
473 | 553 | def n1kv_ctxt(self): | ||
474 | 554 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
475 | 555 | self.network_manager) | ||
476 | 556 | n1kv_config = neutron_plugin_attribute(self.plugin, 'config', | ||
477 | 557 | self.network_manager) | ||
478 | 558 | n1kv_ctxt = { | ||
479 | 559 | 'core_plugin': driver, | ||
480 | 560 | 'neutron_plugin': 'n1kv', | ||
481 | 561 | 'neutron_security_groups': self.neutron_security_groups, | ||
482 | 562 | 'local_ip': unit_private_ip(), | ||
483 | 563 | 'config': n1kv_config, | ||
484 | 564 | 'vsm_ip': config('n1kv-vsm-ip'), | ||
485 | 565 | 'vsm_username': config('n1kv-vsm-username'), | ||
486 | 566 | 'vsm_password': config('n1kv-vsm-password'), | ||
487 | 567 | 'restrict_policy_profiles': config( | ||
488 | 568 | 'n1kv_restrict_policy_profiles'), | ||
489 | 569 | } | ||
490 | 570 | |||
491 | 571 | return n1kv_ctxt | ||
492 | 572 | |||
493 | 544 | def neutron_ctxt(self): | 573 | def neutron_ctxt(self): |
494 | 545 | if https(): | 574 | if https(): |
495 | 546 | proto = 'https' | 575 | proto = 'https' |
496 | @@ -572,6 +601,8 @@ | |||
497 | 572 | ctxt.update(self.ovs_ctxt()) | 601 | ctxt.update(self.ovs_ctxt()) |
498 | 573 | elif self.plugin in ['nvp', 'nsx']: | 602 | elif self.plugin in ['nvp', 'nsx']: |
499 | 574 | ctxt.update(self.nvp_ctxt()) | 603 | ctxt.update(self.nvp_ctxt()) |
500 | 604 | elif self.plugin == 'n1kv': | ||
501 | 605 | ctxt.update(self.n1kv_ctxt()) | ||
502 | 575 | 606 | ||
503 | 576 | alchemy_flags = config('neutron-alchemy-flags') | 607 | alchemy_flags = config('neutron-alchemy-flags') |
504 | 577 | if alchemy_flags: | 608 | if alchemy_flags: |
505 | @@ -611,7 +642,7 @@ | |||
506 | 611 | The subordinate interface allows subordinates to export their | 642 | The subordinate interface allows subordinates to export their |
507 | 612 | configuration requirements to the principle for multiple config | 643 | configuration requirements to the principle for multiple config |
508 | 613 | files and multiple serivces. Ie, a subordinate that has interfaces | 644 | files and multiple serivces. Ie, a subordinate that has interfaces |
510 | 614 | to both glance and nova may export to following yaml blob as json: | 645 | to both glance and nova may export to following yaml blob as json:: |
511 | 615 | 646 | ||
512 | 616 | glance: | 647 | glance: |
513 | 617 | /etc/glance/glance-api.conf: | 648 | /etc/glance/glance-api.conf: |
514 | @@ -630,7 +661,8 @@ | |||
515 | 630 | 661 | ||
516 | 631 | It is then up to the principle charms to subscribe this context to | 662 | It is then up to the principle charms to subscribe this context to |
517 | 632 | the service+config file it is interestd in. Configuration data will | 663 | the service+config file it is interestd in. Configuration data will |
519 | 633 | be available in the template context, in glance's case, as: | 664 | be available in the template context, in glance's case, as:: |
520 | 665 | |||
521 | 634 | ctxt = { | 666 | ctxt = { |
522 | 635 | ... other context ... | 667 | ... other context ... |
523 | 636 | 'subordinate_config': { | 668 | 'subordinate_config': { |
524 | 637 | 669 | ||
525 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
526 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:41:35 +0000 | |||
527 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-11 16:43:39 +0000 | |||
528 | @@ -128,6 +128,20 @@ | |||
529 | 128 | 'server_packages': ['neutron-server', | 128 | 'server_packages': ['neutron-server', |
530 | 129 | 'neutron-plugin-vmware'], | 129 | 'neutron-plugin-vmware'], |
531 | 130 | 'server_services': ['neutron-server'] | 130 | 'server_services': ['neutron-server'] |
532 | 131 | }, | ||
533 | 132 | 'n1kv': { | ||
534 | 133 | 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', | ||
535 | 134 | 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', | ||
536 | 135 | 'contexts': [ | ||
537 | 136 | context.SharedDBContext(user=config('neutron-database-user'), | ||
538 | 137 | database=config('neutron-database'), | ||
539 | 138 | relation_prefix='neutron', | ||
540 | 139 | ssl_dir=NEUTRON_CONF_DIR)], | ||
541 | 140 | 'services': [], | ||
542 | 141 | 'packages': [['neutron-plugin-cisco']], | ||
543 | 142 | 'server_packages': ['neutron-server', | ||
544 | 143 | 'neutron-plugin-cisco'], | ||
545 | 144 | 'server_services': ['neutron-server'] | ||
546 | 131 | } | 145 | } |
547 | 132 | } | 146 | } |
548 | 133 | if release >= 'icehouse': | 147 | if release >= 'icehouse': |
549 | 134 | 148 | ||
550 | === modified file 'hooks/charmhelpers/contrib/openstack/templating.py' | |||
551 | --- hooks/charmhelpers/contrib/openstack/templating.py 2013-09-23 19:01:06 +0000 | |||
552 | +++ hooks/charmhelpers/contrib/openstack/templating.py 2014-07-11 16:43:39 +0000 | |||
553 | @@ -30,17 +30,17 @@ | |||
554 | 30 | loading dir. | 30 | loading dir. |
555 | 31 | 31 | ||
556 | 32 | A charm may also ship a templates dir with this module | 32 | A charm may also ship a templates dir with this module |
568 | 33 | and it will be appended to the bottom of the search list, eg: | 33 | and it will be appended to the bottom of the search list, eg:: |
569 | 34 | hooks/charmhelpers/contrib/openstack/templates. | 34 | |
570 | 35 | 35 | hooks/charmhelpers/contrib/openstack/templates | |
571 | 36 | :param templates_dir: str: Base template directory containing release | 36 | |
572 | 37 | sub-directories. | 37 | :param templates_dir (str): Base template directory containing release |
573 | 38 | :param os_release : str: OpenStack release codename to construct template | 38 | sub-directories. |
574 | 39 | loader. | 39 | :param os_release (str): OpenStack release codename to construct template |
575 | 40 | 40 | loader. | |
576 | 41 | :returns : jinja2.ChoiceLoader constructed with a list of | 41 | :returns: jinja2.ChoiceLoader constructed with a list of |
577 | 42 | jinja2.FilesystemLoaders, ordered in descending | 42 | jinja2.FilesystemLoaders, ordered in descending |
578 | 43 | order by OpenStack release. | 43 | order by OpenStack release. |
579 | 44 | """ | 44 | """ |
580 | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) | 45 | tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) |
581 | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] | 46 | for rel in OPENSTACK_CODENAMES.itervalues()] |
582 | @@ -111,7 +111,8 @@ | |||
583 | 111 | and ease the burden of managing config templates across multiple OpenStack | 111 | and ease the burden of managing config templates across multiple OpenStack |
584 | 112 | releases. | 112 | releases. |
585 | 113 | 113 | ||
587 | 114 | Basic usage: | 114 | Basic usage:: |
588 | 115 | |||
589 | 115 | # import some common context generates from charmhelpers | 116 | # import some common context generates from charmhelpers |
590 | 116 | from charmhelpers.contrib.openstack import context | 117 | from charmhelpers.contrib.openstack import context |
591 | 117 | 118 | ||
592 | @@ -131,21 +132,19 @@ | |||
593 | 131 | # write out all registered configs | 132 | # write out all registered configs |
594 | 132 | configs.write_all() | 133 | configs.write_all() |
595 | 133 | 134 | ||
597 | 134 | Details: | 135 | **OpenStack Releases and template loading** |
598 | 135 | 136 | ||
599 | 136 | OpenStack Releases and template loading | ||
600 | 137 | --------------------------------------- | ||
601 | 138 | When the object is instantiated, it is associated with a specific OS | 137 | When the object is instantiated, it is associated with a specific OS |
602 | 139 | release. This dictates how the template loader will be constructed. | 138 | release. This dictates how the template loader will be constructed. |
603 | 140 | 139 | ||
604 | 141 | The constructed loader attempts to load the template from several places | 140 | The constructed loader attempts to load the template from several places |
605 | 142 | in the following order: | 141 | in the following order: |
612 | 143 | - from the most recent OS release-specific template dir (if one exists) | 142 | - from the most recent OS release-specific template dir (if one exists) |
613 | 144 | - the base templates_dir | 143 | - the base templates_dir |
614 | 145 | - a template directory shipped in the charm with this helper file. | 144 | - a template directory shipped in the charm with this helper file. |
615 | 146 | 145 | ||
616 | 147 | 146 | For the example above, '/tmp/templates' contains the following structure:: | |
617 | 148 | For the example above, '/tmp/templates' contains the following structure: | 147 | |
618 | 149 | /tmp/templates/nova.conf | 148 | /tmp/templates/nova.conf |
619 | 150 | /tmp/templates/api-paste.ini | 149 | /tmp/templates/api-paste.ini |
620 | 151 | /tmp/templates/grizzly/api-paste.ini | 150 | /tmp/templates/grizzly/api-paste.ini |
621 | @@ -169,8 +168,8 @@ | |||
622 | 169 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows | 168 | $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows |
623 | 170 | us to ship common templates (haproxy, apache) with the helpers. | 169 | us to ship common templates (haproxy, apache) with the helpers. |
624 | 171 | 170 | ||
627 | 172 | Context generators | 171 | **Context generators** |
628 | 173 | --------------------------------------- | 172 | |
629 | 174 | Context generators are used to generate template contexts during hook | 173 | Context generators are used to generate template contexts during hook |
630 | 175 | execution. Doing so may require inspecting service relations, charm | 174 | execution. Doing so may require inspecting service relations, charm |
631 | 176 | config, etc. When registered, a config file is associated with a list | 175 | config, etc. When registered, a config file is associated with a list |
632 | 177 | 176 | ||
633 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
634 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-06-19 10:11:47 +0000 | |||
635 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-07-11 16:43:39 +0000 | |||
636 | @@ -84,6 +84,8 @@ | |||
637 | 84 | '''Derive OpenStack release codename from a given installation source.''' | 84 | '''Derive OpenStack release codename from a given installation source.''' |
638 | 85 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] | 85 | ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] |
639 | 86 | rel = '' | 86 | rel = '' |
640 | 87 | if src is None: | ||
641 | 88 | return rel | ||
642 | 87 | if src in ['distro', 'distro-proposed']: | 89 | if src in ['distro', 'distro-proposed']: |
643 | 88 | try: | 90 | try: |
644 | 89 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] | 91 | rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] |
645 | @@ -189,7 +191,7 @@ | |||
646 | 189 | for version, cname in vers_map.iteritems(): | 191 | for version, cname in vers_map.iteritems(): |
647 | 190 | if cname == codename: | 192 | if cname == codename: |
648 | 191 | return version | 193 | return version |
650 | 192 | #e = "Could not determine OpenStack version for package: %s" % pkg | 194 | # e = "Could not determine OpenStack version for package: %s" % pkg |
651 | 193 | # error_out(e) | 195 | # error_out(e) |
652 | 194 | 196 | ||
653 | 195 | 197 | ||
654 | 196 | 198 | ||
655 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
656 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-02-24 17:52:34 +0000 | |||
657 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-11 16:43:39 +0000 | |||
658 | @@ -303,7 +303,7 @@ | |||
659 | 303 | blk_device, fstype, system_services=[]): | 303 | blk_device, fstype, system_services=[]): |
660 | 304 | """ | 304 | """ |
661 | 305 | NOTE: This function must only be called from a single service unit for | 305 | NOTE: This function must only be called from a single service unit for |
663 | 306 | the same rbd_img otherwise data loss will occur. | 306 | the same rbd_img otherwise data loss will occur. |
664 | 307 | 307 | ||
665 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | 308 | Ensures given pool and RBD image exists, is mapped to a block device, |
666 | 309 | and the device is formatted and mounted at the given mount_point. | 309 | and the device is formatted and mounted at the given mount_point. |
667 | 310 | 310 | ||
668 | === modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py' | |||
669 | --- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-05-19 11:41:35 +0000 | |||
670 | +++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-07-11 16:43:39 +0000 | |||
671 | @@ -37,6 +37,7 @@ | |||
672 | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), | 37 | check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), |
673 | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) | 38 | 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) |
674 | 39 | 39 | ||
675 | 40 | |||
676 | 40 | def is_device_mounted(device): | 41 | def is_device_mounted(device): |
677 | 41 | '''Given a device path, return True if that device is mounted, and False | 42 | '''Given a device path, return True if that device is mounted, and False |
678 | 42 | if it isn't. | 43 | if it isn't. |
679 | 43 | 44 | ||
680 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
681 | --- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:41:35 +0000 | |||
682 | +++ hooks/charmhelpers/core/hookenv.py 2014-07-11 16:43:39 +0000 | |||
683 | @@ -25,7 +25,7 @@ | |||
684 | 25 | def cached(func): | 25 | def cached(func): |
685 | 26 | """Cache return values for multiple executions of func + args | 26 | """Cache return values for multiple executions of func + args |
686 | 27 | 27 | ||
688 | 28 | For example: | 28 | For example:: |
689 | 29 | 29 | ||
690 | 30 | @cached | 30 | @cached |
691 | 31 | def unit_get(attribute): | 31 | def unit_get(attribute): |
692 | @@ -445,18 +445,19 @@ | |||
693 | 445 | class Hooks(object): | 445 | class Hooks(object): |
694 | 446 | """A convenient handler for hook functions. | 446 | """A convenient handler for hook functions. |
695 | 447 | 447 | ||
697 | 448 | Example: | 448 | Example:: |
698 | 449 | |||
699 | 449 | hooks = Hooks() | 450 | hooks = Hooks() |
700 | 450 | 451 | ||
701 | 451 | # register a hook, taking its name from the function name | 452 | # register a hook, taking its name from the function name |
702 | 452 | @hooks.hook() | 453 | @hooks.hook() |
703 | 453 | def install(): | 454 | def install(): |
705 | 454 | ... | 455 | pass # your code here |
706 | 455 | 456 | ||
707 | 456 | # register a hook, providing a custom hook name | 457 | # register a hook, providing a custom hook name |
708 | 457 | @hooks.hook("config-changed") | 458 | @hooks.hook("config-changed") |
709 | 458 | def config_changed(): | 459 | def config_changed(): |
711 | 459 | ... | 460 | pass # your code here |
712 | 460 | 461 | ||
713 | 461 | if __name__ == "__main__": | 462 | if __name__ == "__main__": |
714 | 462 | # execute a hook based on the name the program is called by | 463 | # execute a hook based on the name the program is called by |
715 | 463 | 464 | ||
716 | === modified file 'hooks/charmhelpers/core/host.py' | |||
717 | --- hooks/charmhelpers/core/host.py 2014-06-19 10:11:47 +0000 | |||
718 | +++ hooks/charmhelpers/core/host.py 2014-07-11 16:43:39 +0000 | |||
719 | @@ -211,13 +211,13 @@ | |||
720 | 211 | def restart_on_change(restart_map, stopstart=False): | 211 | def restart_on_change(restart_map, stopstart=False): |
721 | 212 | """Restart services based on configuration files changing | 212 | """Restart services based on configuration files changing |
722 | 213 | 213 | ||
724 | 214 | This function is used as a decorator, for example | 214 | This function is used as a decorator, for example:: |
725 | 215 | 215 | ||
726 | 216 | @restart_on_change({ | 216 | @restart_on_change({ |
727 | 217 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] | 217 | '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] |
728 | 218 | }) | 218 | }) |
729 | 219 | def ceph_client_changed(): | 219 | def ceph_client_changed(): |
731 | 220 | ... | 220 | pass # your code here |
732 | 221 | 221 | ||
733 | 222 | In this example, the cinder-api and cinder-volume services | 222 | In this example, the cinder-api and cinder-volume services |
734 | 223 | would be restarted if /etc/ceph/ceph.conf is changed by the | 223 | would be restarted if /etc/ceph/ceph.conf is changed by the |
735 | @@ -313,9 +313,11 @@ | |||
736 | 313 | 313 | ||
737 | 314 | def cmp_pkgrevno(package, revno, pkgcache=None): | 314 | def cmp_pkgrevno(package, revno, pkgcache=None): |
738 | 315 | '''Compare supplied revno with the revno of the installed package | 315 | '''Compare supplied revno with the revno of the installed package |
742 | 316 | 1 => Installed revno is greater than supplied arg | 316 | |
743 | 317 | 0 => Installed revno is the same as supplied arg | 317 | * 1 => Installed revno is greater than supplied arg |
744 | 318 | -1 => Installed revno is less than supplied arg | 318 | * 0 => Installed revno is the same as supplied arg |
745 | 319 | * -1 => Installed revno is less than supplied arg | ||
746 | 320 | |||
747 | 319 | ''' | 321 | ''' |
748 | 320 | import apt_pkg | 322 | import apt_pkg |
749 | 321 | if not pkgcache: | 323 | if not pkgcache: |
750 | 322 | 324 | ||
751 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
752 | --- hooks/charmhelpers/fetch/__init__.py 2014-06-19 10:11:47 +0000 | |||
753 | +++ hooks/charmhelpers/fetch/__init__.py 2014-07-11 16:43:39 +0000 | |||
754 | @@ -235,31 +235,39 @@ | |||
755 | 235 | sources_var='install_sources', | 235 | sources_var='install_sources', |
756 | 236 | keys_var='install_keys'): | 236 | keys_var='install_keys'): |
757 | 237 | """ | 237 | """ |
759 | 238 | Configure multiple sources from charm configuration | 238 | Configure multiple sources from charm configuration. |
760 | 239 | |||
761 | 240 | The lists are encoded as yaml fragments in the configuration. | ||
762 | 241 | The fragment needs to be included as a string. | ||
763 | 239 | 242 | ||
764 | 240 | Example config: | 243 | Example config: |
766 | 241 | install_sources: | 244 | install_sources: | |
767 | 242 | - "ppa:foo" | 245 | - "ppa:foo" |
768 | 243 | - "http://example.com/repo precise main" | 246 | - "http://example.com/repo precise main" |
770 | 244 | install_keys: | 247 | install_keys: | |
771 | 245 | - null | 248 | - null |
772 | 246 | - "a1b2c3d4" | 249 | - "a1b2c3d4" |
773 | 247 | 250 | ||
774 | 248 | Note that 'null' (a.k.a. None) should not be quoted. | 251 | Note that 'null' (a.k.a. None) should not be quoted. |
775 | 249 | """ | 252 | """ |
783 | 250 | sources = safe_load(config(sources_var)) | 253 | sources = safe_load((config(sources_var) or '').strip()) or [] |
784 | 251 | keys = config(keys_var) | 254 | keys = safe_load((config(keys_var) or '').strip()) or None |
785 | 252 | if keys is not None: | 255 | |
786 | 253 | keys = safe_load(keys) | 256 | if isinstance(sources, basestring): |
787 | 254 | if isinstance(sources, basestring) and ( | 257 | sources = [sources] |
788 | 255 | keys is None or isinstance(keys, basestring)): | 258 | |
789 | 256 | add_source(sources, keys) | 259 | if keys is None: |
790 | 260 | for source in sources: | ||
791 | 261 | add_source(source, None) | ||
792 | 257 | else: | 262 | else: |
798 | 258 | if not len(sources) == len(keys): | 263 | if isinstance(keys, basestring): |
799 | 259 | msg = 'Install sources and keys lists are different lengths' | 264 | keys = [keys] |
800 | 260 | raise SourceConfigError(msg) | 265 | |
801 | 261 | for src_num in range(len(sources)): | 266 | if len(sources) != len(keys): |
802 | 262 | add_source(sources[src_num], keys[src_num]) | 267 | raise SourceConfigError( |
803 | 268 | 'Install sources and keys lists are different lengths') | ||
804 | 269 | for source, key in zip(sources, keys): | ||
805 | 270 | add_source(source, key) | ||
806 | 263 | if update: | 271 | if update: |
807 | 264 | apt_update(fatal=True) | 272 | apt_update(fatal=True) |
808 | 265 | 273 | ||
809 | 266 | 274 | ||
810 | === added directory 'tests' | |||
811 | === added file 'tests/00-setup' | |||
812 | --- tests/00-setup 1970-01-01 00:00:00 +0000 | |||
813 | +++ tests/00-setup 2014-07-11 16:43:39 +0000 | |||
814 | @@ -0,0 +1,11 @@ | |||
815 | 1 | #!/bin/bash | ||
816 | 2 | |||
817 | 3 | set -ex | ||
818 | 4 | |||
819 | 5 | sudo add-apt-repository --yes ppa:juju/stable | ||
820 | 6 | sudo apt-get update --yes | ||
821 | 7 | sudo apt-get install --yes python-amulet | ||
822 | 8 | sudo apt-get install --yes python-swiftclient | ||
823 | 9 | sudo apt-get install --yes python-glanceclient | ||
824 | 10 | sudo apt-get install --yes python-keystoneclient | ||
825 | 11 | sudo apt-get install --yes python-novaclient | ||
826 | 0 | 12 | ||
827 | === added file 'tests/10-basic-precise-essex' | |||
828 | --- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000 | |||
829 | +++ tests/10-basic-precise-essex 2014-07-11 16:43:39 +0000 | |||
830 | @@ -0,0 +1,9 @@ | |||
831 | 1 | #!/usr/bin/python | ||
832 | 2 | |||
833 | 3 | """Amulet tests on a basic swift-storage deployment on precise-essex.""" | ||
834 | 4 | |||
835 | 5 | from basic_deployment import SwiftStorageBasicDeployment | ||
836 | 6 | |||
837 | 7 | if __name__ == '__main__': | ||
838 | 8 | deployment = SwiftStorageBasicDeployment(series='precise') | ||
839 | 9 | deployment.run_tests() | ||
840 | 0 | 10 | ||
841 | === added file 'tests/11-basic-precise-folsom' | |||
842 | --- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000 | |||
843 | +++ tests/11-basic-precise-folsom 2014-07-11 16:43:39 +0000 | |||
844 | @@ -0,0 +1,11 @@ | |||
845 | 1 | #!/usr/bin/python | ||
846 | 2 | |||
847 | 3 | """Amulet tests on a basic swift-storage deployment on precise-folsom.""" | ||
848 | 4 | |||
849 | 5 | from basic_deployment import SwiftStorageBasicDeployment | ||
850 | 6 | |||
851 | 7 | if __name__ == '__main__': | ||
852 | 8 | deployment = SwiftStorageBasicDeployment(series='precise', | ||
853 | 9 | openstack='cloud:precise-folsom', | ||
854 | 10 | source='cloud:precise-updates/folsom') | ||
855 | 11 | deployment.run_tests() | ||
856 | 0 | 12 | ||
857 | === added file 'tests/12-basic-precise-grizzly' | |||
858 | --- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000 | |||
859 | +++ tests/12-basic-precise-grizzly 2014-07-11 16:43:39 +0000 | |||
860 | @@ -0,0 +1,11 @@ | |||
861 | 1 | #!/usr/bin/python | ||
862 | 2 | |||
863 | 3 | """Amulet tests on a basic swift-storage deployment on precise-grizzly.""" | ||
864 | 4 | |||
865 | 5 | from basic_deployment import SwiftStorageBasicDeployment | ||
866 | 6 | |||
867 | 7 | if __name__ == '__main__': | ||
868 | 8 | deployment = SwiftStorageBasicDeployment(series='precise', | ||
869 | 9 | openstack='cloud:precise-grizzly', | ||
870 | 10 | source='cloud:precise-updates/grizzly') | ||
871 | 11 | deployment.run_tests() | ||
872 | 0 | 12 | ||
873 | === added file 'tests/13-basic-precise-havana' | |||
874 | --- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000 | |||
875 | +++ tests/13-basic-precise-havana 2014-07-11 16:43:39 +0000 | |||
876 | @@ -0,0 +1,11 @@ | |||
877 | 1 | #!/usr/bin/python | ||
878 | 2 | |||
879 | 3 | """Amulet tests on a basic swift-storage deployment on precise-havana.""" | ||
880 | 4 | |||
881 | 5 | from basic_deployment import SwiftStorageBasicDeployment | ||
882 | 6 | |||
883 | 7 | if __name__ == '__main__': | ||
884 | 8 | deployment = SwiftStorageBasicDeployment(series='precise', | ||
885 | 9 | openstack='cloud:precise-havana', | ||
886 | 10 | source='cloud:precise-updates/havana') | ||
887 | 11 | deployment.run_tests() | ||
888 | 0 | 12 | ||
889 | === added file 'tests/14-basic-precise-icehouse' | |||
890 | --- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000 | |||
891 | +++ tests/14-basic-precise-icehouse 2014-07-11 16:43:39 +0000 | |||
892 | @@ -0,0 +1,11 @@ | |||
893 | 1 | #!/usr/bin/python | ||
894 | 2 | |||
895 | 3 | """Amulet tests on a basic swift-storage deployment on precise-icehouse.""" | ||
896 | 4 | |||
897 | 5 | from basic_deployment import SwiftStorageBasicDeployment | ||
898 | 6 | |||
899 | 7 | if __name__ == '__main__': | ||
900 | 8 | deployment = SwiftStorageBasicDeployment(series='precise', | ||
901 | 9 | openstack='cloud:precise-icehouse', | ||
902 | 10 | source='cloud:precise-updates/icehouse') | ||
903 | 11 | deployment.run_tests() | ||
904 | 0 | 12 | ||
905 | === added file 'tests/15-basic-trusty-icehouse' | |||
906 | --- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000 | |||
907 | +++ tests/15-basic-trusty-icehouse 2014-07-11 16:43:39 +0000 | |||
908 | @@ -0,0 +1,9 @@ | |||
909 | 1 | #!/usr/bin/python | ||
910 | 2 | |||
911 | 3 | """Amulet tests on a basic swift-storage deployment on trusty-icehouse.""" | ||
912 | 4 | |||
913 | 5 | from basic_deployment import SwiftStorageBasicDeployment | ||
914 | 6 | |||
915 | 7 | if __name__ == '__main__': | ||
916 | 8 | deployment = SwiftStorageBasicDeployment(series='trusty') | ||
917 | 9 | deployment.run_tests() | ||
918 | 0 | 10 | ||
919 | === added file 'tests/README' | |||
920 | --- tests/README 1970-01-01 00:00:00 +0000 | |||
921 | +++ tests/README 2014-07-11 16:43:39 +0000 | |||
922 | @@ -0,0 +1,52 @@ | |||
923 | 1 | This directory provides Amulet tests that focus on verification of swift-storage | ||
924 | 2 | deployments. | ||
925 | 3 | |||
926 | 4 | If you use a web proxy server to access the web, you'll need to set the | ||
927 | 5 | AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. | ||
928 | 6 | |||
929 | 7 | The following examples demonstrate different ways that tests can be executed. | ||
930 | 8 | All examples are run from the charm's root directory. | ||
931 | 9 | |||
932 | 10 | * To run all tests (starting with 00-setup): | ||
933 | 11 | |||
934 | 12 | make test | ||
935 | 13 | |||
936 | 14 | * To run a specific test module (or modules): | ||
937 | 15 | |||
938 | 16 | juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
939 | 17 | |||
940 | 18 | * To run a specific test module (or modules), and keep the environment | ||
941 | 19 | deployed after a failure: | ||
942 | 20 | |||
943 | 21 | juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse | ||
944 | 22 | |||
945 | 23 | * To re-run a test module against an already deployed environment (one | ||
946 | 24 | that was deployed by a previous call to 'juju test --set-e'): | ||
947 | 25 | |||
948 | 26 | ./tests/15-basic-trusty-icehouse | ||
949 | 27 | |||
950 | 28 | For debugging and test development purposes, all code should be idempotent. | ||
951 | 29 | In other words, the code should have the ability to be re-run without changing | ||
952 | 30 | the results beyond the initial run. This enables editing and re-running of a | ||
953 | 31 | test module against an already deployed environment, as described above. | ||
954 | 32 | |||
955 | 33 | Manual debugging tips: | ||
956 | 34 | |||
957 | 35 | * Set the following env vars before using the OpenStack CLI as admin: | ||
958 | 36 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
959 | 37 | export OS_TENANT_NAME=admin | ||
960 | 38 | export OS_USERNAME=admin | ||
961 | 39 | export OS_PASSWORD=openstack | ||
962 | 40 | export OS_REGION_NAME=RegionOne | ||
963 | 41 | |||
964 | 42 | * Set the following env vars before using the OpenStack CLI as demoUser: | ||
965 | 43 | export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 | ||
966 | 44 | export OS_TENANT_NAME=demoTenant | ||
967 | 45 | export OS_USERNAME=demoUser | ||
968 | 46 | export OS_PASSWORD=password | ||
969 | 47 | export OS_REGION_NAME=RegionOne | ||
970 | 48 | |||
971 | 49 | * Sample swift command: | ||
972 | 50 | swift -A $OS_AUTH_URL --os-tenant-name services --os-username swift \ | ||
973 | 51 | --os-password password list | ||
974 | 52 | (where tenant/user names and password are in swift-proxy's nova.conf file) | ||
975 | 0 | 53 | ||
976 | === added file 'tests/basic_deployment.py' | |||
977 | --- tests/basic_deployment.py 1970-01-01 00:00:00 +0000 | |||
978 | +++ tests/basic_deployment.py 2014-07-11 16:43:39 +0000 | |||
979 | @@ -0,0 +1,450 @@ | |||
980 | 1 | #!/usr/bin/python | ||
981 | 2 | |||
982 | 3 | import amulet | ||
983 | 4 | import swiftclient | ||
984 | 5 | |||
985 | 6 | from charmhelpers.contrib.openstack.amulet.deployment import ( | ||
986 | 7 | OpenStackAmuletDeployment | ||
987 | 8 | ) | ||
988 | 9 | |||
989 | 10 | from charmhelpers.contrib.openstack.amulet.utils import ( | ||
990 | 11 | OpenStackAmuletUtils, | ||
991 | 12 | DEBUG, # flake8: noqa | ||
992 | 13 | ERROR | ||
993 | 14 | ) | ||
994 | 15 | |||
995 | 16 | # Use DEBUG to turn on debug logging | ||
996 | 17 | u = OpenStackAmuletUtils(ERROR) | ||
997 | 18 | |||
998 | 19 | |||
999 | 20 | class SwiftStorageBasicDeployment(OpenStackAmuletDeployment): | ||
1000 | 21 | """Amulet tests on a basic swift-storage deployment.""" | ||
1001 | 22 | |||
1002 | 23 | def __init__(self, series, openstack=None, source=None): | ||
1003 | 24 | """Deploy the entire test environment.""" | ||
1004 | 25 | super(SwiftStorageBasicDeployment, self).__init__(series, openstack, | ||
1005 | 26 | source) | ||
1006 | 27 | self._add_services() | ||
1007 | 28 | self._add_relations() | ||
1008 | 29 | self._configure_services() | ||
1009 | 30 | self._deploy() | ||
1010 | 31 | self._initialize_tests() | ||
1011 | 32 | |||
1012 | 33 | def _add_services(self): | ||
1013 | 34 | """Add the service that we're testing, including the number of units, | ||
1014 | 35 | where swift-storage is local, and the other charms are from | ||
1015 | 36 | the charm store.""" | ||
1016 | 37 | this_service = ('swift-storage', 1) | ||
1017 | 38 | other_services = [('mysql', 1), | ||
1018 | 39 | ('keystone', 1), ('glance', 1), ('swift-proxy', 1)] | ||
1019 | 40 | super(SwiftStorageBasicDeployment, self)._add_services(this_service, | ||
1020 | 41 | other_services) | ||
1021 | 42 | |||
1022 | 43 | def _add_relations(self): | ||
1023 | 44 | """Add all of the relations for the services.""" | ||
1024 | 45 | relations = { | ||
1025 | 46 | 'keystone:shared-db': 'mysql:shared-db', | ||
1026 | 47 | 'swift-proxy:identity-service': 'keystone:identity-service', | ||
1027 | 48 | 'swift-storage:swift-storage': 'swift-proxy:swift-storage', | ||
1028 | 49 | 'glance:identity-service': 'keystone:identity-service', | ||
1029 | 50 | 'glance:shared-db': 'mysql:shared-db', | ||
1030 | 51 | 'glance:object-store': 'swift-proxy:object-store' | ||
1031 | 52 | } | ||
1032 | 53 | super(SwiftStorageBasicDeployment, self)._add_relations(relations) | ||
1033 | 54 | |||
1034 | 55 | def _configure_services(self): | ||
1035 | 56 | """Configure all of the services.""" | ||
1036 | 57 | keystone_config = {'admin-password': 'openstack', | ||
1037 | 58 | 'admin-token': 'ubuntutesting'} | ||
1038 | 59 | swift_proxy_config = {'zone-assignment': 'manual', | ||
1039 | 60 | 'replicas': '1', | ||
1040 | 61 | 'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae', | ||
1041 | 62 | 'use-https': 'no'} | ||
1042 | 63 | swift_storage_config = {'zone': '1', | ||
1043 | 64 | 'block-device': 'vdb', | ||
1044 | 65 | 'overwrite': 'true'} | ||
1045 | 66 | configs = {'keystone': keystone_config, | ||
1046 | 67 | 'swift-proxy': swift_proxy_config, | ||
1047 | 68 | 'swift-storage': swift_storage_config} | ||
1048 | 69 | super(SwiftStorageBasicDeployment, self)._configure_services(configs) | ||
1049 | 70 | |||
1050 | 71 | def _initialize_tests(self): | ||
1051 | 72 | """Perform final initialization before tests get run.""" | ||
1052 | 73 | # Access the sentries for inspecting service units | ||
1053 | 74 | self.mysql_sentry = self.d.sentry.unit['mysql/0'] | ||
1054 | 75 | self.keystone_sentry = self.d.sentry.unit['keystone/0'] | ||
1055 | 76 | self.glance_sentry = self.d.sentry.unit['glance/0'] | ||
1056 | 77 | self.swift_proxy_sentry = self.d.sentry.unit['swift-proxy/0'] | ||
1057 | 78 | self.swift_storage_sentry = self.d.sentry.unit['swift-storage/0'] | ||
1058 | 79 | |||
1059 | 80 | # Authenticate admin with keystone | ||
1060 | 81 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, | ||
1061 | 82 | user='admin', | ||
1062 | 83 | password='openstack', | ||
1063 | 84 | tenant='admin') | ||
1064 | 85 | |||
1065 | 86 | # Authenticate admin with glance endpoint | ||
1066 | 87 | self.glance = u.authenticate_glance_admin(self.keystone) | ||
1067 | 88 | |||
1068 | 89 | # Authenticate swift user | ||
1069 | 90 | keystone_relation = self.keystone_sentry.relation('identity-service', | ||
1070 | 91 | 'swift-proxy:identity-service') | ||
1071 | 92 | ep = self.keystone.service_catalog.url_for(service_type='identity', | ||
1072 | 93 | endpoint_type='publicURL') | ||
1073 | 94 | self.swift = swiftclient.Connection(authurl=ep, | ||
1074 | 95 | user=keystone_relation['service_username'], | ||
1075 | 96 | key=keystone_relation['service_password'], | ||
1076 | 97 | tenant_name=keystone_relation['service_tenant'], | ||
1077 | 98 | auth_version='2.0') | ||
1078 | 99 | |||
1079 | 100 | # Create a demo tenant/role/user | ||
1080 | 101 | self.demo_tenant = 'demoTenant' | ||
1081 | 102 | self.demo_role = 'demoRole' | ||
1082 | 103 | self.demo_user = 'demoUser' | ||
1083 | 104 | if not u.tenant_exists(self.keystone, self.demo_tenant): | ||
1084 | 105 | tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, | ||
1085 | 106 | description='demo tenant', | ||
1086 | 107 | enabled=True) | ||
1087 | 108 | self.keystone.roles.create(name=self.demo_role) | ||
1088 | 109 | self.keystone.users.create(name=self.demo_user, | ||
1089 | 110 | password='password', | ||
1090 | 111 | tenant_id=tenant.id, | ||
1091 | 112 | email='demo@demo.com') | ||
1092 | 113 | |||
1093 | 114 | # Authenticate demo user with keystone | ||
1094 | 115 | self.keystone_demo = \ | ||
1095 | 116 | u.authenticate_keystone_user(self.keystone, user=self.demo_user, | ||
1096 | 117 | password='password', | ||
1097 | 118 | tenant=self.demo_tenant) | ||
1098 | 119 | |||
1099 | 120 | def test_services(self): | ||
1100 | 121 | """Verify the expected services are running on the corresponding | ||
1101 | 122 | service units.""" | ||
1102 | 123 | swift_storage_services = ['status swift-account', | ||
1103 | 124 | 'status swift-account-auditor', | ||
1104 | 125 | 'status swift-account-reaper', | ||
1105 | 126 | 'status swift-account-replicator', | ||
1106 | 127 | 'status swift-container', | ||
1107 | 128 | 'status swift-container-auditor', | ||
1108 | 129 | 'status swift-container-replicator', | ||
1109 | 130 | 'status swift-container-updater', | ||
1110 | 131 | 'status swift-object', | ||
1111 | 132 | 'status swift-object-auditor', | ||
1112 | 133 | 'status swift-object-replicator', | ||
1113 | 134 | 'status swift-object-updater'] | ||
1114 | 135 | if self._get_openstack_release() >= self.precise_icehouse: | ||
1115 | 136 | swift_storage_services.append('status swift-container-sync') | ||
1116 | 137 | commands = { | ||
1117 | 138 | self.mysql_sentry: ['status mysql'], | ||
1118 | 139 | self.keystone_sentry: ['status keystone'], | ||
1119 | 140 | self.glance_sentry: ['status glance-registry', 'status glance-api'], | ||
1120 | 141 | self.swift_proxy_sentry: ['status swift-proxy'], | ||
1121 | 142 | self.swift_storage_sentry: swift_storage_services | ||
1122 | 143 | } | ||
1123 | 144 | |||
1124 | 145 | ret = u.validate_services(commands) | ||
1125 | 146 | if ret: | ||
1126 | 147 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
1127 | 148 | |||
1128 | 149 | def test_users(self): | ||
1129 | 150 | """Verify all existing roles.""" | ||
1130 | 151 | user1 = {'name': 'demoUser', | ||
1131 | 152 | 'enabled': True, | ||
1132 | 153 | 'tenantId': u.not_null, | ||
1133 | 154 | 'id': u.not_null, | ||
1134 | 155 | 'email': 'demo@demo.com'} | ||
1135 | 156 | user2 = {'name': 'admin', | ||
1136 | 157 | 'enabled': True, | ||
1137 | 158 | 'tenantId': u.not_null, | ||
1138 | 159 | 'id': u.not_null, | ||
1139 | 160 | 'email': 'juju@localhost'} | ||
1140 | 161 | user3 = {'name': 'glance', | ||
1141 | 162 | 'enabled': True, | ||
1142 | 163 | 'tenantId': u.not_null, | ||
1143 | 164 | 'id': u.not_null, | ||
1144 | 165 | 'email': u'juju@localhost'} | ||
1145 | 166 | user4 = {'name': 'swift', | ||
1146 | 167 | 'enabled': True, | ||
1147 | 168 | 'tenantId': u.not_null, | ||
1148 | 169 | 'id': u.not_null, | ||
1149 | 170 | 'email': u'juju@localhost'} | ||
1150 | 171 | expected = [user1, user2, user3, user4] | ||
1151 | 172 | actual = self.keystone.users.list() | ||
1152 | 173 | |||
1153 | 174 | ret = u.validate_user_data(expected, actual) | ||
1154 | 175 | if ret: | ||
1155 | 176 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
1156 | 177 | |||
1157 | 178 | def test_service_catalog(self): | ||
1158 | 179 | """Verify that the service catalog endpoint data is valid.""" | ||
1159 | 180 | endpoint_vol = {'adminURL': u.valid_url, | ||
1160 | 181 | 'region': 'RegionOne', | ||
1161 | 182 | 'publicURL': u.valid_url, | ||
1162 | 183 | 'internalURL': u.valid_url} | ||
1163 | 184 | endpoint_id = {'adminURL': u.valid_url, | ||
1164 | 185 | 'region': 'RegionOne', | ||
1165 | 186 | 'publicURL': u.valid_url, | ||
1166 | 187 | 'internalURL': u.valid_url} | ||
1167 | 188 | if self._get_openstack_release() >= self.precise_folsom: | ||
1168 | 189 | endpoint_vol['id'] = u.not_null | ||
1169 | 190 | endpoint_id['id'] = u.not_null | ||
1170 | 191 | expected = {'image': [endpoint_id], 'object-store': [endpoint_id], | ||
1171 | 192 | 'identity': [endpoint_id]} | ||
1172 | 193 | actual = self.keystone_demo.service_catalog.get_endpoints() | ||
1173 | 194 | |||
1174 | 195 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) | ||
1175 | 196 | if ret: | ||
1176 | 197 | amulet.raise_status(amulet.FAIL, msg=ret) | ||
1177 | 198 | |||
1178 | 199 | def test_openstack_object_store_endpoint(self): | ||
1179 | 200 | """Verify the swift object-store endpoint data.""" | ||
1180 | 201 | endpoints = self.keystone.endpoints.list() | ||
1181 | 202 | admin_port = internal_port = public_port = '8080' | ||
1182 | 203 | expected = {'id': u.not_null, | ||
1183 | 204 | 'region': 'RegionOne', | ||
1184 | 205 | 'adminurl': u.valid_url, | ||
1185 | 206 | 'internalurl': u.valid_url, | ||
1186 | 207 | 'publicurl': u.valid_url, | ||
1187 | 208 | 'service_id': u.not_null} | ||
1188 | 209 | |||
1189 | 210 | ret = u.validate_endpoint_data(endpoints, admin_port, internal_port, | ||
1190 | 211 | public_port, expected) | ||
1191 | 212 | if ret: | ||
1192 | 213 | message = 'object-store endpoint: {}'.format(ret) | ||
1193 | 214 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1194 | 215 | |||
1195 | 216 | def test_swift_storage_swift_storage_relation(self): | ||
1196 | 217 | """Verify the swift-storage to swift-proxy swift-storage relation | ||
1197 | 218 | data.""" | ||
1198 | 219 | unit = self.swift_storage_sentry | ||
1199 | 220 | relation = ['swift-storage', 'swift-proxy:swift-storage'] | ||
1200 | 221 | expected = { | ||
1201 | 222 | 'account_port': '6002', | ||
1202 | 223 | 'zone': '1', | ||
1203 | 224 | 'object_port': '6000', | ||
1204 | 225 | 'container_port': '6001', | ||
1205 | 226 | 'private-address': u.valid_ip, | ||
1206 | 227 | 'device': 'vdb' | ||
1207 | 228 | } | ||
1208 | 229 | |||
1209 | 230 | ret = u.validate_relation_data(unit, relation, expected) | ||
1210 | 231 | if ret: | ||
1211 | 232 | message = u.relation_error('swift-storage swift-storage', ret) | ||
1212 | 233 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1213 | 234 | |||
1214 | 235 | def test_swift_proxy_swift_storage_relation(self): | ||
1215 | 236 | """Verify the swift-proxy to swift-storage swift-storage relation | ||
1216 | 237 | data.""" | ||
1217 | 238 | unit = self.swift_proxy_sentry | ||
1218 | 239 | relation = ['swift-storage', 'swift-storage:swift-storage'] | ||
1219 | 240 | expected = { | ||
1220 | 241 | 'private-address': u.valid_ip, | ||
1221 | 242 | 'trigger': u.not_null, | ||
1222 | 243 | 'rings_url': u.valid_url, | ||
1223 | 244 | 'swift_hash': u.not_null | ||
1224 | 245 | } | ||
1225 | 246 | |||
1226 | 247 | ret = u.validate_relation_data(unit, relation, expected) | ||
1227 | 248 | if ret: | ||
1228 | 249 | message = u.relation_error('swift-proxy swift-storage', ret) | ||
1229 | 250 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1230 | 251 | |||
1231 | 252 | def test_restart_on_config_change(self): | ||
1232 | 253 | """Verify that the specified services are restarted when the config | ||
1233 | 254 | is changed.""" | ||
1234 | 255 | # NOTE(coreycb): Skipping failing test on until resolved. This test | ||
1235 | 256 | # fails because the config file's last mod time is | ||
1236 | 257 | # slightly after the process' last mod time. | ||
1237 | 258 | if self._get_openstack_release() >= self.precise_essex: | ||
1238 | 259 | u.log.error("Skipping failing test until resolved") | ||
1239 | 260 | return | ||
1240 | 261 | |||
1241 | 262 | services = {'swift-account-server': 'account-server.conf', | ||
1242 | 263 | 'swift-account-auditor': 'account-server.conf', | ||
1243 | 264 | 'swift-account-reaper': 'account-server.conf', | ||
1244 | 265 | 'swift-account-replicator': 'account-server.conf', | ||
1245 | 266 | 'swift-container-server': 'container-server.conf', | ||
1246 | 267 | 'swift-container-auditor': 'container-server.conf', | ||
1247 | 268 | 'swift-container-replicator': 'container-server.conf', | ||
1248 | 269 | 'swift-container-updater': 'container-server.conf', | ||
1249 | 270 | 'swift-object-server': 'object-server.conf', | ||
1250 | 271 | 'swift-object-auditor': 'object-server.conf', | ||
1251 | 272 | 'swift-object-replicator': 'object-server.conf', | ||
1252 | 273 | 'swift-object-updater': 'object-server.conf'} | ||
1253 | 274 | if self._get_openstack_release() >= self.precise_icehouse: | ||
1254 | 275 | services['swift-container-sync'] = 'container-server.conf' | ||
1255 | 276 | |||
1256 | 277 | self.d.configure('swift-storage', | ||
1257 | 278 | {'object-server-threads-per-disk': '2'}) | ||
1258 | 279 | |||
1259 | 280 | time = 20 | ||
1260 | 281 | for s, conf in services.iteritems(): | ||
1261 | 282 | config = '/etc/swift/{}'.format(conf) | ||
1262 | 283 | if not u.service_restarted(self.swift_storage_sentry, s, config, | ||
1263 | 284 | pgrep_full=True, sleep_time=time): | ||
1264 | 285 | msg = "service {} didn't restart after config change".format(s) | ||
1265 | 286 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1266 | 287 | time = 0 | ||
1267 | 288 | |||
1268 | 289 | self.d.configure('swift-storage', | ||
1269 | 290 | {'object-server-threads-per-disk': '4'}) | ||
1270 | 291 | |||
1271 | 292 | def test_swift_config(self): | ||
1272 | 293 | """Verify the data in the swift-hash section of the swift config | ||
1273 | 294 | file.""" | ||
1274 | 295 | unit = self.swift_storage_sentry | ||
1275 | 296 | conf = '/etc/swift/swift.conf' | ||
1276 | 297 | swift_proxy_relation = self.swift_proxy_sentry.relation('swift-storage', | ||
1277 | 298 | 'swift-storage:swift-storage') | ||
1278 | 299 | expected = { | ||
1279 | 300 | 'swift_hash_path_suffix': swift_proxy_relation['swift_hash'] | ||
1280 | 301 | } | ||
1281 | 302 | |||
1282 | 303 | ret = u.validate_config_data(unit, conf, 'swift-hash', expected) | ||
1283 | 304 | if ret: | ||
1284 | 305 | message = "swift config error: {}".format(ret) | ||
1285 | 306 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1286 | 307 | |||
1287 | 308 | def test_account_server_config(self): | ||
1288 | 309 | """Verify the data in the account server config file.""" | ||
1289 | 310 | unit = self.swift_storage_sentry | ||
1290 | 311 | conf = '/etc/swift/account-server.conf' | ||
1291 | 312 | expected = { | ||
1292 | 313 | 'DEFAULT': { | ||
1293 | 314 | 'bind_ip': '0.0.0.0', | ||
1294 | 315 | 'bind_port': '6002', | ||
1295 | 316 | 'workers': '1' | ||
1296 | 317 | }, | ||
1297 | 318 | 'pipeline:main': { | ||
1298 | 319 | 'pipeline': 'recon account-server' | ||
1299 | 320 | }, | ||
1300 | 321 | 'filter:recon': { | ||
1301 | 322 | 'use': 'egg:swift#recon', | ||
1302 | 323 | 'recon_cache_path': '/var/cache/swift' | ||
1303 | 324 | }, | ||
1304 | 325 | 'app:account-server': { | ||
1305 | 326 | 'use': 'egg:swift#account' | ||
1306 | 327 | } | ||
1307 | 328 | } | ||
1308 | 329 | |||
1309 | 330 | for section, pairs in expected.iteritems(): | ||
1310 | 331 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1311 | 332 | if ret: | ||
1312 | 333 | message = "account server config error: {}".format(ret) | ||
1313 | 334 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1314 | 335 | |||
1315 | 336 | def test_container_server_config(self): | ||
1316 | 337 | """Verify the data in the container server config file.""" | ||
1317 | 338 | unit = self.swift_storage_sentry | ||
1318 | 339 | conf = '/etc/swift/container-server.conf' | ||
1319 | 340 | expected = { | ||
1320 | 341 | 'DEFAULT': { | ||
1321 | 342 | 'bind_ip': '0.0.0.0', | ||
1322 | 343 | 'bind_port': '6001', | ||
1323 | 344 | 'workers': '1' | ||
1324 | 345 | }, | ||
1325 | 346 | 'pipeline:main': { | ||
1326 | 347 | 'pipeline': 'recon container-server' | ||
1327 | 348 | }, | ||
1328 | 349 | 'filter:recon': { | ||
1329 | 350 | 'use': 'egg:swift#recon', | ||
1330 | 351 | 'recon_cache_path': '/var/cache/swift' | ||
1331 | 352 | }, | ||
1332 | 353 | 'app:container-server': { | ||
1333 | 354 | 'use': 'egg:swift#container', | ||
1334 | 355 | 'allow_versions': 'true' | ||
1335 | 356 | } | ||
1336 | 357 | } | ||
1337 | 358 | |||
1338 | 359 | for section, pairs in expected.iteritems(): | ||
1339 | 360 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1340 | 361 | if ret: | ||
1341 | 362 | message = "container server config error: {}".format(ret) | ||
1342 | 363 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1343 | 364 | |||
1344 | 365 | def test_object_server_config(self): | ||
1345 | 366 | """Verify the data in the object server config file.""" | ||
1346 | 367 | unit = self.swift_storage_sentry | ||
1347 | 368 | conf = '/etc/swift/object-server.conf' | ||
1348 | 369 | expected = { | ||
1349 | 370 | 'DEFAULT': { | ||
1350 | 371 | 'bind_ip': '0.0.0.0', | ||
1351 | 372 | 'bind_port': '6000', | ||
1352 | 373 | 'workers': '1' | ||
1353 | 374 | }, | ||
1354 | 375 | 'pipeline:main': { | ||
1355 | 376 | 'pipeline': 'recon object-server' | ||
1356 | 377 | }, | ||
1357 | 378 | 'filter:recon': { | ||
1358 | 379 | 'use': 'egg:swift#recon', | ||
1359 | 380 | 'recon_cache_path': '/var/cache/swift' | ||
1360 | 381 | }, | ||
1361 | 382 | 'app:object-server': { | ||
1362 | 383 | 'use': 'egg:swift#object', | ||
1363 | 384 | 'threads_per_disk': '4' | ||
1364 | 385 | } | ||
1365 | 386 | } | ||
1366 | 387 | |||
1367 | 388 | for section, pairs in expected.iteritems(): | ||
1368 | 389 | ret = u.validate_config_data(unit, conf, section, pairs) | ||
1369 | 390 | if ret: | ||
1370 | 391 | message = "object server config error: {}".format(ret) | ||
1371 | 392 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1372 | 393 | |||
1373 | 394 | def test_image_create(self): | ||
1374 | 395 | """Create an instance in glance, which is backed by swift, and validate | ||
1375 | 396 | that some of the metadata for the image match in glance and swift.""" | ||
1376 | 397 | # NOTE(coreycb): Skipping failing test on folsom until resolved. On | ||
1377 | 398 | # folsom only, uploading an image to glance gets 400 Bad | ||
1378 | 399 | # Request - Error uploading image: (error): [Errno 111] | ||
1379 | 400 | # ECONNREFUSED (HTTP 400) | ||
1380 | 401 | if self._get_openstack_release() == self.precise_folsom: | ||
1381 | 402 | u.log.error("Skipping failing test until resolved") | ||
1382 | 403 | return | ||
1383 | 404 | |||
1384 | 405 | # Create glance image | ||
1385 | 406 | image = u.create_cirros_image(self.glance, "cirros-image") | ||
1386 | 407 | if not image: | ||
1387 | 408 | amulet.raise_status(amulet.FAIL, msg="Image create failed") | ||
1388 | 409 | |||
1389 | 410 | # Validate that cirros image exists in glance and get its checksum/size | ||
1390 | 411 | images = list(self.glance.images.list()) | ||
1391 | 412 | if len(images) != 1: | ||
1392 | 413 | msg = "Expected 1 glance image, found {}".format(len(images)) | ||
1393 | 414 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1394 | 415 | |||
1395 | 416 | if images[0].name != 'cirros-image': | ||
1396 | 417 | message = "cirros image does not exist" | ||
1397 | 418 | amulet.raise_status(amulet.FAIL, msg=message) | ||
1398 | 419 | |||
1399 | 420 | glance_image_md5 = image.checksum | ||
1400 | 421 | glance_image_size = image.size | ||
1401 | 422 | |||
1402 | 423 | # Validate that swift object's checksum/size match that from glance | ||
1403 | 424 | headers, containers = self.swift.get_account() | ||
1404 | 425 | if len(containers) != 1: | ||
1405 | 426 | msg = "Expected 1 swift container, found {}".format(len(containers)) | ||
1406 | 427 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1407 | 428 | |||
1408 | 429 | container_name = containers[0].get('name') | ||
1409 | 430 | |||
1410 | 431 | headers, objects = self.swift.get_container(container_name) | ||
1411 | 432 | if len(objects) != 1: | ||
1412 | 433 | msg = "Expected 1 swift object, found {}".format(len(objects)) | ||
1413 | 434 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1414 | 435 | |||
1415 | 436 | swift_object_size = objects[0].get('bytes') | ||
1416 | 437 | swift_object_md5 = objects[0].get('hash') | ||
1417 | 438 | |||
1418 | 439 | if glance_image_size != swift_object_size: | ||
1419 | 440 | msg = "Glance image size {} != swift object size {}".format( \ | ||
1420 | 441 | glance_image_size, swift_object_size) | ||
1421 | 442 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1422 | 443 | |||
1423 | 444 | if glance_image_md5 != swift_object_md5: | ||
1424 | 445 | msg = "Glance image hash {} != swift object hash {}".format( \ | ||
1425 | 446 | glance_image_md5, swift_object_md5) | ||
1426 | 447 | amulet.raise_status(amulet.FAIL, msg=msg) | ||
1427 | 448 | |||
1428 | 449 | # Cleanup | ||
1429 | 450 | u.delete_image(self.glance, image) | ||
1430 | 0 | 451 | ||
1431 | === added directory 'tests/charmhelpers' | |||
1432 | === added file 'tests/charmhelpers/__init__.py' | |||
1433 | === added directory 'tests/charmhelpers/contrib' | |||
1434 | === added file 'tests/charmhelpers/contrib/__init__.py' | |||
1435 | === added directory 'tests/charmhelpers/contrib/amulet' | |||
1436 | === added file 'tests/charmhelpers/contrib/amulet/__init__.py' | |||
1437 | === added file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
1438 | --- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
1439 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-11 16:43:39 +0000 | |||
1440 | @@ -0,0 +1,63 @@ | |||
1441 | 1 | import amulet | ||
1442 | 2 | import re | ||
1443 | 3 | |||
1444 | 4 | |||
class AmuletDeployment(object):
    """Provide generic Amulet deployment and test-runner helpers.

    Subclasses drive a Juju deployment through the ``amulet`` library:
    add services, relate and configure them, deploy, then run any
    ``test_*`` methods defined on the instance.
    """

    def __init__(self, series):
        """Initialize the deployment environment.

        :param series: Ubuntu series to deploy (e.g. 'precise', 'trusty').
        """
        self.series = series
        self.d = amulet.Deployment(series=self.series)

    def _get_charm_name(self, service_name):
        """Return the charm name for a service name.

        Unique service names can be specified with a '-service#' suffix
        (e.g. mysql-service1); the suffix is stripped to recover the charm
        name.  re.sub leaves non-matching names unchanged, so the original
        match-then-sub dance is unnecessary.
        """
        # Raw string fixes the invalid '\-' escape sequence.
        return re.sub(r'-service\d{1,3}$', '', service_name)

    def _add_services(self, this_service, other_services):
        """Add services to the deployment.

        ``this_service`` is the local charm under test; ``other_services``
        are pulled from the charm store.  Each service is a
        (name, unit-count) tuple.
        """
        name, units = 0, 1

        # The local charm is added by service name only (the previously
        # computed-but-unused charm_name local was removed).
        self.d.add(this_service[name], units=this_service[units])

        for svc in other_services:
            charm_name = self._get_charm_name(svc[name])
            self.d.add(svc[name],
                       charm='cs:{}/{}'.format(self.series, charm_name),
                       units=svc[units])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in relations.items():
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in configs.items():
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        try:
            self.d.setup()
            self.d.sentry.wait()
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
        # Any other exception propagates unchanged; the redundant
        # 'except: raise' clause was removed.

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
1504 | 0 | 64 | ||
1505 | === added file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
1506 | --- tests/charmhelpers/contrib/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
1507 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-11 16:43:39 +0000 | |||
1508 | @@ -0,0 +1,157 @@ | |||
1509 | 1 | import ConfigParser | ||
1510 | 2 | import io | ||
1511 | 3 | import logging | ||
1512 | 4 | import re | ||
1513 | 5 | import sys | ||
1514 | 6 | from time import sleep | ||
1515 | 7 | |||
1516 | 8 | |||
class AmuletUtils(object):
    """Common utility functions used by Amulet tests.

    Provides validators (IPs, URLs, config-file sections, relation data)
    and helpers that detect whether a deployed unit's service restarted.
    """

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        log = logging
        logger = log.getLogger(name)
        fmt = log.Formatter("%(asctime)s %(funcName)s "
                            "%(levelname)s: %(message)s")

        handler = log.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if *ip* looks like a dotted-quad IPv4 address.

        NOTE(review): octet values are not range-checked (999.0.0.1 would
        pass); kept as-is since expectations may rely on the loose match.
        """
        return bool(re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip))

    def valid_url(self, url):
        """Return True if *url* is a well-formed http(s)/ftp(s) URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        return bool(p.match(url))

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
        service units.

        :param commands: dict mapping sentry units to lists of commands
            that must exit 0 on that unit.
        :returns: error string on failure, None on success.
        """
        for k, v in commands.items():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        # ConfigParser is the Python 2 module name; this module targets py2.
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section, expected):
        """Verify that the specified section of the config file contains
        the expected option key:value pairs.

        :returns: error string on mismatch/missing option, None on success.
        """
        config = self._get_config(sentry_unit, config_file)

        # ConfigParser treats DEFAULT specially; has_section() is False
        # for it even though options resolve.
        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.

        The values in the 'expected' dictionary can be strings, bools, ints,
        longs, or can be a function that evaluates a variable and returns a
        bool.
        :returns: "key:value" string for the first mismatch, None on success.
        """
        for k, v in expected.items():
            if k in actual:
                # basestring/long are Python 2 built-ins (py2 module).
                if isinstance(v, (basestring, bool, int, long)):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                # Otherwise v is a predicate applied to the actual value.
                elif not v(actual[k]):
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if the value is not None.

        Fixed: uses an identity test instead of the '!= None' equality
        comparison (PEP 8 E711).
        """
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last modification
        time of the /proc/pid directory.  If pgrep_full is True, the process
        name is matched against the full command line.
        """
        # pgrep -o selects the oldest matching process.
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False, sleep_time=20):
        """Compare a service's start time vs a file's last modification time
        (such as a config file for that service) to determine if the service
        has been restarted.
        """
        # Give hooks a chance to finish before sampling timestamps.
        sleep(sleep_time)
        return (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                self._get_file_mtime(sentry_unit, filename))

    def relation_error(self, name, data):
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        return 'unexpected endpoint data in {} - {}'.format(name, data)
1666 | 0 | 158 | ||
1667 | === added directory 'tests/charmhelpers/contrib/openstack' | |||
1668 | === added file 'tests/charmhelpers/contrib/openstack/__init__.py' | |||
1669 | === added directory 'tests/charmhelpers/contrib/openstack/amulet' | |||
1670 | === added file 'tests/charmhelpers/contrib/openstack/amulet/__init__.py' | |||
1671 | === added file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
1672 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000 | |||
1673 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-11 16:43:39 +0000 | |||
1674 | @@ -0,0 +1,57 @@ | |||
1675 | 1 | from charmhelpers.contrib.amulet.deployment import ( | ||
1676 | 2 | AmuletDeployment | ||
1677 | 3 | ) | ||
1678 | 4 | |||
1679 | 5 | |||
class OpenStackAmuletDeployment(AmuletDeployment):
    """Amulet deployment helpers specific to OpenStack charms.

    Extends AmuletDeployment with openstack-origin/source configuration
    and an enumeration of the supported (series, openstack) release
    combinations.
    """

    def __init__(self, series, openstack=None, source=None):
        """Initialize the deployment environment.

        :param series: Ubuntu series (e.g. 'precise', 'trusty').
        :param openstack: openstack-origin value (e.g.
            'cloud:precise-havana'), or None for the series default.
        :param source: 'source' config value for support charms
            (mysql, ceph, ...), or None.
        """
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin.

        Fixed: the original appended this_service directly to the caller's
        other_services list, mutating the caller's data; a copy is used
        instead.
        """
        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0
        services = list(other_services)
        services.append(this_service)
        # Support charms take a 'source' option rather than
        # 'openstack-origin'.
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']

        if self.openstack:
            for svc in services:
                if self._get_charm_name(svc[name]) not in use_source:
                    self.d.configure(svc[name],
                                     {'openstack-origin': self.openstack})

        if self.source:
            for svc in services:
                if self._get_charm_name(svc[name]) in use_source:
                    self.d.configure(svc[name], {'source': self.source})

    # NOTE: the _configure_services override (byte-identical to the parent
    # implementation) was removed; AmuletDeployment's version is inherited.

    def _get_openstack_release(self):
        """Return an integer representing the enum value of the openstack
        release.

        Also defines self.precise_essex .. self.trusty_icehouse as a side
        effect; callers use those attributes as comparison constants.
        """
        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
            self.precise_havana, self.precise_icehouse, \
            self.trusty_icehouse = range(6)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
        return releases[(self.series, self.openstack)]
1732 | 0 | 58 | ||
1733 | === added file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
1734 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000 | |||
1735 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-11 16:43:39 +0000 | |||
1736 | @@ -0,0 +1,253 @@ | |||
1737 | 1 | import logging | ||
1738 | 2 | import os | ||
1739 | 3 | import time | ||
1740 | 4 | import urllib | ||
1741 | 5 | |||
1742 | 6 | import glanceclient.v1.client as glance_client | ||
1743 | 7 | import keystoneclient.v2_0 as keystone_client | ||
1744 | 8 | import novaclient.v1_1.client as nova_client | ||
1745 | 9 | |||
1746 | 10 | from charmhelpers.contrib.amulet.utils import ( | ||
1747 | 11 | AmuletUtils | ||
1748 | 12 | ) | ||
1749 | 13 | |||
1750 | 14 | DEBUG = logging.DEBUG | ||
1751 | 15 | ERROR = logging.ERROR | ||
1752 | 16 | |||
1753 | 17 | |||
1754 | 18 | class OpenStackAmuletUtils(AmuletUtils): | ||
1755 | 19 | """This class inherits from AmuletUtils and has additional support | ||
1756 | 20 | that is specifically for use by OpenStack charms.""" | ||
1757 | 21 | |||
1758 | 22 | def __init__(self, log_level=ERROR): | ||
1759 | 23 | """Initialize the deployment environment.""" | ||
1760 | 24 | super(OpenStackAmuletUtils, self).__init__(log_level) | ||
1761 | 25 | |||
1762 | 26 | def validate_endpoint_data(self, endpoints, admin_port, internal_port, | ||
1763 | 27 | public_port, expected): | ||
1764 | 28 | """Validate actual endpoint data vs expected endpoint data. The ports | ||
1765 | 29 | are used to find the matching endpoint.""" | ||
1766 | 30 | found = False | ||
1767 | 31 | for ep in endpoints: | ||
1768 | 32 | self.log.debug('endpoint: {}'.format(repr(ep))) | ||
1769 | 33 | if admin_port in ep.adminurl and internal_port in ep.internalurl \ | ||
1770 | 34 | and public_port in ep.publicurl: | ||
1771 | 35 | found = True | ||
1772 | 36 | actual = {'id': ep.id, | ||
1773 | 37 | 'region': ep.region, | ||
1774 | 38 | 'adminurl': ep.adminurl, | ||
1775 | 39 | 'internalurl': ep.internalurl, | ||
1776 | 40 | 'publicurl': ep.publicurl, | ||
1777 | 41 | 'service_id': ep.service_id} | ||
1778 | 42 | ret = self._validate_dict_data(expected, actual) | ||
1779 | 43 | if ret: | ||
1780 | 44 | return 'unexpected endpoint data - {}'.format(ret) | ||
1781 | 45 | |||
1782 | 46 | if not found: | ||
1783 | 47 | return 'endpoint not found' | ||
1784 | 48 | |||
1785 | 49 | def validate_svc_catalog_endpoint_data(self, expected, actual): | ||
1786 | 50 | """Validate a list of actual service catalog endpoints vs a list of | ||
1787 | 51 | expected service catalog endpoints.""" | ||
1788 | 52 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1789 | 53 | for k, v in expected.iteritems(): | ||
1790 | 54 | if k in actual: | ||
1791 | 55 | ret = self._validate_dict_data(expected[k][0], actual[k][0]) | ||
1792 | 56 | if ret: | ||
1793 | 57 | return self.endpoint_error(k, ret) | ||
1794 | 58 | else: | ||
1795 | 59 | return "endpoint {} does not exist".format(k) | ||
1796 | 60 | return ret | ||
1797 | 61 | |||
1798 | 62 | def validate_tenant_data(self, expected, actual): | ||
1799 | 63 | """Validate a list of actual tenant data vs list of expected tenant | ||
1800 | 64 | data.""" | ||
1801 | 65 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1802 | 66 | for e in expected: | ||
1803 | 67 | found = False | ||
1804 | 68 | for act in actual: | ||
1805 | 69 | a = {'enabled': act.enabled, 'description': act.description, | ||
1806 | 70 | 'name': act.name, 'id': act.id} | ||
1807 | 71 | if e['name'] == a['name']: | ||
1808 | 72 | found = True | ||
1809 | 73 | ret = self._validate_dict_data(e, a) | ||
1810 | 74 | if ret: | ||
1811 | 75 | return "unexpected tenant data - {}".format(ret) | ||
1812 | 76 | if not found: | ||
1813 | 77 | return "tenant {} does not exist".format(e['name']) | ||
1814 | 78 | return ret | ||
1815 | 79 | |||
1816 | 80 | def validate_role_data(self, expected, actual): | ||
1817 | 81 | """Validate a list of actual role data vs a list of expected role | ||
1818 | 82 | data.""" | ||
1819 | 83 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1820 | 84 | for e in expected: | ||
1821 | 85 | found = False | ||
1822 | 86 | for act in actual: | ||
1823 | 87 | a = {'name': act.name, 'id': act.id} | ||
1824 | 88 | if e['name'] == a['name']: | ||
1825 | 89 | found = True | ||
1826 | 90 | ret = self._validate_dict_data(e, a) | ||
1827 | 91 | if ret: | ||
1828 | 92 | return "unexpected role data - {}".format(ret) | ||
1829 | 93 | if not found: | ||
1830 | 94 | return "role {} does not exist".format(e['name']) | ||
1831 | 95 | return ret | ||
1832 | 96 | |||
1833 | 97 | def validate_user_data(self, expected, actual): | ||
1834 | 98 | """Validate a list of actual user data vs a list of expected user | ||
1835 | 99 | data.""" | ||
1836 | 100 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1837 | 101 | for e in expected: | ||
1838 | 102 | found = False | ||
1839 | 103 | for act in actual: | ||
1840 | 104 | a = {'enabled': act.enabled, 'name': act.name, | ||
1841 | 105 | 'email': act.email, 'tenantId': act.tenantId, | ||
1842 | 106 | 'id': act.id} | ||
1843 | 107 | if e['name'] == a['name']: | ||
1844 | 108 | found = True | ||
1845 | 109 | ret = self._validate_dict_data(e, a) | ||
1846 | 110 | if ret: | ||
1847 | 111 | return "unexpected user data - {}".format(ret) | ||
1848 | 112 | if not found: | ||
1849 | 113 | return "user {} does not exist".format(e['name']) | ||
1850 | 114 | return ret | ||
1851 | 115 | |||
1852 | 116 | def validate_flavor_data(self, expected, actual): | ||
1853 | 117 | """Validate a list of actual flavors vs a list of expected flavors.""" | ||
1854 | 118 | self.log.debug('actual: {}'.format(repr(actual))) | ||
1855 | 119 | act = [a.name for a in actual] | ||
1856 | 120 | return self._validate_list_data(expected, act) | ||
1857 | 121 | |||
def tenant_exists(self, keystone, tenant):
    """Return True if tenant exists"""
    return any(t.name == tenant for t in keystone.tenants.list())
def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                tenant):
    """Authenticates admin user with the keystone admin endpoint.

    The admin endpoint URL is rebuilt from the keystone unit's private
    address on its shared-db relation with mysql.
    """
    relation_data = keystone_sentry.relation('shared-db',
                                             'mysql:shared-db')
    host = relation_data['private-address'].strip().decode('utf-8')
    ep = "http://{}:35357/v2.0".format(host)
    return keystone_client.Client(tenant_name=tenant, auth_url=ep,
                                  username=user, password=password)
def authenticate_keystone_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with the keystone public endpoint."""
    public_ep = keystone.service_catalog.url_for(
        service_type='identity', endpoint_type='publicURL')
    return keystone_client.Client(auth_url=public_ep, username=user,
                                  password=password, tenant_name=tenant)
def authenticate_glance_admin(self, keystone):
    """Authenticates admin user with glance."""
    admin_ep = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='adminURL')
    return glance_client.Client(admin_ep, token=keystone.auth_token)
def authenticate_nova_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with nova-api."""
    public_ep = keystone.service_catalog.url_for(
        service_type='identity', endpoint_type='publicURL')
    return nova_client.Client(auth_url=public_ep, username=user,
                              api_key=password, project_id=tenant)
def create_cirros_image(self, glance, image_name):
    """Download the latest cirros image and upload it to glance.

    :param glance: authenticated glance client
    :param image_name: name to give the image in glance
    :returns: the active glance image, or None if activation times
              out (~27 seconds of polling)
    """
    http_proxy = os.getenv('AMULET_HTTP_PROXY')
    self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
    if http_proxy:
        proxies = {'http': http_proxy}
        opener = urllib.FancyURLopener(proxies)
    else:
        opener = urllib.FancyURLopener()

    f = opener.open("http://download.cirros-cloud.net/version/released")
    version = f.read().strip()
    f.close()

    # Keep the remote file name separate from the local cache path;
    # the old code embedded the 'tests/' path prefix in the download
    # URL, producing .../<version>/tests/cirros-... which is wrong.
    cirros_img = "cirros-{}-x86_64-disk.img".format(version)
    local_path = os.path.join('tests', cirros_img)

    if not os.path.exists(local_path):
        cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                              version, cirros_img)
        opener.retrieve(cirros_url, local_path)

    # Binary mode: the image is raw qcow2 data, not text.
    with open(local_path, 'rb') as f:
        image = glance.images.create(name=image_name, is_public=True,
                                     disk_format='qcow2',
                                     container_format='bare', data=f)
    count = 1
    status = image.status
    while status != 'active' and count < 10:
        time.sleep(3)
        image = glance.images.get(image.id)
        status = image.status
        self.log.debug('image status: {}'.format(status))
        count += 1

    if status != 'active':
        self.log.error('image creation timed out')
        return None

    return image
def delete_image(self, glance, image):
    """Delete the specified image and poll until it is gone.

    :returns: True on success, False if the image count has not
              dropped after ~27 seconds of polling.
    """
    expected = len(list(glance.images.list())) - 1
    glance.images.delete(image)

    current = len(list(glance.images.list()))
    for _ in range(9):
        if current == expected:
            break
        time.sleep(3)
        current = len(list(glance.images.list()))
        self.log.debug('number of images: {}'.format(current))

    if current != expected:
        self.log.error('image deletion timed out')
        return False
    return True
def create_instance(self, nova, image_name, instance_name, flavor):
    """Boot an instance from the named image and flavor, then poll
    until it reaches ACTIVE.

    :returns: the nova server object, or None if it has not gone
              ACTIVE after ~3 minutes of polling.
    """
    img = nova.images.find(name=image_name)
    flav = nova.flavors.find(name=flavor)
    instance = nova.servers.create(name=instance_name, image=img,
                                   flavor=flav)

    for _ in range(59):
        if instance.status == 'ACTIVE':
            break
        time.sleep(3)
        instance = nova.servers.get(instance.id)
        self.log.debug('instance status: {}'.format(instance.status))

    if instance.status != 'ACTIVE':
        self.log.error('instance creation timed out')
        return None
    return instance
def delete_instance(self, nova, instance):
    """Delete the specified instance and poll until it is gone.

    :returns: True on success, False if the server count has not
              dropped after ~27 seconds of polling.
    """
    expected = len(list(nova.servers.list())) - 1
    nova.servers.delete(instance)

    current = len(list(nova.servers.list()))
    for _ in range(9):
        if current == expected:
            break
        time.sleep(3)
        current = len(list(nova.servers.list()))
        self.log.debug('number of instances: {}'.format(current))

    if current != expected:
        self.log.error('instance deletion timed out')
        return False
    return True