Merge lp:~openstack-charmers/charms/trusty/keystone/keystonev3 into lp:~openstack-charmers-archive/charms/trusty/keystone/next
- Trusty Tahr (14.04)
- keystonev3
- Merge into next
Status: | Work in progress |
---|---|
Proposed branch: | lp:~openstack-charmers/charms/trusty/keystone/keystonev3 |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/keystone/next |
Diff against target: |
3551 lines (+2443/-211) (has conflicts) 20 files modified
charm-helpers-tests.yaml (+2/-0) charmhelpers/contrib/openstack/neutron.py (+13/-0) charmhelpers/contrib/storage/linux/ceph.py (+38/-12) config.yaml (+6/-0) hooks/keystone_context.py (+8/-2) hooks/keystone_hooks.py (+6/-0) hooks/keystone_utils.py (+254/-98) hooks/manager.py (+183/-3) templates/liberty/policy.json (+382/-0) templates/liberty/policy.json.v2 (+184/-0) tests/basic_deployment.py (+208/-44) tests/charmhelpers/contrib/openstack/amulet/utils.py (+34/-11) tests/charmhelpers/core/__init__.py (+15/-0) tests/charmhelpers/core/decorators.py (+57/-0) tests/charmhelpers/core/hookenv.py (+978/-0) unit_tests/test_actions.py (+14/-7) unit_tests/test_actions_git_reinstall.py (+2/-1) unit_tests/test_actions_openstack_upgrade.py (+9/-4) unit_tests/test_keystone_hooks.py (+14/-5) unit_tests/test_keystone_utils.py (+36/-24) Text conflict in charmhelpers/contrib/openstack/neutron.py Text conflict in hooks/keystone_utils.py |
To merge this branch: | bzr merge lp:~openstack-charmers/charms/trusty/keystone/keystonev3 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email: mp+285694@code.launchpad.net |
Commit message
Description of the change
- 188. By Liam Young
-
Update unit tests to mock new method used for accessing keystone manager
- 189. By Liam Young
-
Lint fixes and unit tests for manager
- 190. By Liam Young
-
Fix charm-helper src
- 191. By Liam Young
-
Fix readme
- 192. By Liam Young
-
Update charm to render policy.json which is needed to enforce keystone v3 features like domains
- 193. By Liam Young
-
Fix getting domain info when keystone may be offline
- 194. By Liam Young
-
Setup cloud_admin user
- 195. By Liam Young
-
v2 fixes
- 196. By Liam Young
-
Fix endpoint generation
- 197. By Liam Young
-
Transitioning to dynamic keystone ep api version discovery
- 198. By Liam Young
-
Lint tidy
- 199. By Liam Young
-
Update amulet tests
- 200. By Liam Young
-
More amulet updates
- 201. By Liam Young
-
Get v3 tests on openstack release > liberty
- 202. By Liam Young
-
POLICY_JSON is only managed for liberty and above so don't try and write it for earlier releases
- 203. By Liam Young
-
More amulet fixes
- 204. By Liam Young
-
Fixes for pure v3 deploy
- 205. By Liam Young
-
More fixes
- 206. By Liam Young
-
More fixes
- 207. By Liam Young
-
Catch exception when keystone is down
- 208. By Liam Young
-
Import problems
- 209. By Liam Young
-
Fix api v2
- 210. By Liam Young
-
Fix lint
- 211. By Liam Young
-
Fix typo
- 212. By Liam Young
-
Remove cloud_admin user
- 213. By Liam Young
-
Remove cloud_admin user from tests
- 214. By Liam Young
-
Need to specify domain for user lookups with v3
- 215. By Liam Young
-
Role needs to be created after user or grant fails
- 216. By Liam Young
-
Fix unit_tests
Unmerged revisions
- 216. By Liam Young
-
Fix unit_tests
- 215. By Liam Young
-
Role needs to be created after user or grant fails
- 214. By Liam Young
-
Need to specify domain for user lookups with v3
- 213. By Liam Young
-
Remove cloud_admin user from tests
- 212. By Liam Young
-
Remove cloud_admin user
- 211. By Liam Young
-
Fix typo
- 210. By Liam Young
-
Fix lint
- 209. By Liam Young
-
Fix api v2
- 208. By Liam Young
-
Import problems
- 207. By Liam Young
-
Catch exception when keystone is down
Preview Diff
1 | === modified file 'charm-helpers-tests.yaml' |
2 | --- charm-helpers-tests.yaml 2016-02-19 14:47:52 +0000 |
3 | +++ charm-helpers-tests.yaml 2016-03-05 15:43:48 +0000 |
4 | @@ -3,3 +3,5 @@ |
5 | include: |
6 | - contrib.amulet |
7 | - contrib.openstack.amulet |
8 | + - core.hookenv |
9 | + - core.decorators |
10 | |
11 | === modified file 'charmhelpers/contrib/openstack/neutron.py' |
12 | --- charmhelpers/contrib/openstack/neutron.py 2016-02-19 14:49:59 +0000 |
13 | +++ charmhelpers/contrib/openstack/neutron.py 2016-03-05 15:43:48 +0000 |
14 | @@ -233,6 +233,7 @@ |
15 | 'neutron-plugin-ml2'] |
16 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards |
17 | plugins['nvp'] = plugins['nsx'] |
18 | +<<<<<<< TREE |
19 | if release >= 'kilo': |
20 | plugins['midonet']['driver'] = ( |
21 | 'neutron.plugins.midonet.plugin.MidonetPluginV2') |
22 | @@ -245,6 +246,18 @@ |
23 | 'python-neutron-plugin-midonet') |
24 | plugins['midonet']['server_packages'].append( |
25 | 'python-networking-midonet') |
26 | +======= |
27 | + if release >= 'kilo': |
28 | + plugins['midonet']['driver'] = ( |
29 | + 'neutron.plugins.midonet.plugin.MidonetPluginV2') |
30 | + if release >= 'liberty': |
31 | + plugins['midonet']['driver'] = ( |
32 | + 'midonet.neutron.plugin_v1.MidonetPluginV2') |
33 | + plugins['midonet']['server_packages'].remove( |
34 | + 'python-neutron-plugin-midonet') |
35 | + plugins['midonet']['server_packages'].append( |
36 | + 'python-networking-midonet') |
37 | +>>>>>>> MERGE-SOURCE |
38 | return plugins |
39 | |
40 | |
41 | |
42 | === modified file 'charmhelpers/contrib/storage/linux/ceph.py' |
43 | --- charmhelpers/contrib/storage/linux/ceph.py 2016-01-04 21:27:51 +0000 |
44 | +++ charmhelpers/contrib/storage/linux/ceph.py 2016-03-05 15:43:48 +0000 |
45 | @@ -120,6 +120,7 @@ |
46 | """ |
47 | A custom error to inform the caller that a pool creation failed. Provides an error message |
48 | """ |
49 | + |
50 | def __init__(self, message): |
51 | super(PoolCreationError, self).__init__(message) |
52 | |
53 | @@ -129,6 +130,7 @@ |
54 | An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. |
55 | Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). |
56 | """ |
57 | + |
58 | def __init__(self, service, name): |
59 | self.service = service |
60 | self.name = name |
61 | @@ -180,36 +182,41 @@ |
62 | :return: int. The number of pgs to use. |
63 | """ |
64 | validator(value=pool_size, valid_type=int) |
65 | - osds = get_osds(self.service) |
66 | - if not osds: |
67 | + osd_list = get_osds(self.service) |
68 | + if not osd_list: |
69 | # NOTE(james-page): Default to 200 for older ceph versions |
70 | # which don't support OSD query from cli |
71 | return 200 |
72 | |
73 | + osd_list_length = len(osd_list) |
74 | # Calculate based on Ceph best practices |
75 | - if osds < 5: |
76 | + if osd_list_length < 5: |
77 | return 128 |
78 | - elif 5 < osds < 10: |
79 | + elif 5 < osd_list_length < 10: |
80 | return 512 |
81 | - elif 10 < osds < 50: |
82 | + elif 10 < osd_list_length < 50: |
83 | return 4096 |
84 | else: |
85 | - estimate = (osds * 100) / pool_size |
86 | + estimate = (osd_list_length * 100) / pool_size |
87 | # Return the next nearest power of 2 |
88 | index = bisect.bisect_right(powers_of_two, estimate) |
89 | return powers_of_two[index] |
90 | |
91 | |
92 | class ReplicatedPool(Pool): |
93 | - def __init__(self, service, name, replicas=2): |
94 | + def __init__(self, service, name, pg_num=None, replicas=2): |
95 | super(ReplicatedPool, self).__init__(service=service, name=name) |
96 | self.replicas = replicas |
97 | + if pg_num is None: |
98 | + self.pg_num = self.get_pgs(self.replicas) |
99 | + else: |
100 | + self.pg_num = pg_num |
101 | |
102 | def create(self): |
103 | if not pool_exists(self.service, self.name): |
104 | # Create it |
105 | - pgs = self.get_pgs(self.replicas) |
106 | - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] |
107 | + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', |
108 | + self.name, str(self.pg_num)] |
109 | try: |
110 | check_call(cmd) |
111 | except CalledProcessError: |
112 | @@ -241,7 +248,7 @@ |
113 | |
114 | pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) |
115 | # Create it |
116 | - cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), |
117 | + cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), |
118 | 'erasure', self.erasure_code_profile] |
119 | try: |
120 | check_call(cmd) |
121 | @@ -322,7 +329,8 @@ |
122 | :return: None. Can raise CalledProcessError |
123 | """ |
124 | # Set a byte quota on a RADOS pool in ceph. |
125 | - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] |
126 | + cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, |
127 | + 'max_bytes', str(max_bytes)] |
128 | try: |
129 | check_call(cmd) |
130 | except CalledProcessError: |
131 | @@ -343,7 +351,25 @@ |
132 | raise |
133 | |
134 | |
135 | -def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', |
136 | +def remove_erasure_profile(service, profile_name): |
137 | + """ |
138 | + Create a new erasure code profile if one does not already exist for it. Updates |
139 | + the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ |
140 | + for more details |
141 | + :param service: six.string_types. The Ceph user name to run the command under |
142 | + :param profile_name: six.string_types |
143 | + :return: None. Can raise CalledProcessError |
144 | + """ |
145 | + cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', |
146 | + profile_name] |
147 | + try: |
148 | + check_call(cmd) |
149 | + except CalledProcessError: |
150 | + raise |
151 | + |
152 | + |
153 | +def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', |
154 | + failure_domain='host', |
155 | data_chunks=2, coding_chunks=1, |
156 | locality=None, durability_estimator=None): |
157 | """ |
158 | |
159 | === modified file 'config.yaml' |
160 | --- config.yaml 2016-02-18 09:59:57 +0000 |
161 | +++ config.yaml 2016-03-05 15:43:48 +0000 |
162 | @@ -298,6 +298,12 @@ |
163 | description: | |
164 | A comma-separated list of nagios servicegroups. |
165 | If left empty, the nagios_context will be used as the servicegroup |
166 | + preferred-api-version: |
167 | + default: 2 |
168 | + type: int |
169 | + description: | |
170 | + Use this keystone api version for keystone endpoints and advertise this |
171 | + version to identity client charms |
172 | action-managed-upgrade: |
173 | type: boolean |
174 | default: False |
175 | |
176 | === modified file 'hooks/keystone_context.py' |
177 | --- hooks/keystone_context.py 2016-02-18 09:59:57 +0000 |
178 | +++ hooks/keystone_context.py 2016-03-05 15:43:48 +0000 |
179 | @@ -190,9 +190,15 @@ |
180 | from keystone_utils import ( |
181 | api_port, set_admin_token, endpoint_url, resolve_address, |
182 | PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths, |
183 | + get_admin_domain_id |
184 | ) |
185 | ctxt = {} |
186 | ctxt['token'] = set_admin_token(config('admin-token')) |
187 | + ctxt['api_version'] = int(config('preferred-api-version')) |
188 | + ctxt['admin_role'] = config('admin-role') |
189 | + if ctxt['api_version'] > 2: |
190 | + ctxt['admin_domain_id'] = \ |
191 | + get_admin_domain_id() or 'admin_domain_id' |
192 | ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'), |
193 | singlenode_mode=True) |
194 | ctxt['public_port'] = determine_api_port(api_port('keystone-public'), |
195 | @@ -233,10 +239,10 @@ |
196 | # correct auth URL. |
197 | ctxt['public_endpoint'] = endpoint_url( |
198 | resolve_address(PUBLIC), |
199 | - api_port('keystone-public')).rstrip('v2.0') |
200 | + api_port('keystone-public')).replace('v2.0', '') |
201 | ctxt['admin_endpoint'] = endpoint_url( |
202 | resolve_address(ADMIN), |
203 | - api_port('keystone-admin')).rstrip('v2.0') |
204 | + api_port('keystone-admin')).replace('v2.0', '') |
205 | |
206 | return ctxt |
207 | |
208 | |
209 | === modified file 'hooks/keystone_hooks.py' |
210 | --- hooks/keystone_hooks.py 2016-01-19 16:54:03 +0000 |
211 | +++ hooks/keystone_hooks.py 2016-03-05 15:43:48 +0000 |
212 | @@ -47,6 +47,7 @@ |
213 | git_install_requested, |
214 | openstack_upgrade_available, |
215 | sync_db_with_multi_ipv6_addresses, |
216 | + os_release, |
217 | ) |
218 | |
219 | from keystone_utils import ( |
220 | @@ -64,6 +65,7 @@ |
221 | services, |
222 | CLUSTER_RES, |
223 | KEYSTONE_CONF, |
224 | + POLICY_JSON, |
225 | SSH_USER, |
226 | setup_ipv6, |
227 | send_notifications, |
228 | @@ -309,6 +311,8 @@ |
229 | else: |
230 | CONFIGS.write(KEYSTONE_CONF) |
231 | leader_init_db_if_ready(use_current_context=True) |
232 | + if os_release('keystone-common') >= 'liberty': |
233 | + CONFIGS.write(POLICY_JSON) |
234 | |
235 | |
236 | @hooks.hook('pgsql-db-relation-changed') |
237 | @@ -320,6 +324,8 @@ |
238 | else: |
239 | CONFIGS.write(KEYSTONE_CONF) |
240 | leader_init_db_if_ready(use_current_context=True) |
241 | + if os_release('keystone-common') >= 'liberty': |
242 | + CONFIGS.write(POLICY_JSON) |
243 | |
244 | |
245 | @hooks.hook('identity-service-relation-changed') |
246 | |
247 | === modified file 'hooks/keystone_utils.py' |
248 | --- hooks/keystone_utils.py 2016-02-19 14:49:59 +0000 |
249 | +++ hooks/keystone_utils.py 2016-03-05 15:43:48 +0000 |
250 | @@ -166,6 +166,7 @@ |
251 | KEYSTONE_CONF_DIR = os.path.dirname(KEYSTONE_CONF) |
252 | STORED_PASSWD = "/var/lib/keystone/keystone.passwd" |
253 | STORED_TOKEN = "/var/lib/keystone/keystone.token" |
254 | +STORED_ADMIN_DOMAIN_ID = "/var/lib/keystone/keystone.admin_domain_id" |
255 | SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd' |
256 | |
257 | HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' |
258 | @@ -184,6 +185,10 @@ |
259 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
260 | SSL_SYNC_SEMAPHORE = threading.Semaphore() |
261 | SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH] |
262 | +ADMIN_DOMAIN = 'admin_domain' |
263 | +DEFAULT_DOMAIN = 'Default' |
264 | +POLICY_JSON = '/etc/keystone/policy.json' |
265 | + |
266 | BASE_RESOURCE_MAP = OrderedDict([ |
267 | (KEYSTONE_CONF, { |
268 | 'services': BASE_SERVICES, |
269 | @@ -212,6 +217,10 @@ |
270 | 'contexts': [keystone_context.ApacheSSLContext()], |
271 | 'services': ['apache2'], |
272 | }), |
273 | + (POLICY_JSON, { |
274 | + 'contexts': [keystone_context.KeystoneContext()], |
275 | + 'services': BASE_SERVICES, |
276 | + }), |
277 | ]) |
278 | |
279 | valid_services = { |
280 | @@ -329,6 +338,8 @@ |
281 | """ |
282 | resource_map = deepcopy(BASE_RESOURCE_MAP) |
283 | |
284 | + if os_release('keystone') < 'liberty': |
285 | + resource_map.pop(POLICY_JSON) |
286 | if os.path.exists('/etc/apache2/conf-available'): |
287 | resource_map.pop(APACHE_CONF) |
288 | else: |
289 | @@ -452,18 +463,30 @@ |
290 | # OLD |
291 | |
292 | |
293 | -def get_local_endpoint(): |
294 | +def get_api_suffix(): |
295 | + if get_api_version() == 2: |
296 | + api_suffix = 'v2.0' |
297 | + else: |
298 | + api_suffix = 'v3' |
299 | + return api_suffix |
300 | + |
301 | + |
302 | +def get_local_endpoint(api_suffix=None): |
303 | """Returns the URL for the local end-point bypassing haproxy/ssl""" |
304 | + if not api_suffix: |
305 | + api_suffix = get_api_suffix() |
306 | if config('prefer-ipv6'): |
307 | ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0] |
308 | - endpoint_url = 'http://[%s]:{}/v2.0/' % ipv6_addr |
309 | + endpoint_url = 'http://[%s]:{}/{}/' % ipv6_addr |
310 | local_endpoint = endpoint_url.format( |
311 | determine_api_port(api_port('keystone-admin'), |
312 | - singlenode_mode=True)) |
313 | + singlenode_mode=True), |
314 | + api_suffix) |
315 | else: |
316 | - local_endpoint = 'http://localhost:{}/v2.0/'.format( |
317 | + local_endpoint = 'http://localhost:{}/{}/'.format( |
318 | determine_api_port(api_port('keystone-admin'), |
319 | - singlenode_mode=True)) |
320 | + singlenode_mode=True), |
321 | + api_suffix) |
322 | |
323 | return local_endpoint |
324 | |
325 | @@ -506,18 +529,14 @@ |
326 | |
327 | |
328 | def is_service_present(service_name, service_type): |
329 | - import manager |
330 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
331 | - token=get_admin_token()) |
332 | + manager = get_manager() |
333 | service_id = manager.resolve_service_id(service_name, service_type) |
334 | return service_id is not None |
335 | |
336 | |
337 | def delete_service_entry(service_name, service_type): |
338 | """ Delete a service from keystone""" |
339 | - import manager |
340 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
341 | - token=get_admin_token()) |
342 | + manager = get_manager() |
343 | service_id = manager.resolve_service_id(service_name, service_type) |
344 | if service_id: |
345 | manager.api.services.delete(service_id) |
346 | @@ -526,28 +545,34 @@ |
347 | |
348 | def create_service_entry(service_name, service_type, service_desc, owner=None): |
349 | """ Add a new service entry to keystone if one does not already exist """ |
350 | - import manager |
351 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
352 | - token=get_admin_token()) |
353 | + manager = get_manager() |
354 | for service in [s._info for s in manager.api.services.list()]: |
355 | if service['name'] == service_name: |
356 | log("Service entry for '%s' already exists." % service_name, |
357 | level=DEBUG) |
358 | return |
359 | |
360 | - manager.api.services.create(name=service_name, |
361 | - service_type=service_type, |
362 | + manager.api.services.create(service_name, |
363 | + service_type, |
364 | description=service_desc) |
365 | log("Created new service entry '%s'" % service_name, level=DEBUG) |
366 | |
367 | |
368 | def create_endpoint_template(region, service, publicurl, adminurl, |
369 | internalurl): |
370 | + manager = get_manager() |
371 | + if manager.api_version == 2: |
372 | + create_endpoint_template_v2(manager, region, service, publicurl, |
373 | + adminurl, internalurl) |
374 | + else: |
375 | + create_endpoint_template_v3(manager, region, service, publicurl, |
376 | + adminurl, internalurl) |
377 | + |
378 | + |
379 | +def create_endpoint_template_v2(manager, region, service, publicurl, adminurl, |
380 | + internalurl): |
381 | """ Create a new endpoint template for service if one does not already |
382 | exist matching name *and* region """ |
383 | - import manager |
384 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
385 | - token=get_admin_token()) |
386 | service_id = manager.resolve_service_id(service) |
387 | for ep in [e._info for e in manager.api.endpoints.list()]: |
388 | if ep['service_id'] == service_id and ep['region'] == region: |
389 | @@ -566,34 +591,91 @@ |
390 | log("Updating endpoint template with new endpoint urls.") |
391 | manager.api.endpoints.delete(ep['id']) |
392 | |
393 | - manager.api.endpoints.create(region=region, |
394 | - service_id=service_id, |
395 | - publicurl=publicurl, |
396 | - adminurl=adminurl, |
397 | - internalurl=internalurl) |
398 | + manager.create_endpoints(region=region, |
399 | + service_id=service_id, |
400 | + publicurl=publicurl, |
401 | + adminurl=adminurl, |
402 | + internalurl=internalurl) |
403 | log("Created new endpoint template for '%s' in '%s'" % (region, service), |
404 | level=DEBUG) |
405 | |
406 | |
407 | +def create_endpoint_template_v3(manager, region, service, publicurl, adminurl, |
408 | + internalurl): |
409 | + service_id = manager.resolve_service_id(service) |
410 | + endpoints = { |
411 | + 'public': publicurl, |
412 | + 'admin': adminurl, |
413 | + 'internal': internalurl, |
414 | + } |
415 | + for ep_type in endpoints.keys(): |
416 | + # Delete endpoint if its has changed |
417 | + ep_deleted = manager.delete_old_endpoint_v3( |
418 | + ep_type, |
419 | + service_id, |
420 | + region, |
421 | + endpoints[ep_type] |
422 | + ) |
423 | + ep_exists = manager.find_endpoint_v3( |
424 | + ep_type, |
425 | + service_id, |
426 | + region |
427 | + ) |
428 | + if ep_deleted or not ep_exists: |
429 | + manager.api.endpoints.create( |
430 | + service_id, |
431 | + endpoints[ep_type], |
432 | + interface=ep_type, |
433 | + region=region |
434 | + ) |
435 | + |
436 | + |
437 | def create_tenant(name): |
438 | """Creates a tenant if it does not already exist""" |
439 | - import manager |
440 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
441 | - token=get_admin_token()) |
442 | - tenants = [t._info for t in manager.api.tenants.list()] |
443 | - if not tenants or name not in [t['name'] for t in tenants]: |
444 | - manager.api.tenants.create(tenant_name=name, |
445 | - description='Created by Juju') |
446 | + manager = get_manager() |
447 | + tenants = manager.resolve_tenant_id(name) |
448 | + if not tenants: |
449 | + manager.create_tenant(tenant_name=name, |
450 | + description='Created by Juju') |
451 | log("Created new tenant: %s" % name, level=DEBUG) |
452 | return |
453 | |
454 | log("Tenant '%s' already exists." % name, level=DEBUG) |
455 | |
456 | |
457 | -def user_exists(name): |
458 | - import manager |
459 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
460 | - token=get_admin_token()) |
461 | +def create_or_show_domain(name): |
462 | + """Creates a tenant if it does not already exist""" |
463 | + manager = get_manager() |
464 | + domain_id = manager.resolve_domain_id(name) |
465 | + if domain_id: |
466 | + log("Domain '%s' already exists." % name, level=DEBUG) |
467 | + else: |
468 | + manager.create_domain(domain_name=name, |
469 | + description='Created by Juju') |
470 | + log("Created new domain: %s" % name, level=DEBUG) |
471 | + domain_id = manager.resolve_domain_id(name) |
472 | + return domain_id |
473 | + |
474 | + |
475 | +def user_exists(name, domain=None): |
476 | + manager = get_manager() |
477 | + if domain: |
478 | + domain_id = manager.resolve_domain_id(domain) |
479 | + for user in manager.api.users.list(): |
480 | + if user.name == name: |
481 | + # In v3 Domains are seperate user namespaces so need to check that |
482 | + # the domain matched if provided |
483 | + if domain: |
484 | + if domain_id == user.domain_id: |
485 | + return True |
486 | + else: |
487 | + return True |
488 | + |
489 | + return False |
490 | + |
491 | + |
492 | +def old_user_exists(name): |
493 | + manager = get_manager() |
494 | users = [u._info for u in manager.api.users.list()] |
495 | if not users or name not in [u['name'] for u in users]: |
496 | return False |
497 | @@ -601,32 +683,44 @@ |
498 | return True |
499 | |
500 | |
501 | -def create_user(name, password, tenant): |
502 | +def create_user(name, password, tenant=None, domain=None): |
503 | """Creates a user if it doesn't already exist, as a member of tenant""" |
504 | - import manager |
505 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
506 | - token=get_admin_token()) |
507 | - if user_exists(name): |
508 | + manager = get_manager() |
509 | + if user_exists(name, domain=domain): |
510 | log("A user named '%s' already exists" % name, level=DEBUG) |
511 | return |
512 | |
513 | - tenant_id = manager.resolve_tenant_id(tenant) |
514 | - if not tenant_id: |
515 | - error_out('Could not resolve tenant_id for tenant %s' % tenant) |
516 | - |
517 | - manager.api.users.create(name=name, |
518 | - password=password, |
519 | - email='juju@localhost', |
520 | - tenant_id=tenant_id) |
521 | + tenant_id = None |
522 | + if tenant: |
523 | + tenant_id = manager.resolve_tenant_id(tenant) |
524 | + if not tenant_id: |
525 | + error_out('Could not resolve tenant_id for tenant %s' % tenant) |
526 | + |
527 | + domain_id = None |
528 | + if domain: |
529 | + domain_id = manager.resolve_domain_id(domain) |
530 | + if not domain_id: |
531 | + error_out('Could not resolve domain_id for domain %s' % domain) |
532 | + |
533 | + manager.create_user(name=name, |
534 | + password=password, |
535 | + email='juju@localhost', |
536 | + tenant_id=tenant_id, |
537 | + domain_id=domain_id) |
538 | log("Created new user '%s' tenant: %s" % (name, tenant_id), |
539 | level=DEBUG) |
540 | |
541 | |
542 | -def create_role(name, user=None, tenant=None): |
543 | +def get_manager(api_version=None): |
544 | + """Return a keystonemanager for the correct API version""" |
545 | + from manager import get_keystone_manager |
546 | + return get_keystone_manager(get_local_endpoint(), get_admin_token(), |
547 | + api_version) |
548 | + |
549 | + |
550 | +def create_role(name, user=None, tenant=None, domain=None): |
551 | """Creates a role if it doesn't already exist. grants role to user""" |
552 | - import manager |
553 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
554 | - token=get_admin_token()) |
555 | + manager = get_manager() |
556 | roles = [r._info for r in manager.api.roles.list()] |
557 | if not roles or name not in [r['name'] for r in roles]: |
558 | manager.api.roles.create(name=name) |
559 | @@ -640,31 +734,36 @@ |
560 | # NOTE(adam_g): Keystone client requires id's for add_user_role, not names |
561 | user_id = manager.resolve_user_id(user) |
562 | role_id = manager.resolve_role_id(name) |
563 | - tenant_id = manager.resolve_tenant_id(tenant) |
564 | - |
565 | - if None in [user_id, role_id, tenant_id]: |
566 | - error_out("Could not resolve [%s, %s, %s]" % |
567 | - (user_id, role_id, tenant_id)) |
568 | - |
569 | - grant_role(user, name, tenant) |
570 | - |
571 | - |
572 | -def grant_role(user, role, tenant): |
573 | + |
574 | + if None in [user_id, role_id]: |
575 | + error_out("Could not resolve [%s, %s]" % |
576 | + (user_id, role_id)) |
577 | + |
578 | + grant_role(user, name, tenant, domain) |
579 | + |
580 | + |
581 | +def grant_role(user, role, tenant=None, domain=None): |
582 | """Grant user and tenant a specific role""" |
583 | - import manager |
584 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
585 | - token=get_admin_token()) |
586 | + manager = get_manager() |
587 | log("Granting user '%s' role '%s' on tenant '%s'" % |
588 | (user, role, tenant)) |
589 | user_id = manager.resolve_user_id(user) |
590 | role_id = manager.resolve_role_id(role) |
591 | - tenant_id = manager.resolve_tenant_id(tenant) |
592 | - |
593 | - cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id) |
594 | + tenant_id = None |
595 | + if tenant: |
596 | + tenant_id = manager.resolve_tenant_id(tenant) |
597 | + |
598 | + domain_id = None |
599 | + if domain: |
600 | + domain_id = manager.resolve_domain_id(domain) |
601 | + |
602 | + cur_roles = manager.roles_for_user(user_id, tenant_id=tenant_id, |
603 | + domain_id=domain_id) |
604 | if not cur_roles or role_id not in [r.id for r in cur_roles]: |
605 | - manager.api.roles.add_user_role(user=user_id, |
606 | - role=role_id, |
607 | - tenant=tenant_id) |
608 | + manager.add_user_role(user=user_id, |
609 | + role=role_id, |
610 | + tenant=tenant_id, |
611 | + domain=domain_id) |
612 | log("Granted user '%s' role '%s' on tenant '%s'" % |
613 | (user, role, tenant), level=DEBUG) |
614 | else: |
615 | @@ -677,6 +776,11 @@ |
616 | fd.writelines("%s\n" % passwd) |
617 | |
618 | |
619 | +def store_admin_domain_id(domain_id): |
620 | + with open(STORED_ADMIN_DOMAIN_ID, 'w+') as fd: |
621 | + fd.writelines("%s\n" % domain_id) |
622 | + |
623 | + |
624 | def get_admin_passwd(): |
625 | passwd = config("admin-password") |
626 | if passwd and passwd.lower() != "none": |
627 | @@ -708,6 +812,13 @@ |
628 | return passwd |
629 | |
630 | |
631 | +def get_api_version(): |
632 | + api_version = config('preferred-api-version') |
633 | + if api_version not in [2, 3]: |
634 | + raise ValueError('Bad preferred-api-version') |
635 | + return api_version |
636 | + |
637 | + |
638 | def ensure_initial_admin(config): |
639 | # Allow retry on fail since leader may not be ready yet. |
640 | # NOTE(hopem): ks client may not be installed at module import time so we |
641 | @@ -734,13 +845,27 @@ |
642 | """ |
643 | create_tenant("admin") |
644 | create_tenant(config("service-tenant")) |
645 | + if get_api_version() > 2: |
646 | + domain_id = create_or_show_domain(ADMIN_DOMAIN) |
647 | + store_admin_domain_id(domain_id) |
648 | # User is managed by ldap backend when using ldap identity |
649 | if not (config('identity-backend') == |
650 | 'ldap' and config('ldap-readonly')): |
651 | passwd = get_admin_passwd() |
652 | if passwd: |
653 | - create_user_credentials(config('admin-user'), 'admin', passwd, |
654 | - new_roles=[config('admin-role')]) |
655 | + if get_api_version() > 2: |
656 | + create_user_credentials(config('admin-user'), passwd, |
657 | + domain=ADMIN_DOMAIN) |
658 | + create_role(config('admin-role'), config('admin-user'), |
659 | + domain=ADMIN_DOMAIN) |
660 | + grant_role(config('admin-user'), config('admin-role'), |
661 | + tenant='admin') |
662 | + grant_role(config('admin-user'), config('admin-role'), |
663 | + domain=ADMIN_DOMAIN) |
664 | + else: |
665 | + create_user_credentials(config('admin-user'), passwd, |
666 | + tenant='admin', |
667 | + new_roles=[config('admin-role')]) |
668 | |
669 | create_service_entry("keystone", "identity", |
670 | "Keystone Identity Service") |
671 | @@ -751,39 +876,49 @@ |
672 | internal_ip=resolve_address(INTERNAL), |
673 | admin_ip=resolve_address(ADMIN), |
674 | auth_port=config("admin-port"), |
675 | - region=region) |
676 | + region=region, |
677 | + api_version=get_api_version()) |
678 | |
679 | return _ensure_initial_admin(config) |
680 | |
681 | |
682 | -def endpoint_url(ip, port): |
683 | +def endpoint_url(ip, port, suffix=None): |
684 | proto = 'http' |
685 | if https(): |
686 | proto = 'https' |
687 | if is_ipv6(ip): |
688 | ip = "[{}]".format(ip) |
689 | - return "%s://%s:%s/v2.0" % (proto, ip, port) |
690 | + if suffix: |
691 | + ep = "%s://%s:%s/%s" % (proto, ip, port, suffix) |
692 | + else: |
693 | + ep = "%s://%s:%s" % (proto, ip, port) |
694 | + return ep |
695 | |
696 | |
697 | def create_keystone_endpoint(public_ip, service_port, |
698 | - internal_ip, admin_ip, auth_port, region): |
699 | - create_endpoint_template(region, "keystone", |
700 | - endpoint_url(public_ip, service_port), |
701 | - endpoint_url(admin_ip, auth_port), |
702 | - endpoint_url(internal_ip, service_port)) |
703 | + internal_ip, admin_ip, auth_port, region, |
704 | + api_version): |
705 | + api_suffix = '' |
706 | + if api_version == 2: |
707 | + api_suffix = 'v2.0' |
708 | + api_suffix = get_api_suffix() |
709 | + create_endpoint_template( |
710 | + region, "keystone", |
711 | + endpoint_url(public_ip, service_port, suffix=api_suffix), |
712 | + endpoint_url(admin_ip, auth_port, suffix=api_suffix), |
713 | + endpoint_url(internal_ip, service_port, suffix=api_suffix), |
714 | + ) |
715 | |
716 | |
717 | def update_user_password(username, password): |
718 | - import manager |
719 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
720 | - token=get_admin_token()) |
721 | + manager = get_manager() |
722 | log("Updating password for user '%s'" % username) |
723 | |
724 | user_id = manager.resolve_user_id(username) |
725 | if user_id is None: |
726 | error_out("Could not resolve user id for '%s'" % username) |
727 | |
728 | - manager.api.users.update_password(user=user_id, password=password) |
729 | + manager.update_password(user=user_id, password=password) |
730 | log("Successfully updated password for user '%s'" % |
731 | username) |
732 | |
733 | @@ -1361,22 +1496,23 @@ |
734 | return result |
735 | |
736 | |
737 | -def create_user_credentials(user, tenant, passwd, new_roles=None, grants=None): |
738 | +def create_user_credentials(user, passwd, tenant=None, new_roles=None, |
739 | + grants=None, domain=None): |
740 | """Create user credentials. |
741 | |
742 | Optionally adds role grants to user and/or creates new roles. |
743 | """ |
744 | log("Creating service credentials for '%s'" % user, level=DEBUG) |
745 | - if user_exists(user): |
746 | + if user_exists(user, domain=domain): |
747 | log("User '%s' already exists - updating password" % (user), |
748 | level=DEBUG) |
749 | update_user_password(user, passwd) |
750 | else: |
751 | - create_user(user, passwd, tenant) |
752 | + create_user(user, passwd, tenant, domain) |
753 | |
754 | if grants: |
755 | for role in grants: |
756 | - grant_role(user, role, tenant) |
757 | + grant_role(user, role, tenant, domain) |
758 | else: |
759 | log("No role grants requested for user '%s'" % (user), level=DEBUG) |
760 | |
761 | @@ -1385,7 +1521,7 @@ |
762 | # Currently used by Swift and Ceilometer. |
763 | for role in new_roles: |
764 | log("Creating requested role '%s'" % role, level=DEBUG) |
765 | - create_role(role, user, tenant) |
766 | + create_role(role, user, tenant, domain) |
767 | |
768 | return passwd |
769 | |
770 | @@ -1400,15 +1536,20 @@ |
771 | if not tenant: |
772 | raise Exception("No service tenant provided in config") |
773 | |
774 | - return create_user_credentials(user, tenant, get_service_password(user), |
775 | - new_roles=new_roles, |
776 | - grants=[config('admin-role')]) |
777 | - |
778 | + if get_api_version() == 2: |
779 | + domain = None |
780 | + else: |
781 | + domain = DEFAULT_DOMAIN |
782 | + return create_user_credentials(user, get_service_password(user), |
783 | + tenant=tenant, new_roles=new_roles, |
784 | + grants=[config('admin-role')], |
785 | + domain=domain) |
786 | + |
787 | + |
788 | +# NOTE(review): retry decorator intentionally left disabled — @retry_on_exception(5, base_delay=3); confirm whether transient keystone errors still warrant retries here |
789 | |
790 | def add_service_to_keystone(relation_id=None, remote_unit=None): |
791 | - import manager |
792 | - manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
793 | - token=get_admin_token()) |
794 | + manager = get_manager() |
795 | settings = relation_get(rid=relation_id, unit=remote_unit) |
796 | # the minimum settings needed per endpoint |
797 | single = set(['service', 'region', 'public_url', 'admin_url', |
798 | @@ -1546,6 +1687,7 @@ |
799 | # we return a token, information about our API endpoints, and the generated |
800 | # service credentials |
801 | service_tenant = config('service-tenant') |
802 | + grant_role(service_username, 'Admin', service_tenant, 'default') |
803 | |
804 | # NOTE(dosaboy): we use __null__ to represent settings that are to be |
805 | # routed to relations via the cluster relation and set to None. |
806 | @@ -1565,6 +1707,7 @@ |
807 | "ca_cert": '__null__', |
808 | "auth_protocol": protocol, |
809 | "service_protocol": protocol, |
810 | + "api_version": get_api_version(), |
811 | } |
812 | |
813 | # generate or get a new cert/key for service if set to manage certs. |
814 | @@ -1863,7 +2006,6 @@ |
815 | |
816 | @param configs: a templating.OSConfigRenderer() object |
817 | """ |
818 | - |
819 | if is_paused(): |
820 | status_set("maintenance", |
821 | "Paused. Use 'resume' action to resume normal service.") |
822 | @@ -1871,5 +2013,19 @@ |
823 | |
824 | # set the status according to the current state of the contexts |
825 | set_os_workload_status( |
826 | +<<<<<<< TREE |
827 | configs, REQUIRED_INTERFACES, charm_func=check_optional_relations, |
828 | services=services(), ports=determine_ports()) |
829 | +======= |
830 | + configs, REQUIRED_INTERFACES, charm_func=check_optional_relations) |
831 | + |
832 | + |
def get_admin_domain_id():
    """Return the admin domain id cached on disk, or None if not stored."""
    if not os.path.isfile(STORED_ADMIN_DOMAIN_ID):
        return None
    log("Loading stored domain id from %s" % STORED_ADMIN_DOMAIN_ID,
        level=INFO)
    with open(STORED_ADMIN_DOMAIN_ID, 'r') as fd:
        return fd.readline().strip('\n')
841 | +>>>>>>> MERGE-SOURCE |
842 | |
843 | === modified file 'hooks/manager.py' |
844 | --- hooks/manager.py 2016-01-12 11:09:46 +0000 |
845 | +++ hooks/manager.py 2016-03-05 15:43:48 +0000 |
846 | @@ -1,12 +1,57 @@ |
847 | #!/usr/bin/python |
848 | from keystoneclient.v2_0 import client |
849 | +from keystoneclient.v3 import client as keystoneclient_v3 |
850 | +from keystoneclient.auth import token_endpoint |
851 | +from keystoneclient import session |
852 | + |
853 | + |
854 | +def _get_keystone_manager_class(endpoint, token, api_version): |
855 | + """Return KeystoneManager class for the given API version""" |
856 | + if api_version == 2: |
857 | + return KeystoneManager2(endpoint, token) |
858 | + if api_version == 3: |
859 | + return KeystoneManager3(endpoint, token) |
860 | + raise ValueError('No manager found for api version {}'.format(api_version)) |
861 | + |
862 | + |
def get_keystone_manager(endpoint, token, api_version):
    """Return a keystone manager for the correct API version.

    If api_version is given, build a manager for that version directly.
    Otherwise probe the service catalogue through a best-guess manager,
    work out which identity API version is published and return a
    manager matching it.
    # XXX the keystone client should be able to do version detection
    # automatically so the probing below could be greatly simplified.
    """
    if api_version:
        return _get_keystone_manager_class(endpoint, token, api_version)

    # Guess the client version from the endpoint path so we can talk to
    # keystone at all, then interrogate the catalogue.
    if 'v2.0' in endpoint.split('/'):
        manager = _get_keystone_manager_class(endpoint, token, 2)
    else:
        manager = _get_keystone_manager_class(endpoint, token, 3)

    if endpoint.endswith('/'):
        base_ep = endpoint.rsplit('/', 2)[0]
    else:
        base_ep = endpoint.rsplit('/', 1)[0]

    # NOTE: svc_id must be initialised — previously a catalogue with no
    # 'identity' service raised NameError in the endpoint loop below.
    svc_id = None
    for svc in manager.api.services.list():
        if svc.type == 'identity':
            svc_id = svc.id
            break

    version = None
    if svc_id is not None:
        for ep in manager.api.endpoints.list():
            if ep.service_id == svc_id and hasattr(ep, 'adminurl'):
                version = ep.adminurl.split('/')[-1]

    if version == 'v2.0':
        return _get_keystone_manager_class(base_ep + '/v2.0', token, 2)
    elif version == 'v3':
        return _get_keystone_manager_class(base_ep + '/v3', token, 3)
    return manager
897 | |
898 | |
899 | class KeystoneManager(object): |
900 | |
901 | - def __init__(self, endpoint, token): |
902 | - self.api = client.Client(endpoint=endpoint, token=token) |
903 | - |
904 | def resolve_tenant_id(self, name): |
905 | """Find the tenant_id of a given tenant""" |
906 | tenants = [t._info for t in self.api.tenants.list()] |
907 | @@ -14,6 +59,9 @@ |
908 | if name == t['name']: |
909 | return t['id'] |
910 | |
    def resolve_domain_id(self, name):
        """Return the id of the named domain, or None.

        Stub on the base class: the v2.0 identity API has no concept of
        domains.  KeystoneManager3 overrides this with a real lookup.
        """
        pass
913 | + |
914 | def resolve_role_id(self, name): |
915 | """Find the role_id of a given role""" |
916 | roles = [r._info for r in self.api.roles.list()] |
917 | @@ -45,3 +93,135 @@ |
918 | for s in services: |
919 | if type == s['type']: |
920 | return s['id'] |
921 | + |
922 | + |
class KeystoneManager2(KeystoneManager):
    """Manager wrapping the keystone v2.0 client API.

    The domain-related parameters accepted by several methods exist
    only for signature compatibility with KeystoneManager3; the v2.0
    API has no domains, so they are ignored.
    """

    def __init__(self, endpoint, token):
        self.api_version = 2
        self.api = client.Client(endpoint=endpoint, token=token)

    def create_endpoints(self, region, service_id, publicurl, adminurl,
                         internalurl):
        """Register public/admin/internal URLs as a single v2 endpoint."""
        self.api.endpoints.create(region=region, service_id=service_id,
                                  publicurl=publicurl, adminurl=adminurl,
                                  internalurl=internalurl)

    def tenants_list(self):
        """Return all tenants known to keystone."""
        return self.api.tenants.list()

    def create_tenant(self, tenant_name, description, domain='default'):
        """Create a tenant (domain is ignored under v2.0)."""
        self.api.tenants.create(tenant_name=tenant_name,
                                description=description)

    def delete_tenant(self, tenant_id):
        """Delete the tenant with the given id."""
        self.api.tenants.delete(tenant_id)

    def create_user(self, name, password, email, tenant_id=None,
                    domain_id=None):
        """Create a user (domain_id is ignored under v2.0)."""
        self.api.users.create(name=name, password=password, email=email,
                              tenant_id=tenant_id)

    def update_password(self, user, password):
        """Set a new password for the given user."""
        self.api.users.update_password(user=user, password=password)

    def roles_for_user(self, user_id, tenant_id=None, domain_id=None):
        """Return the user's roles on a tenant (domain_id is ignored)."""
        return self.api.roles.roles_for_user(user_id, tenant_id)

    def add_user_role(self, user, role, tenant, domain):
        """Grant a role to a user on a tenant (domain is ignored)."""
        self.api.roles.add_user_role(user=user, role=role, tenant=tenant)
960 | + |
961 | + |
class KeystoneManager3(KeystoneManager):
    """Manager wrapping the keystone v3 client API.

    'Tenant' in method names/parameters maps onto a v3 'project'.
    """

    def __init__(self, endpoint, token):
        self.api_version = 3
        keystone_auth_v3 = token_endpoint.Token(endpoint=endpoint,
                                                token=token)
        keystone_session_v3 = session.Session(auth=keystone_auth_v3)
        self.api = keystoneclient_v3.Client(session=keystone_session_v3)

    def resolve_tenant_id(self, name):
        """Find the tenant_id (project id) of a given tenant."""
        tenants = [t._info for t in self.api.projects.list()]
        for t in tenants:
            if name == t['name']:
                return t['id']

    def resolve_domain_id(self, name):
        """Find the domain_id of a given domain."""
        domains = [d._info for d in self.api.domains.list()]
        for d in domains:
            if name == d['name']:
                return d['id']

    def create_endpoints(self, region, service_id, publicurl, adminurl,
                         internalurl):
        """Register one endpoint per interface (public/admin/internal)."""
        self.api.endpoints.create(service_id, publicurl, interface='public',
                                  region=region)
        self.api.endpoints.create(service_id, adminurl, interface='admin',
                                  region=region)
        self.api.endpoints.create(service_id, internalurl,
                                  interface='internal', region=region)

    def tenants_list(self):
        """Return all projects known to keystone."""
        return self.api.projects.list()

    def create_domain(self, domain_name, description):
        """Create a new domain."""
        self.api.domains.create(domain_name, description=description)

    def create_tenant(self, tenant_name, description, domain='default'):
        """Create a project in the given domain."""
        self.api.projects.create(tenant_name, domain, description=description)

    def delete_tenant(self, tenant_id):
        """Delete the project with the given id."""
        self.api.projects.delete(tenant_id)

    def create_user(self, name, password, email, tenant_id=None,
                    domain_id=None):
        """Create a user, defaulting to the 'default' domain.

        If tenant_id is supplied the user is created with that project
        as its default project.
        """
        if not domain_id:
            domain_id = self.resolve_domain_id('default')
        if tenant_id:
            self.api.users.create(name,
                                  domain=domain_id,
                                  password=password,
                                  email=email,
                                  project=tenant_id)
        else:
            self.api.users.create(name,
                                  domain=domain_id,
                                  password=password,
                                  email=email)

    def update_password(self, user, password):
        """Set a new password for the given user."""
        self.api.users.update(user, password=password)

    def roles_for_user(self, user_id, tenant_id=None, domain_id=None):
        # The roles API accepts either a domain or a project scope, not
        # both; prefer the domain scope when one is supplied.
        if domain_id:
            return self.api.roles.list(user_id, domain=domain_id)
        else:
            return self.api.roles.list(user_id, project=tenant_id)

    def add_user_role(self, user, role, tenant, domain):
        """Grant a role to a user.

        The grant is made on the domain and/or the project — whichever
        of the two is supplied (both grants are made if both are given).
        """
        if domain:
            self.api.roles.grant(role, user=user, domain=domain)
        if tenant:
            self.api.roles.grant(role, user=user, project=tenant)

    def find_endpoint_v3(self, interface, service_id, region):
        """Return all endpoints matching interface, service and region."""
        found_eps = []
        for ep in self.api.endpoints.list():
            if ep.service_id == service_id and ep.region == region and \
                    ep.interface == interface:
                found_eps.append(ep)
        return found_eps

    def delete_old_endpoint_v3(self, interface, service_id, region, url):
        """Delete stale endpoints whose url differs from the given one.

        Returns True if at least one endpoint was removed.

        NOTE: previously this returned after deleting only the first
        stale endpoint, leaving any further stale entries in place; now
        all mismatching endpoints are removed.
        """
        deleted = False
        for ep in self.find_endpoint_v3(interface, service_id, region):
            if getattr(ep, 'url') != url:
                self.api.endpoints.delete(ep.id)
                deleted = True
        return deleted
1053 | |
1054 | === added directory 'templates/liberty' |
1055 | === added file 'templates/liberty/policy.json' |
1056 | --- templates/liberty/policy.json 1970-01-01 00:00:00 +0000 |
1057 | +++ templates/liberty/policy.json 2016-03-05 15:43:48 +0000 |
1058 | @@ -0,0 +1,382 @@ |
1059 | +{% if api_version == 3 -%} |
1060 | +{ |
1061 | + "admin_required": "role:{{ admin_role }}", |
1062 | + "cloud_admin": "rule:admin_required and domain_id:{{ admin_domain_id }}", |
1063 | + "service_role": "role:service", |
1064 | + "service_or_admin": "rule:admin_required or rule:service_role", |
1065 | + "owner" : "user_id:%(user_id)s or user_id:%(target.token.user_id)s", |
1066 | + "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner", |
1067 | + "admin_or_cloud_admin": "rule:admin_required or rule:cloud_admin", |
1068 | + "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s", |
1069 | + "service_admin_or_owner": "rule:service_or_admin or rule:owner", |
1070 | + |
1071 | + "default": "rule:admin_required", |
1072 | + |
1073 | + "identity:get_region": "", |
1074 | + "identity:list_regions": "", |
1075 | + "identity:create_region": "rule:cloud_admin", |
1076 | + "identity:update_region": "rule:cloud_admin", |
1077 | + "identity:delete_region": "rule:cloud_admin", |
1078 | + |
1079 | + "identity:get_service": "rule:admin_or_cloud_admin", |
1080 | + "identity:list_services": "rule:admin_or_cloud_admin", |
1081 | + "identity:create_service": "rule:cloud_admin", |
1082 | + "identity:update_service": "rule:cloud_admin", |
1083 | + "identity:delete_service": "rule:cloud_admin", |
1084 | + |
1085 | + "identity:get_endpoint": "rule:admin_or_cloud_admin", |
1086 | + "identity:list_endpoints": "rule:admin_or_cloud_admin", |
1087 | + "identity:create_endpoint": "rule:cloud_admin", |
1088 | + "identity:update_endpoint": "rule:cloud_admin", |
1089 | + "identity:delete_endpoint": "rule:cloud_admin", |
1090 | + |
1091 | + "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id", |
1092 | + "identity:list_domains": "rule:cloud_admin", |
1093 | + "identity:create_domain": "rule:cloud_admin", |
1094 | + "identity:update_domain": "rule:cloud_admin", |
1095 | + "identity:delete_domain": "rule:cloud_admin", |
1096 | + |
1097 | + "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s", |
1098 | + "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s", |
1099 | + "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", |
1100 | + "identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id", |
1101 | + "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id", |
1102 | + "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id", |
1103 | + "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", |
1104 | + "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", |
1105 | + |
1106 | + "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s", |
1107 | + "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s", |
1108 | + "identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", |
1109 | + "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id", |
1110 | + "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id", |
1111 | + "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", |
1112 | + "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", |
1113 | + |
1114 | + "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s", |
1115 | + "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s", |
1116 | + "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", |
1117 | + "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id", |
1118 | + "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_domain_id", |
1119 | + "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id", |
1120 | + "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", |
1121 | + "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", |
1122 | + "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", |
1123 | + "identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", |
1124 | + "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", |
1125 | + "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", |
1126 | + |
1127 | + "identity:get_credential": "rule:admin_required", |
1128 | + "identity:list_credentials": "rule:admin_required or user_id:%(user_id)s", |
1129 | + "identity:create_credential": "rule:admin_required", |
1130 | + "identity:update_credential": "rule:admin_required", |
1131 | + "identity:delete_credential": "rule:admin_required", |
1132 | + |
1133 | + "identity:ec2_get_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)", |
1134 | + "identity:ec2_list_credentials": "rule:admin_or_cloud_admin or rule:owner", |
1135 | + "identity:ec2_create_credential": "rule:admin_or_cloud_admin or rule:owner", |
1136 | + "identity:ec2_delete_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)", |
1137 | + |
1138 | + "identity:get_role": "rule:admin_or_cloud_admin", |
1139 | + "identity:list_roles": "rule:admin_or_cloud_admin", |
1140 | + "identity:create_role": "rule:cloud_admin", |
1141 | + "identity:update_role": "rule:cloud_admin", |
1142 | + "identity:delete_role": "rule:cloud_admin", |
1143 | + |
1144 | + "domain_admin_for_grants": "rule:admin_required and (domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s)", |
1145 | + "project_admin_for_grants": "rule:admin_required and project_id:%(project_id)s", |
1146 | + "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", |
1147 | + "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", |
1148 | + "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", |
1149 | + "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", |
1150 | + |
1151 | + "admin_on_domain_filter" : "rule:admin_required and domain_id:%(scope.domain.id)s", |
1152 | + "admin_on_project_filter" : "rule:admin_required and project_id:%(scope.project.id)s", |
1153 | + "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter", |
1154 | + |
1155 | + "identity:get_policy": "rule:cloud_admin", |
1156 | + "identity:list_policies": "rule:cloud_admin", |
1157 | + "identity:create_policy": "rule:cloud_admin", |
1158 | + "identity:update_policy": "rule:cloud_admin", |
1159 | + "identity:delete_policy": "rule:cloud_admin", |
1160 | + |
1161 | + "identity:change_password": "rule:owner", |
1162 | + "identity:check_token": "rule:admin_or_owner", |
1163 | + "identity:validate_token": "rule:service_admin_or_owner", |
1164 | + "identity:validate_token_head": "rule:service_or_admin", |
1165 | + "identity:revocation_list": "rule:service_or_admin", |
1166 | + "identity:revoke_token": "rule:admin_or_owner", |
1167 | + |
1168 | + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", |
1169 | + "identity:list_trusts": "", |
1170 | + "identity:list_roles_for_trust": "", |
1171 | + "identity:get_role_for_trust": "", |
1172 | + "identity:delete_trust": "", |
1173 | + |
1174 | + "identity:create_consumer": "rule:admin_required", |
1175 | + "identity:get_consumer": "rule:admin_required", |
1176 | + "identity:list_consumers": "rule:admin_required", |
1177 | + "identity:delete_consumer": "rule:admin_required", |
1178 | + "identity:update_consumer": "rule:admin_required", |
1179 | + |
1180 | + "identity:authorize_request_token": "rule:admin_required", |
1181 | + "identity:list_access_token_roles": "rule:admin_required", |
1182 | + "identity:get_access_token_role": "rule:admin_required", |
1183 | + "identity:list_access_tokens": "rule:admin_required", |
1184 | + "identity:get_access_token": "rule:admin_required", |
1185 | + "identity:delete_access_token": "rule:admin_required", |
1186 | + |
1187 | + "identity:list_projects_for_endpoint": "rule:admin_required", |
1188 | + "identity:add_endpoint_to_project": "rule:admin_required", |
1189 | + "identity:check_endpoint_in_project": "rule:admin_required", |
1190 | + "identity:list_endpoints_for_project": "rule:admin_required", |
1191 | + "identity:remove_endpoint_from_project": "rule:admin_required", |
1192 | + |
1193 | + "identity:create_endpoint_group": "rule:admin_required", |
1194 | + "identity:list_endpoint_groups": "rule:admin_required", |
1195 | + "identity:get_endpoint_group": "rule:admin_required", |
1196 | + "identity:update_endpoint_group": "rule:admin_required", |
1197 | + "identity:delete_endpoint_group": "rule:admin_required", |
1198 | + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", |
1199 | + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", |
1200 | + "identity:get_endpoint_group_in_project": "rule:admin_required", |
1201 | + "identity:list_endpoint_groups_for_project": "rule:admin_required", |
1202 | + "identity:add_endpoint_group_to_project": "rule:admin_required", |
1203 | + "identity:remove_endpoint_group_from_project": "rule:admin_required", |
1204 | + |
1205 | + "identity:create_identity_provider": "rule:cloud_admin", |
1206 | + "identity:list_identity_providers": "rule:cloud_admin", |
1207 | + "identity:get_identity_providers": "rule:cloud_admin", |
1208 | + "identity:update_identity_provider": "rule:cloud_admin", |
1209 | + "identity:delete_identity_provider": "rule:cloud_admin", |
1210 | + |
1211 | + "identity:create_protocol": "rule:cloud_admin", |
1212 | + "identity:update_protocol": "rule:cloud_admin", |
1213 | + "identity:get_protocol": "rule:cloud_admin", |
1214 | + "identity:list_protocols": "rule:cloud_admin", |
1215 | + "identity:delete_protocol": "rule:cloud_admin", |
1216 | + |
1217 | + "identity:create_mapping": "rule:cloud_admin", |
1218 | + "identity:get_mapping": "rule:cloud_admin", |
1219 | + "identity:list_mappings": "rule:cloud_admin", |
1220 | + "identity:delete_mapping": "rule:cloud_admin", |
1221 | + "identity:update_mapping": "rule:cloud_admin", |
1222 | + |
1223 | + "identity:create_service_provider": "rule:cloud_admin", |
1224 | + "identity:list_service_providers": "rule:cloud_admin", |
1225 | + "identity:get_service_provider": "rule:cloud_admin", |
1226 | + "identity:update_service_provider": "rule:cloud_admin", |
1227 | + "identity:delete_service_provider": "rule:cloud_admin", |
1228 | + |
1229 | + "identity:get_auth_catalog": "", |
1230 | + "identity:get_auth_projects": "", |
1231 | + "identity:get_auth_domains": "", |
1232 | + |
1233 | + "identity:list_projects_for_groups": "", |
1234 | + "identity:list_domains_for_groups": "", |
1235 | + |
1236 | + "identity:list_revoke_events": "", |
1237 | + |
1238 | + "identity:create_policy_association_for_endpoint": "rule:cloud_admin", |
1239 | + "identity:check_policy_association_for_endpoint": "rule:cloud_admin", |
1240 | + "identity:delete_policy_association_for_endpoint": "rule:cloud_admin", |
1241 | + "identity:create_policy_association_for_service": "rule:cloud_admin", |
1242 | + "identity:check_policy_association_for_service": "rule:cloud_admin", |
1243 | + "identity:delete_policy_association_for_service": "rule:cloud_admin", |
1244 | + "identity:create_policy_association_for_region_and_service": "rule:cloud_admin", |
1245 | + "identity:check_policy_association_for_region_and_service": "rule:cloud_admin", |
1246 | + "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin", |
1247 | + "identity:get_policy_for_endpoint": "rule:cloud_admin", |
1248 | + "identity:list_endpoints_for_policy": "rule:cloud_admin", |
1249 | + |
1250 | + "identity:create_domain_config": "rule:cloud_admin", |
1251 | + "identity:get_domain_config": "rule:cloud_admin", |
1252 | + "identity:update_domain_config": "rule:cloud_admin", |
1253 | + "identity:delete_domain_config": "rule:cloud_admin" |
1254 | +} |
1255 | +{% else -%} |
1256 | +{ |
1257 | + "admin_required": "role:admin or is_admin:1", |
1258 | + "service_role": "role:service", |
1259 | + "service_or_admin": "rule:admin_required or rule:service_role", |
1260 | + "owner" : "user_id:%(user_id)s", |
1261 | + "admin_or_owner": "rule:admin_required or rule:owner", |
1262 | + "token_subject": "user_id:%(target.token.user_id)s", |
1263 | + "admin_or_token_subject": "rule:admin_required or rule:token_subject", |
1264 | + "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject", |
1265 | + |
1266 | + "default": "rule:admin_required", |
1267 | + |
1268 | + "identity:get_region": "", |
1269 | + "identity:list_regions": "", |
1270 | + "identity:create_region": "rule:admin_required", |
1271 | + "identity:update_region": "rule:admin_required", |
1272 | + "identity:delete_region": "rule:admin_required", |
1273 | + |
1274 | + "identity:get_service": "rule:admin_required", |
1275 | + "identity:list_services": "rule:admin_required", |
1276 | + "identity:create_service": "rule:admin_required", |
1277 | + "identity:update_service": "rule:admin_required", |
1278 | + "identity:delete_service": "rule:admin_required", |
1279 | + |
1280 | + "identity:get_endpoint": "rule:admin_required", |
1281 | + "identity:list_endpoints": "rule:admin_required", |
1282 | + "identity:create_endpoint": "rule:admin_required", |
1283 | + "identity:update_endpoint": "rule:admin_required", |
1284 | + "identity:delete_endpoint": "rule:admin_required", |
1285 | + |
1286 | + "identity:get_domain": "rule:admin_required", |
1287 | + "identity:list_domains": "rule:admin_required", |
1288 | + "identity:create_domain": "rule:admin_required", |
1289 | + "identity:update_domain": "rule:admin_required", |
1290 | + "identity:delete_domain": "rule:admin_required", |
1291 | + |
1292 | + "identity:get_project": "rule:admin_required", |
1293 | + "identity:list_projects": "rule:admin_required", |
1294 | + "identity:list_user_projects": "rule:admin_or_owner", |
1295 | + "identity:create_project": "rule:admin_required", |
1296 | + "identity:update_project": "rule:admin_required", |
1297 | + "identity:delete_project": "rule:admin_required", |
1298 | + |
1299 | + "identity:get_user": "rule:admin_required", |
1300 | + "identity:list_users": "rule:admin_required", |
1301 | + "identity:create_user": "rule:admin_required", |
1302 | + "identity:update_user": "rule:admin_required", |
1303 | + "identity:delete_user": "rule:admin_required", |
1304 | + "identity:change_password": "rule:admin_or_owner", |
1305 | + |
1306 | + "identity:get_group": "rule:admin_required", |
1307 | + "identity:list_groups": "rule:admin_required", |
1308 | + "identity:list_groups_for_user": "rule:admin_or_owner", |
1309 | + "identity:create_group": "rule:admin_required", |
1310 | + "identity:update_group": "rule:admin_required", |
1311 | + "identity:delete_group": "rule:admin_required", |
1312 | + "identity:list_users_in_group": "rule:admin_required", |
1313 | + "identity:remove_user_from_group": "rule:admin_required", |
1314 | + "identity:check_user_in_group": "rule:admin_required", |
1315 | + "identity:add_user_to_group": "rule:admin_required", |
1316 | + |
1317 | + "identity:get_credential": "rule:admin_required", |
1318 | + "identity:list_credentials": "rule:admin_required", |
1319 | + "identity:create_credential": "rule:admin_required", |
1320 | + "identity:update_credential": "rule:admin_required", |
1321 | + "identity:delete_credential": "rule:admin_required", |
1322 | + |
1323 | + "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", |
1324 | + "identity:ec2_list_credentials": "rule:admin_or_owner", |
1325 | + "identity:ec2_create_credential": "rule:admin_or_owner", |
1326 | + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", |
1327 | + |
1328 | + "identity:get_role": "rule:admin_required", |
1329 | + "identity:list_roles": "rule:admin_required", |
1330 | + "identity:create_role": "rule:admin_required", |
1331 | + "identity:update_role": "rule:admin_required", |
1332 | + "identity:delete_role": "rule:admin_required", |
1333 | + |
1334 | + "identity:check_grant": "rule:admin_required", |
1335 | + "identity:list_grants": "rule:admin_required", |
1336 | + "identity:create_grant": "rule:admin_required", |
1337 | + "identity:revoke_grant": "rule:admin_required", |
1338 | + |
1339 | + "identity:list_role_assignments": "rule:admin_required", |
1340 | + |
1341 | + "identity:get_policy": "rule:admin_required", |
1342 | + "identity:list_policies": "rule:admin_required", |
1343 | + "identity:create_policy": "rule:admin_required", |
1344 | + "identity:update_policy": "rule:admin_required", |
1345 | + "identity:delete_policy": "rule:admin_required", |
1346 | + |
1347 | + "identity:check_token": "rule:admin_or_token_subject", |
1348 | + "identity:validate_token": "rule:service_admin_or_token_subject", |
1349 | + "identity:validate_token_head": "rule:service_or_admin", |
1350 | + "identity:revocation_list": "rule:service_or_admin", |
1351 | + "identity:revoke_token": "rule:admin_or_token_subject", |
1352 | + |
1353 | + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", |
1354 | + "identity:list_trusts": "", |
1355 | + "identity:list_roles_for_trust": "", |
1356 | + "identity:get_role_for_trust": "", |
1357 | + "identity:delete_trust": "", |
1358 | + |
1359 | + "identity:create_consumer": "rule:admin_required", |
1360 | + "identity:get_consumer": "rule:admin_required", |
1361 | + "identity:list_consumers": "rule:admin_required", |
1362 | + "identity:delete_consumer": "rule:admin_required", |
1363 | + "identity:update_consumer": "rule:admin_required", |
1364 | + |
1365 | + "identity:authorize_request_token": "rule:admin_required", |
1366 | + "identity:list_access_token_roles": "rule:admin_required", |
1367 | + "identity:get_access_token_role": "rule:admin_required", |
1368 | + "identity:list_access_tokens": "rule:admin_required", |
1369 | + "identity:get_access_token": "rule:admin_required", |
1370 | + "identity:delete_access_token": "rule:admin_required", |
1371 | + |
1372 | + "identity:list_projects_for_endpoint": "rule:admin_required", |
1373 | + "identity:add_endpoint_to_project": "rule:admin_required", |
1374 | + "identity:check_endpoint_in_project": "rule:admin_required", |
1375 | + "identity:list_endpoints_for_project": "rule:admin_required", |
1376 | + "identity:remove_endpoint_from_project": "rule:admin_required", |
1377 | + |
1378 | + "identity:create_endpoint_group": "rule:admin_required", |
1379 | + "identity:list_endpoint_groups": "rule:admin_required", |
1380 | + "identity:get_endpoint_group": "rule:admin_required", |
1381 | + "identity:update_endpoint_group": "rule:admin_required", |
1382 | + "identity:delete_endpoint_group": "rule:admin_required", |
1383 | + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", |
1384 | + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", |
1385 | + "identity:get_endpoint_group_in_project": "rule:admin_required", |
1386 | + "identity:list_endpoint_groups_for_project": "rule:admin_required", |
1387 | + "identity:add_endpoint_group_to_project": "rule:admin_required", |
1388 | + "identity:remove_endpoint_group_from_project": "rule:admin_required", |
1389 | + |
1390 | + "identity:create_identity_provider": "rule:admin_required", |
1391 | + "identity:list_identity_providers": "rule:admin_required", |
1392 | + "identity:get_identity_providers": "rule:admin_required", |
1393 | + "identity:update_identity_provider": "rule:admin_required", |
1394 | + "identity:delete_identity_provider": "rule:admin_required", |
1395 | + |
1396 | + "identity:create_protocol": "rule:admin_required", |
1397 | + "identity:update_protocol": "rule:admin_required", |
1398 | + "identity:get_protocol": "rule:admin_required", |
1399 | + "identity:list_protocols": "rule:admin_required", |
1400 | + "identity:delete_protocol": "rule:admin_required", |
1401 | + |
1402 | + "identity:create_mapping": "rule:admin_required", |
1403 | + "identity:get_mapping": "rule:admin_required", |
1404 | + "identity:list_mappings": "rule:admin_required", |
1405 | + "identity:delete_mapping": "rule:admin_required", |
1406 | + "identity:update_mapping": "rule:admin_required", |
1407 | + |
1408 | + "identity:create_service_provider": "rule:admin_required", |
1409 | + "identity:list_service_providers": "rule:admin_required", |
1410 | + "identity:get_service_provider": "rule:admin_required", |
1411 | + "identity:update_service_provider": "rule:admin_required", |
1412 | + "identity:delete_service_provider": "rule:admin_required", |
1413 | + |
1414 | + "identity:get_auth_catalog": "", |
1415 | + "identity:get_auth_projects": "", |
1416 | + "identity:get_auth_domains": "", |
1417 | + |
1418 | + "identity:list_projects_for_groups": "", |
1419 | + "identity:list_domains_for_groups": "", |
1420 | + |
1421 | + "identity:list_revoke_events": "", |
1422 | + |
1423 | + "identity:create_policy_association_for_endpoint": "rule:admin_required", |
1424 | + "identity:check_policy_association_for_endpoint": "rule:admin_required", |
1425 | + "identity:delete_policy_association_for_endpoint": "rule:admin_required", |
1426 | + "identity:create_policy_association_for_service": "rule:admin_required", |
1427 | + "identity:check_policy_association_for_service": "rule:admin_required", |
1428 | + "identity:delete_policy_association_for_service": "rule:admin_required", |
1429 | + "identity:create_policy_association_for_region_and_service": "rule:admin_required", |
1430 | + "identity:check_policy_association_for_region_and_service": "rule:admin_required", |
1431 | + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", |
1432 | + "identity:get_policy_for_endpoint": "rule:admin_required", |
1433 | + "identity:list_endpoints_for_policy": "rule:admin_required", |
1434 | + |
1435 | + "identity:create_domain_config": "rule:admin_required", |
1436 | + "identity:get_domain_config": "rule:admin_required", |
1437 | + "identity:update_domain_config": "rule:admin_required", |
1438 | + "identity:delete_domain_config": "rule:admin_required" |
1439 | +} |
1440 | +{% endif -%} |
1441 | |
1442 | === added file 'templates/liberty/policy.json.v2' |
1443 | --- templates/liberty/policy.json.v2 1970-01-01 00:00:00 +0000 |
1444 | +++ templates/liberty/policy.json.v2 2016-03-05 15:43:48 +0000 |
1445 | @@ -0,0 +1,184 @@ |
1446 | +{ |
1447 | + "admin_required": "role:admin or is_admin:1", |
1448 | + "service_role": "role:service", |
1449 | + "service_or_admin": "rule:admin_required or rule:service_role", |
1450 | + "owner" : "user_id:%(user_id)s", |
1451 | + "admin_or_owner": "rule:admin_required or rule:owner", |
1452 | + "token_subject": "user_id:%(target.token.user_id)s", |
1453 | + "admin_or_token_subject": "rule:admin_required or rule:token_subject", |
1454 | + "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject", |
1455 | + |
1456 | + "default": "rule:admin_required", |
1457 | + |
1458 | + "identity:get_region": "", |
1459 | + "identity:list_regions": "", |
1460 | + "identity:create_region": "rule:admin_required", |
1461 | + "identity:update_region": "rule:admin_required", |
1462 | + "identity:delete_region": "rule:admin_required", |
1463 | + |
1464 | + "identity:get_service": "rule:admin_required", |
1465 | + "identity:list_services": "rule:admin_required", |
1466 | + "identity:create_service": "rule:admin_required", |
1467 | + "identity:update_service": "rule:admin_required", |
1468 | + "identity:delete_service": "rule:admin_required", |
1469 | + |
1470 | + "identity:get_endpoint": "rule:admin_required", |
1471 | + "identity:list_endpoints": "rule:admin_required", |
1472 | + "identity:create_endpoint": "rule:admin_required", |
1473 | + "identity:update_endpoint": "rule:admin_required", |
1474 | + "identity:delete_endpoint": "rule:admin_required", |
1475 | + |
1476 | + "identity:get_domain": "rule:admin_required", |
1477 | + "identity:list_domains": "rule:admin_required", |
1478 | + "identity:create_domain": "rule:admin_required", |
1479 | + "identity:update_domain": "rule:admin_required", |
1480 | + "identity:delete_domain": "rule:admin_required", |
1481 | + |
1482 | + "identity:get_project": "rule:admin_required", |
1483 | + "identity:list_projects": "rule:admin_required", |
1484 | + "identity:list_user_projects": "rule:admin_or_owner", |
1485 | + "identity:create_project": "rule:admin_required", |
1486 | + "identity:update_project": "rule:admin_required", |
1487 | + "identity:delete_project": "rule:admin_required", |
1488 | + |
1489 | + "identity:get_user": "rule:admin_required", |
1490 | + "identity:list_users": "rule:admin_required", |
1491 | + "identity:create_user": "rule:admin_required", |
1492 | + "identity:update_user": "rule:admin_required", |
1493 | + "identity:delete_user": "rule:admin_required", |
1494 | + "identity:change_password": "rule:admin_or_owner", |
1495 | + |
1496 | + "identity:get_group": "rule:admin_required", |
1497 | + "identity:list_groups": "rule:admin_required", |
1498 | + "identity:list_groups_for_user": "rule:admin_or_owner", |
1499 | + "identity:create_group": "rule:admin_required", |
1500 | + "identity:update_group": "rule:admin_required", |
1501 | + "identity:delete_group": "rule:admin_required", |
1502 | + "identity:list_users_in_group": "rule:admin_required", |
1503 | + "identity:remove_user_from_group": "rule:admin_required", |
1504 | + "identity:check_user_in_group": "rule:admin_required", |
1505 | + "identity:add_user_to_group": "rule:admin_required", |
1506 | + |
1507 | + "identity:get_credential": "rule:admin_required", |
1508 | + "identity:list_credentials": "rule:admin_required", |
1509 | + "identity:create_credential": "rule:admin_required", |
1510 | + "identity:update_credential": "rule:admin_required", |
1511 | + "identity:delete_credential": "rule:admin_required", |
1512 | + |
1513 | + "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", |
1514 | + "identity:ec2_list_credentials": "rule:admin_or_owner", |
1515 | + "identity:ec2_create_credential": "rule:admin_or_owner", |
1516 | + "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", |
1517 | + |
1518 | + "identity:get_role": "rule:admin_required", |
1519 | + "identity:list_roles": "rule:admin_required", |
1520 | + "identity:create_role": "rule:admin_required", |
1521 | + "identity:update_role": "rule:admin_required", |
1522 | + "identity:delete_role": "rule:admin_required", |
1523 | + |
1524 | + "identity:check_grant": "rule:admin_required", |
1525 | + "identity:list_grants": "rule:admin_required", |
1526 | + "identity:create_grant": "rule:admin_required", |
1527 | + "identity:revoke_grant": "rule:admin_required", |
1528 | + |
1529 | + "identity:list_role_assignments": "rule:admin_required", |
1530 | + |
1531 | + "identity:get_policy": "rule:admin_required", |
1532 | + "identity:list_policies": "rule:admin_required", |
1533 | + "identity:create_policy": "rule:admin_required", |
1534 | + "identity:update_policy": "rule:admin_required", |
1535 | + "identity:delete_policy": "rule:admin_required", |
1536 | + |
1537 | + "identity:check_token": "rule:admin_or_token_subject", |
1538 | + "identity:validate_token": "rule:service_admin_or_token_subject", |
1539 | + "identity:validate_token_head": "rule:service_or_admin", |
1540 | + "identity:revocation_list": "rule:service_or_admin", |
1541 | + "identity:revoke_token": "rule:admin_or_token_subject", |
1542 | + |
1543 | + "identity:create_trust": "user_id:%(trust.trustor_user_id)s", |
1544 | + "identity:list_trusts": "", |
1545 | + "identity:list_roles_for_trust": "", |
1546 | + "identity:get_role_for_trust": "", |
1547 | + "identity:delete_trust": "", |
1548 | + |
1549 | + "identity:create_consumer": "rule:admin_required", |
1550 | + "identity:get_consumer": "rule:admin_required", |
1551 | + "identity:list_consumers": "rule:admin_required", |
1552 | + "identity:delete_consumer": "rule:admin_required", |
1553 | + "identity:update_consumer": "rule:admin_required", |
1554 | + |
1555 | + "identity:authorize_request_token": "rule:admin_required", |
1556 | + "identity:list_access_token_roles": "rule:admin_required", |
1557 | + "identity:get_access_token_role": "rule:admin_required", |
1558 | + "identity:list_access_tokens": "rule:admin_required", |
1559 | + "identity:get_access_token": "rule:admin_required", |
1560 | + "identity:delete_access_token": "rule:admin_required", |
1561 | + |
1562 | + "identity:list_projects_for_endpoint": "rule:admin_required", |
1563 | + "identity:add_endpoint_to_project": "rule:admin_required", |
1564 | + "identity:check_endpoint_in_project": "rule:admin_required", |
1565 | + "identity:list_endpoints_for_project": "rule:admin_required", |
1566 | + "identity:remove_endpoint_from_project": "rule:admin_required", |
1567 | + |
1568 | + "identity:create_endpoint_group": "rule:admin_required", |
1569 | + "identity:list_endpoint_groups": "rule:admin_required", |
1570 | + "identity:get_endpoint_group": "rule:admin_required", |
1571 | + "identity:update_endpoint_group": "rule:admin_required", |
1572 | + "identity:delete_endpoint_group": "rule:admin_required", |
1573 | + "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", |
1574 | + "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", |
1575 | + "identity:get_endpoint_group_in_project": "rule:admin_required", |
1576 | + "identity:list_endpoint_groups_for_project": "rule:admin_required", |
1577 | + "identity:add_endpoint_group_to_project": "rule:admin_required", |
1578 | + "identity:remove_endpoint_group_from_project": "rule:admin_required", |
1579 | + |
1580 | + "identity:create_identity_provider": "rule:admin_required", |
1581 | + "identity:list_identity_providers": "rule:admin_required", |
1582 | + "identity:get_identity_providers": "rule:admin_required", |
1583 | + "identity:update_identity_provider": "rule:admin_required", |
1584 | + "identity:delete_identity_provider": "rule:admin_required", |
1585 | + |
1586 | + "identity:create_protocol": "rule:admin_required", |
1587 | + "identity:update_protocol": "rule:admin_required", |
1588 | + "identity:get_protocol": "rule:admin_required", |
1589 | + "identity:list_protocols": "rule:admin_required", |
1590 | + "identity:delete_protocol": "rule:admin_required", |
1591 | + |
1592 | + "identity:create_mapping": "rule:admin_required", |
1593 | + "identity:get_mapping": "rule:admin_required", |
1594 | + "identity:list_mappings": "rule:admin_required", |
1595 | + "identity:delete_mapping": "rule:admin_required", |
1596 | + "identity:update_mapping": "rule:admin_required", |
1597 | + |
1598 | + "identity:create_service_provider": "rule:admin_required", |
1599 | + "identity:list_service_providers": "rule:admin_required", |
1600 | + "identity:get_service_provider": "rule:admin_required", |
1601 | + "identity:update_service_provider": "rule:admin_required", |
1602 | + "identity:delete_service_provider": "rule:admin_required", |
1603 | + |
1604 | + "identity:get_auth_catalog": "", |
1605 | + "identity:get_auth_projects": "", |
1606 | + "identity:get_auth_domains": "", |
1607 | + |
1608 | + "identity:list_projects_for_groups": "", |
1609 | + "identity:list_domains_for_groups": "", |
1610 | + |
1611 | + "identity:list_revoke_events": "", |
1612 | + |
1613 | + "identity:create_policy_association_for_endpoint": "rule:admin_required", |
1614 | + "identity:check_policy_association_for_endpoint": "rule:admin_required", |
1615 | + "identity:delete_policy_association_for_endpoint": "rule:admin_required", |
1616 | + "identity:create_policy_association_for_service": "rule:admin_required", |
1617 | + "identity:check_policy_association_for_service": "rule:admin_required", |
1618 | + "identity:delete_policy_association_for_service": "rule:admin_required", |
1619 | + "identity:create_policy_association_for_region_and_service": "rule:admin_required", |
1620 | + "identity:check_policy_association_for_region_and_service": "rule:admin_required", |
1621 | + "identity:delete_policy_association_for_region_and_service": "rule:admin_required", |
1622 | + "identity:get_policy_for_endpoint": "rule:admin_required", |
1623 | + "identity:list_endpoints_for_policy": "rule:admin_required", |
1624 | + |
1625 | + "identity:create_domain_config": "rule:admin_required", |
1626 | + "identity:get_domain_config": "rule:admin_required", |
1627 | + "identity:update_domain_config": "rule:admin_required", |
1628 | + "identity:delete_domain_config": "rule:admin_required" |
1629 | +} |
1630 | |
1631 | === modified file 'tests/basic_deployment.py' |
1632 | --- tests/basic_deployment.py 2016-01-13 21:33:59 +0000 |
1633 | +++ tests/basic_deployment.py 2016-03-05 15:43:48 +0000 |
1634 | @@ -17,6 +17,8 @@ |
1635 | DEBUG, |
1636 | # ERROR |
1637 | ) |
1638 | +import keystoneclient |
1639 | +from charmhelpers.core.decorators import retry_on_exception |
1640 | |
1641 | # Use DEBUG to turn on debug logging |
1642 | u = OpenStackAmuletUtils(DEBUG) |
1643 | @@ -30,6 +32,7 @@ |
1644 | """Deploy the entire test environment.""" |
1645 | super(KeystoneBasicDeployment, self).__init__(series, openstack, |
1646 | source, stable) |
1647 | + self.keystone_api_version = 2 |
1648 | self.git = git |
1649 | self._add_services() |
1650 | self._add_relations() |
1651 | @@ -37,8 +40,8 @@ |
1652 | self._deploy() |
1653 | |
1654 | u.log.info('Waiting on extended status checks...') |
1655 | - exclude_services = ['mysql'] |
1656 | - self._auto_wait_for_status(exclude_services=exclude_services) |
1657 | + self.exclude_services = ['mysql'] |
1658 | + self._auto_wait_for_status(exclude_services=self.exclude_services) |
1659 | |
1660 | self._initialize_tests() |
1661 | |
1662 | @@ -72,7 +75,8 @@ |
1663 | def _configure_services(self): |
1664 | """Configure all of the services.""" |
1665 | keystone_config = {'admin-password': 'openstack', |
1666 | - 'admin-token': 'ubuntutesting'} |
1667 | + 'admin-token': 'ubuntutesting', |
1668 | + 'preferred-api-version': self.keystone_api_version} |
1669 | if self.git: |
1670 | amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') |
1671 | |
1672 | @@ -109,6 +113,103 @@ |
1673 | } |
1674 | super(KeystoneBasicDeployment, self)._configure_services(configs) |
1675 | |
1676 | + @retry_on_exception(5, base_delay=10) |
1677 | + def set_api_version(self, api_version): |
1678 | + set_alternate = {'preferred-api-version': api_version} |
1679 | + |
1680 | + # Make config change, check for service restarts |
1681 | + u.log.debug('Setting preferred-api-version={}'.format(api_version)) |
1682 | + self.d.configure('keystone', set_alternate) |
1683 | + self.keystone_api_version = api_version |
1684 | + client = self.get_keystone_client(api_version=api_version) |
1685 | + # List an artefact that needs authorisation to check admin user |
1686 | + # has been set up. If that is still in progress |
1687 | + # keystoneclient.exceptions.Unauthorized will be thrown and caught by |
1688 | + # @retry_on_exception |
1689 | + if api_version == 2: |
1690 | + client.tenants.list() |
1691 | + self.keystone_v2 = self.get_keystone_client(api_version=2) |
1692 | + else: |
1693 | + client.projects.list() |
1694 | + self.keystone_v3 = self.get_keystone_client(api_version=3) |
1695 | + |
1696 | + def get_keystone_client(self, api_version=None): |
1697 | + if api_version == 2: |
1698 | + return u.authenticate_keystone_admin(self.keystone_sentry, |
1699 | + user='admin', |
1700 | + password='openstack', |
1701 | + tenant='admin', |
1702 | + api_version=api_version, |
1703 | + keystone_ip=self.keystone_ip) |
1704 | + else: |
1705 | + return u.authenticate_keystone_admin(self.keystone_sentry, |
1706 | + user='admin', |
1707 | + password='openstack', |
1708 | + api_version=api_version, |
1709 | + keystone_ip=self.keystone_ip) |
1710 | + |
1711 | + def create_users_v2(self): |
1712 | + # Create a demo tenant/role/user |
1713 | + self.demo_tenant = 'demoTenant' |
1714 | + self.demo_role = 'demoRole' |
1715 | + self.demo_user = 'demoUser' |
1716 | + if not u.tenant_exists(self.keystone_v2, self.demo_tenant): |
1717 | + tenant = self.keystone_v2.tenants.create( |
1718 | + tenant_name=self.demo_tenant, |
1719 | + description='demo tenant', |
1720 | + enabled=True) |
1721 | + self.keystone_v2.roles.create(name=self.demo_role) |
1722 | + self.keystone_v2.users.create(name=self.demo_user, |
1723 | + password='password', |
1724 | + tenant_id=tenant.id, |
1725 | + email='demo@demo.com') |
1726 | + |
1727 | + # Authenticate keystone demo |
1728 | + self.keystone_demo = u.authenticate_keystone_user( |
1729 | + self.keystone_v2, user=self.demo_user, |
1730 | + password='password', tenant=self.demo_tenant) |
1731 | + |
1732 | + def create_users_v3(self): |
1733 | + # Create a demo tenant/role/user |
1734 | + self.demo_project = 'demoProject' |
1735 | + self.demo_user_v3 = 'demoUserV3' |
1736 | + self.demo_domain = 'demoDomain' |
1737 | + try: |
1738 | + domain = self.keystone_v3.domains.find(name=self.demo_domain) |
1739 | + except keystoneclient.exceptions.NotFound: |
1740 | + domain = self.keystone_v3.domains.create( |
1741 | + self.demo_domain, |
1742 | + description='Demo Domain', |
1743 | + enabled=True |
1744 | + ) |
1745 | + |
1746 | + try: |
1747 | + self.keystone_v3.projects.find(name=self.demo_project) |
1748 | + except keystoneclient.exceptions.NotFound: |
1749 | + self.keystone_v3.projects.create( |
1750 | + self.demo_project, |
1751 | + domain, |
1752 | + description='Demo Project', |
1753 | + enabled=True, |
1754 | + ) |
1755 | + |
1756 | + try: |
1757 | + self.keystone_v3.roles.find(name=self.demo_role) |
1758 | + except keystoneclient.exceptions.NotFound: |
1759 | + self.keystone_v3.roles.create(name=self.demo_role) |
1760 | + |
1761 | + try: |
1762 | + self.keystone_v3.users.find(name=self.demo_user_v3) |
1763 | + except keystoneclient.exceptions.NotFound: |
1764 | + self.keystone_v3.users.create( |
1765 | + self.demo_user_v3, |
1766 | + domain=domain.id, |
1767 | + project=self.demo_project, |
1768 | + password='password', |
1769 | + email='demov3@demo.com', |
1770 | + description='Demo', |
1771 | + enabled=True) |
1772 | + |
1773 | def _initialize_tests(self): |
1774 | """Perform final initialization before tests get run.""" |
1775 | # Access the sentries for inspecting service units |
1776 | @@ -119,31 +220,14 @@ |
1777 | self._get_openstack_release())) |
1778 | u.log.debug('openstack release str: {}'.format( |
1779 | self._get_openstack_release_string())) |
1780 | - |
1781 | + self.keystone_ip = self.keystone_sentry.relation( |
1782 | + 'shared-db', |
1783 | + 'mysql:shared-db')['private-address'] |
1784 | + self.set_api_version(2) |
1785 | # Authenticate keystone admin |
1786 | - self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, |
1787 | - user='admin', |
1788 | - password='openstack', |
1789 | - tenant='admin') |
1790 | - |
1791 | - # Create a demo tenant/role/user |
1792 | - self.demo_tenant = 'demoTenant' |
1793 | - self.demo_role = 'demoRole' |
1794 | - self.demo_user = 'demoUser' |
1795 | - if not u.tenant_exists(self.keystone, self.demo_tenant): |
1796 | - tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, |
1797 | - description='demo tenant', |
1798 | - enabled=True) |
1799 | - self.keystone.roles.create(name=self.demo_role) |
1800 | - self.keystone.users.create(name=self.demo_user, |
1801 | - password='password', |
1802 | - tenant_id=tenant.id, |
1803 | - email='demo@demo.com') |
1804 | - |
1805 | - # Authenticate keystone demo |
1806 | - self.keystone_demo = u.authenticate_keystone_user( |
1807 | - self.keystone, user=self.demo_user, |
1808 | - password='password', tenant=self.demo_tenant) |
1809 | + self.keystone_v2 = self.get_keystone_client(api_version=2) |
1810 | + self.keystone_v3 = self.get_keystone_client(api_version=3) |
1811 | + self.create_users_v2() |
1812 | |
1813 | def test_100_services(self): |
1814 | """Verify the expected services are running on the corresponding |
1815 | @@ -159,7 +243,7 @@ |
1816 | if ret: |
1817 | amulet.raise_status(amulet.FAIL, msg=ret) |
1818 | |
1819 | - def test_102_keystone_tenants(self): |
1820 | + def validate_keystone_tenants(self, client): |
1821 | """Verify all existing tenants.""" |
1822 | u.log.debug('Checking keystone tenants...') |
1823 | expected = [ |
1824 | @@ -176,13 +260,20 @@ |
1825 | 'description': 'Created by Juju', |
1826 | 'id': u.not_null} |
1827 | ] |
1828 | - actual = self.keystone.tenants.list() |
1829 | + if self.keystone_api_version == 2: |
1830 | + actual = client.tenants.list() |
1831 | + else: |
1832 | + actual = client.projects.list() |
1833 | |
1834 | ret = u.validate_tenant_data(expected, actual) |
1835 | if ret: |
1836 | amulet.raise_status(amulet.FAIL, msg=ret) |
1837 | |
1838 | - def test_104_keystone_roles(self): |
1839 | + def test_102_keystone_tenants(self): |
1840 | + self.set_api_version(2) |
1841 | + self.validate_keystone_tenants(self.keystone_v2) |
1842 | + |
1843 | + def validate_keystone_roles(self, client): |
1844 | """Verify all existing roles.""" |
1845 | u.log.debug('Checking keystone roles...') |
1846 | expected = [ |
1847 | @@ -191,40 +282,113 @@ |
1848 | {'name': 'Admin', |
1849 | 'id': u.not_null} |
1850 | ] |
1851 | - actual = self.keystone.roles.list() |
1852 | + actual = client.roles.list() |
1853 | |
1854 | ret = u.validate_role_data(expected, actual) |
1855 | if ret: |
1856 | amulet.raise_status(amulet.FAIL, msg=ret) |
1857 | |
1858 | - def test_106_keystone_users(self): |
1859 | + def test_104_keystone_roles(self): |
1860 | + self.set_api_version(2) |
1861 | + self.validate_keystone_roles(self.keystone_v2) |
1862 | + |
1863 | + def validate_keystone_users(self, client): |
1864 | """Verify all existing roles.""" |
1865 | u.log.debug('Checking keystone users...') |
1866 | - expected = [ |
1867 | + base = [ |
1868 | {'name': 'demoUser', |
1869 | 'enabled': True, |
1870 | - 'tenantId': u.not_null, |
1871 | 'id': u.not_null, |
1872 | 'email': 'demo@demo.com'}, |
1873 | {'name': 'admin', |
1874 | 'enabled': True, |
1875 | - 'tenantId': u.not_null, |
1876 | 'id': u.not_null, |
1877 | 'email': 'juju@localhost'}, |
1878 | {'name': 'cinder_cinderv2', |
1879 | 'enabled': True, |
1880 | - 'tenantId': u.not_null, |
1881 | 'id': u.not_null, |
1882 | 'email': u'juju@localhost'} |
1883 | ] |
1884 | - actual = self.keystone.users.list() |
1885 | - ret = u.validate_user_data(expected, actual) |
1886 | + expected = [] |
1887 | + for user_info in base: |
1888 | + if self.keystone_api_version == 2: |
1889 | + user_info['tenantId'] = u.not_null |
1890 | + else: |
1891 | + user_info['default_project_id'] = u.not_null |
1892 | + expected.append(user_info) |
1893 | + actual = client.users.list() |
1894 | + ret = u.validate_user_data(expected, actual, |
1895 | + api_version=self.keystone_api_version) |
1896 | if ret: |
1897 | amulet.raise_status(amulet.FAIL, msg=ret) |
1898 | |
1899 | - def test_108_service_catalog(self): |
1900 | + def test_106_keystone_users(self): |
1901 | + self.set_api_version(2) |
1902 | + self.validate_keystone_users(self.keystone_v2) |
1903 | + |
1904 | + def is_liberty_or_newer(self): |
1905 | + os_release = self._get_openstack_release_string() |
1906 | + if os_release >= 'liberty': |
1907 | + return True |
1908 | + else: |
1909 | + u.log.info('Skipping test, {} < liberty'.format(os_release)) |
1910 | + return False |
1911 | + |
1912 | + def test_112_keystone_tenants(self): |
1913 | + if self.is_liberty_or_newer(): |
1914 | + self.set_api_version(3) |
1915 | + self.validate_keystone_tenants(self.keystone_v3) |
1916 | + |
1917 | + def test_114_keystone_tenants(self): |
1918 | + if self.is_liberty_or_newer(): |
1919 | + self.set_api_version(3) |
1920 | + self.validate_keystone_roles(self.keystone_v3) |
1921 | + |
1922 | + def test_116_keystone_users(self): |
1923 | + if self.is_liberty_or_newer(): |
1924 | + self.set_api_version(3) |
1925 | + self.validate_keystone_users(self.keystone_v3) |
1926 | + |
1927 | + def test_118_keystone_users(self): |
1928 | + if self.is_liberty_or_newer(): |
1929 | + self.set_api_version(3) |
1930 | + self.create_users_v3() |
1931 | + actual_user = self.keystone_v3.users.find(name=self.demo_user_v3) |
1932 | + expect = { |
1933 | + 'default_project_id': self.demo_project, |
1934 | + 'email': 'demov3@demo.com', |
1935 | + 'name': self.demo_user_v3, |
1936 | + } |
1937 | + for key in expect.keys(): |
1938 | + u.log.debug('Checking user {} {} is {}'.format( |
1939 | + self.demo_user_v3, |
1940 | + key, |
1941 | + expect[key]) |
1942 | + ) |
1943 | + assert expect[key] == getattr(actual_user, key) |
1944 | + |
1945 | + def test_120_keystone_domains(self): |
1946 | + if self.is_liberty_or_newer(): |
1947 | + self.set_api_version(3) |
1948 | + self.create_users_v3() |
1949 | + actual_domain = self.keystone_v3.domains.find( |
1950 | + name=self.demo_domain |
1951 | + ) |
1952 | + expect = { |
1953 | + 'name': self.demo_domain, |
1954 | + } |
1955 | + for key in expect.keys(): |
1956 | + u.log.debug('Checking domain {} {} is {}'.format( |
1957 | + self.demo_domain, |
1958 | + key, |
1959 | + expect[key]) |
1960 | + ) |
1961 | + assert expect[key] == getattr(actual_domain, key) |
1962 | + |
1963 | + def test_138_service_catalog(self): |
1964 | """Verify that the service catalog endpoint data is valid.""" |
1965 | u.log.debug('Checking keystone service catalog...') |
1966 | + self.set_api_version(2) |
1967 | endpoint_check = { |
1968 | 'adminURL': u.valid_url, |
1969 | 'id': u.not_null, |
1970 | @@ -236,16 +400,16 @@ |
1971 | 'volume': [endpoint_check], |
1972 | 'identity': [endpoint_check] |
1973 | } |
1974 | - actual = self.keystone.service_catalog.get_endpoints() |
1975 | + actual = self.keystone_v2.service_catalog.get_endpoints() |
1976 | |
1977 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) |
1978 | if ret: |
1979 | amulet.raise_status(amulet.FAIL, msg=ret) |
1980 | |
1981 | - def test_110_keystone_endpoint(self): |
1982 | + def test_140_keystone_endpoint(self): |
1983 | """Verify the keystone endpoint data.""" |
1984 | u.log.debug('Checking keystone api endpoint data...') |
1985 | - endpoints = self.keystone.endpoints.list() |
1986 | + endpoints = self.keystone_v2.endpoints.list() |
1987 | admin_port = '35357' |
1988 | internal_port = public_port = '5000' |
1989 | expected = { |
1990 | @@ -262,10 +426,10 @@ |
1991 | amulet.raise_status(amulet.FAIL, |
1992 | msg='keystone endpoint: {}'.format(ret)) |
1993 | |
1994 | - def test_112_cinder_endpoint(self): |
1995 | + def test_142_cinder_endpoint(self): |
1996 | """Verify the cinder endpoint data.""" |
1997 | u.log.debug('Checking cinder endpoint...') |
1998 | - endpoints = self.keystone.endpoints.list() |
1999 | + endpoints = self.keystone_v2.endpoints.list() |
2000 | admin_port = internal_port = public_port = '8776' |
2001 | expected = { |
2002 | 'id': u.not_null, |
2003 | |
2004 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' |
2005 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2016-01-04 21:27:51 +0000 |
2006 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2016-03-05 15:43:48 +0000 |
2007 | @@ -27,6 +27,10 @@ |
2008 | import glanceclient.v1.client as glance_client |
2009 | import heatclient.v1.client as heat_client |
2010 | import keystoneclient.v2_0 as keystone_client |
2011 | +from keystoneclient.auth.identity import v3 as keystone_id_v3 |
2012 | +from keystoneclient import session as keystone_session |
2013 | +from keystoneclient.v3 import client as keystone_client_v3 |
2014 | + |
2015 | import novaclient.v1_1.client as nova_client |
2016 | import pika |
2017 | import swiftclient |
2018 | @@ -139,7 +143,7 @@ |
2019 | return "role {} does not exist".format(e['name']) |
2020 | return ret |
2021 | |
2022 | - def validate_user_data(self, expected, actual): |
2023 | + def validate_user_data(self, expected, actual, api_version=None): |
2024 | """Validate user data. |
2025 | |
2026 | Validate a list of actual user data vs a list of expected user |
2027 | @@ -150,10 +154,14 @@ |
2028 | for e in expected: |
2029 | found = False |
2030 | for act in actual: |
2031 | - a = {'enabled': act.enabled, 'name': act.name, |
2032 | - 'email': act.email, 'tenantId': act.tenantId, |
2033 | - 'id': act.id} |
2034 | - if e['name'] == a['name']: |
2035 | + if e['name'] == act.name: |
2036 | + a = {'enabled': act.enabled, 'name': act.name, |
2037 | + 'email': act.email, 'id': act.id} |
2038 | + if api_version == 2: |
2039 | + a['tenantId'] = act.tenantId |
2040 | + else: |
2041 | + a['default_project_id'] = getattr(act, |
2042 | + 'default_project_id', 'none') |
2043 | found = True |
2044 | ret = self._validate_dict_data(e, a) |
2045 | if ret: |
2046 | @@ -188,15 +196,30 @@ |
2047 | return cinder_client.Client(username, password, tenant, ept) |
2048 | |
2049 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
2050 | - tenant): |
2051 | + tenant=None, api_version=None, |
2052 | + keystone_ip=None): |
2053 | """Authenticates admin user with the keystone admin endpoint.""" |
2054 | self.log.debug('Authenticating keystone admin...') |
2055 | unit = keystone_sentry |
2056 | - service_ip = unit.relation('shared-db', |
2057 | - 'mysql:shared-db')['private-address'] |
2058 | - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) |
2059 | - return keystone_client.Client(username=user, password=password, |
2060 | - tenant_name=tenant, auth_url=ep) |
2061 | + if not keystone_ip: |
2062 | + keystone_ip = unit.relation('shared-db', |
2063 | + 'mysql:shared-db')['private-address'] |
2064 | + base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) |
2065 | + if not api_version or api_version == 2: |
2066 | + ep = base_ep + "/v2.0" |
2067 | + return keystone_client.Client(username=user, password=password, |
2068 | + tenant_name=tenant, auth_url=ep) |
2069 | + else: |
2070 | + ep = base_ep + "/v3" |
2071 | + auth = keystone_id_v3.Password( |
2072 | + user_domain_name='admin_domain', |
2073 | + username=user, |
2074 | + password=password, |
2075 | + domain_name='admin_domain', |
2076 | + auth_url=ep, |
2077 | + ) |
2078 | + sess = keystone_session.Session(auth=auth) |
2079 | + return keystone_client_v3.Client(session=sess) |
2080 | |
2081 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
2082 | """Authenticates a regular user with the keystone public endpoint.""" |
2083 | |
2084 | === added directory 'tests/charmhelpers/core' |
2085 | === added file 'tests/charmhelpers/core/__init__.py' |
2086 | --- tests/charmhelpers/core/__init__.py 1970-01-01 00:00:00 +0000 |
2087 | +++ tests/charmhelpers/core/__init__.py 2016-03-05 15:43:48 +0000 |
2088 | @@ -0,0 +1,15 @@ |
2089 | +# Copyright 2014-2015 Canonical Limited. |
2090 | +# |
2091 | +# This file is part of charm-helpers. |
2092 | +# |
2093 | +# charm-helpers is free software: you can redistribute it and/or modify |
2094 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2095 | +# published by the Free Software Foundation. |
2096 | +# |
2097 | +# charm-helpers is distributed in the hope that it will be useful, |
2098 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2099 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2100 | +# GNU Lesser General Public License for more details. |
2101 | +# |
2102 | +# You should have received a copy of the GNU Lesser General Public License |
2103 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2104 | |
2105 | === added file 'tests/charmhelpers/core/decorators.py' |
2106 | --- tests/charmhelpers/core/decorators.py 1970-01-01 00:00:00 +0000 |
2107 | +++ tests/charmhelpers/core/decorators.py 2016-03-05 15:43:48 +0000 |
2108 | @@ -0,0 +1,57 @@ |
2109 | +# Copyright 2014-2015 Canonical Limited. |
2110 | +# |
2111 | +# This file is part of charm-helpers. |
2112 | +# |
2113 | +# charm-helpers is free software: you can redistribute it and/or modify |
2114 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2115 | +# published by the Free Software Foundation. |
2116 | +# |
2117 | +# charm-helpers is distributed in the hope that it will be useful, |
2118 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2119 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2120 | +# GNU Lesser General Public License for more details. |
2121 | +# |
2122 | +# You should have received a copy of the GNU Lesser General Public License |
2123 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2124 | + |
2125 | +# |
2126 | +# Copyright 2014 Canonical Ltd. |
2127 | +# |
2128 | +# Authors: |
2129 | +# Edward Hope-Morley <opentastic@gmail.com> |
2130 | +# |
2131 | + |
2132 | +import time |
2133 | + |
2134 | +from charmhelpers.core.hookenv import ( |
2135 | + log, |
2136 | + INFO, |
2137 | +) |
2138 | + |
2139 | + |
2140 | +def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): |
2141 | + """If the decorated function raises exception exc_type, allow num_retries |
2142 | +    retry attempts before raising the exception. |
2143 | + """ |
2144 | + def _retry_on_exception_inner_1(f): |
2145 | + def _retry_on_exception_inner_2(*args, **kwargs): |
2146 | + retries = num_retries |
2147 | + multiplier = 1 |
2148 | + while True: |
2149 | + try: |
2150 | + return f(*args, **kwargs) |
2151 | + except exc_type: |
2152 | + if not retries: |
2153 | + raise |
2154 | + |
2155 | + delay = base_delay * multiplier |
2156 | + multiplier += 1 |
2157 | + log("Retrying '%s' %d more times (delay=%s)" % |
2158 | + (f.__name__, retries, delay), level=INFO) |
2159 | + retries -= 1 |
2160 | + if delay: |
2161 | + time.sleep(delay) |
2162 | + |
2163 | + return _retry_on_exception_inner_2 |
2164 | + |
2165 | + return _retry_on_exception_inner_1 |
2166 | |
2167 | === added file 'tests/charmhelpers/core/hookenv.py' |
2168 | --- tests/charmhelpers/core/hookenv.py 1970-01-01 00:00:00 +0000 |
2169 | +++ tests/charmhelpers/core/hookenv.py 2016-03-05 15:43:48 +0000 |
2170 | @@ -0,0 +1,978 @@ |
2171 | +# Copyright 2014-2015 Canonical Limited. |
2172 | +# |
2173 | +# This file is part of charm-helpers. |
2174 | +# |
2175 | +# charm-helpers is free software: you can redistribute it and/or modify |
2176 | +# it under the terms of the GNU Lesser General Public License version 3 as |
2177 | +# published by the Free Software Foundation. |
2178 | +# |
2179 | +# charm-helpers is distributed in the hope that it will be useful, |
2180 | +# but WITHOUT ANY WARRANTY; without even the implied warranty of |
2181 | +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
2182 | +# GNU Lesser General Public License for more details. |
2183 | +# |
2184 | +# You should have received a copy of the GNU Lesser General Public License |
2185 | +# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. |
2186 | + |
2187 | +"Interactions with the Juju environment" |
2188 | +# Copyright 2013 Canonical Ltd. |
2189 | +# |
2190 | +# Authors: |
2191 | +# Charm Helpers Developers <juju@lists.ubuntu.com> |
2192 | + |
2193 | +from __future__ import print_function |
2194 | +import copy |
2195 | +from distutils.version import LooseVersion |
2196 | +from functools import wraps |
2197 | +import glob |
2198 | +import os |
2199 | +import json |
2200 | +import yaml |
2201 | +import subprocess |
2202 | +import sys |
2203 | +import errno |
2204 | +import tempfile |
2205 | +from subprocess import CalledProcessError |
2206 | + |
2207 | +import six |
2208 | +if not six.PY3: |
2209 | + from UserDict import UserDict |
2210 | +else: |
2211 | + from collections import UserDict |
2212 | + |
2213 | +CRITICAL = "CRITICAL" |
2214 | +ERROR = "ERROR" |
2215 | +WARNING = "WARNING" |
2216 | +INFO = "INFO" |
2217 | +DEBUG = "DEBUG" |
2218 | +MARKER = object() |
2219 | + |
2220 | +cache = {} |
2221 | + |
2222 | + |
2223 | +def cached(func): |
2224 | + """Cache return values for multiple executions of func + args |
2225 | + |
2226 | + For example:: |
2227 | + |
2228 | + @cached |
2229 | + def unit_get(attribute): |
2230 | + pass |
2231 | + |
2232 | + unit_get('test') |
2233 | + |
2234 | + will cache the result of unit_get + 'test' for future calls. |
2235 | + """ |
2236 | + @wraps(func) |
2237 | + def wrapper(*args, **kwargs): |
2238 | + global cache |
2239 | + key = str((func, args, kwargs)) |
2240 | + try: |
2241 | + return cache[key] |
2242 | + except KeyError: |
2243 | + pass # Drop out of the exception handler scope. |
2244 | + res = func(*args, **kwargs) |
2245 | + cache[key] = res |
2246 | + return res |
2247 | + wrapper._wrapped = func |
2248 | + return wrapper |
2249 | + |
2250 | + |
2251 | +def flush(key): |
2252 | + """Flushes any entries from function cache where the |
2253 | + key is found in the function+args """ |
2254 | + flush_list = [] |
2255 | + for item in cache: |
2256 | + if key in item: |
2257 | + flush_list.append(item) |
2258 | + for item in flush_list: |
2259 | + del cache[item] |
2260 | + |
2261 | + |
2262 | +def log(message, level=None): |
2263 | + """Write a message to the juju log""" |
2264 | + command = ['juju-log'] |
2265 | + if level: |
2266 | + command += ['-l', level] |
2267 | + if not isinstance(message, six.string_types): |
2268 | + message = repr(message) |
2269 | + command += [message] |
2270 | + # Missing juju-log should not cause failures in unit tests |
2271 | + # Send log output to stderr |
2272 | + try: |
2273 | + subprocess.call(command) |
2274 | + except OSError as e: |
2275 | + if e.errno == errno.ENOENT: |
2276 | + if level: |
2277 | + message = "{}: {}".format(level, message) |
2278 | + message = "juju-log: {}".format(message) |
2279 | + print(message, file=sys.stderr) |
2280 | + else: |
2281 | + raise |
2282 | + |
2283 | + |
2284 | +class Serializable(UserDict): |
2285 | + """Wrapper, an object that can be serialized to yaml or json""" |
2286 | + |
2287 | + def __init__(self, obj): |
2288 | + # wrap the object |
2289 | + UserDict.__init__(self) |
2290 | + self.data = obj |
2291 | + |
2292 | + def __getattr__(self, attr): |
2293 | + # See if this object has attribute. |
2294 | + if attr in ("json", "yaml", "data"): |
2295 | + return self.__dict__[attr] |
2296 | + # Check for attribute in wrapped object. |
2297 | + got = getattr(self.data, attr, MARKER) |
2298 | + if got is not MARKER: |
2299 | + return got |
2300 | + # Proxy to the wrapped object via dict interface. |
2301 | + try: |
2302 | + return self.data[attr] |
2303 | + except KeyError: |
2304 | + raise AttributeError(attr) |
2305 | + |
2306 | + def __getstate__(self): |
2307 | + # Pickle as a standard dictionary. |
2308 | + return self.data |
2309 | + |
2310 | + def __setstate__(self, state): |
2311 | + # Unpickle into our wrapper. |
2312 | + self.data = state |
2313 | + |
2314 | + def json(self): |
2315 | + """Serialize the object to json""" |
2316 | + return json.dumps(self.data) |
2317 | + |
2318 | + def yaml(self): |
2319 | + """Serialize the object to yaml""" |
2320 | + return yaml.dump(self.data) |
2321 | + |
2322 | + |
2323 | +def execution_environment(): |
2324 | + """A convenient bundling of the current execution context""" |
2325 | + context = {} |
2326 | + context['conf'] = config() |
2327 | + if relation_id(): |
2328 | + context['reltype'] = relation_type() |
2329 | + context['relid'] = relation_id() |
2330 | + context['rel'] = relation_get() |
2331 | + context['unit'] = local_unit() |
2332 | + context['rels'] = relations() |
2333 | + context['env'] = os.environ |
2334 | + return context |
2335 | + |
2336 | + |
2337 | +def in_relation_hook(): |
2338 | + """Determine whether we're running in a relation hook""" |
2339 | + return 'JUJU_RELATION' in os.environ |
2340 | + |
2341 | + |
2342 | +def relation_type(): |
2343 | + """The scope for the current relation hook""" |
2344 | + return os.environ.get('JUJU_RELATION', None) |
2345 | + |
2346 | + |
2347 | +@cached |
2348 | +def relation_id(relation_name=None, service_or_unit=None): |
2349 | + """The relation ID for the current or a specified relation""" |
2350 | + if not relation_name and not service_or_unit: |
2351 | + return os.environ.get('JUJU_RELATION_ID', None) |
2352 | + elif relation_name and service_or_unit: |
2353 | + service_name = service_or_unit.split('/')[0] |
2354 | + for relid in relation_ids(relation_name): |
2355 | + remote_service = remote_service_name(relid) |
2356 | + if remote_service == service_name: |
2357 | + return relid |
2358 | + else: |
2359 | + raise ValueError('Must specify neither or both of relation_name and service_or_unit') |
2360 | + |
2361 | + |
2362 | +def local_unit(): |
2363 | + """Local unit ID""" |
2364 | + return os.environ['JUJU_UNIT_NAME'] |
2365 | + |
2366 | + |
2367 | +def remote_unit(): |
2368 | + """The remote unit for the current relation hook""" |
2369 | + return os.environ.get('JUJU_REMOTE_UNIT', None) |
2370 | + |
2371 | + |
2372 | +def service_name(): |
2373 | + """The name service group this unit belongs to""" |
2374 | + return local_unit().split('/')[0] |
2375 | + |
2376 | + |
2377 | +@cached |
2378 | +def remote_service_name(relid=None): |
2379 | + """The remote service name for a given relation-id (or the current relation)""" |
2380 | + if relid is None: |
2381 | + unit = remote_unit() |
2382 | + else: |
2383 | + units = related_units(relid) |
2384 | + unit = units[0] if units else None |
2385 | + return unit.split('/')[0] if unit else None |
2386 | + |
2387 | + |
2388 | +def hook_name(): |
2389 | + """The name of the currently executing hook""" |
2390 | + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) |
2391 | + |
2392 | + |
2393 | +class Config(dict): |
2394 | + """A dictionary representation of the charm's config.yaml, with some |
2395 | + extra features: |
2396 | + |
2397 | + - See which values in the dictionary have changed since the previous hook. |
2398 | + - For values that have changed, see what the previous value was. |
2399 | + - Store arbitrary data for use in a later hook. |
2400 | + |
2401 | + NOTE: Do not instantiate this object directly - instead call |
2402 | + ``hookenv.config()``, which will return an instance of :class:`Config`. |
2403 | + |
2404 | + Example usage:: |
2405 | + |
2406 | + >>> # inside a hook |
2407 | + >>> from charmhelpers.core import hookenv |
2408 | + >>> config = hookenv.config() |
2409 | + >>> config['foo'] |
2410 | + 'bar' |
2411 | + >>> # store a new key/value for later use |
2412 | + >>> config['mykey'] = 'myval' |
2413 | + |
2414 | + |
2415 | + >>> # user runs `juju set mycharm foo=baz` |
2416 | + >>> # now we're inside subsequent config-changed hook |
2417 | + >>> config = hookenv.config() |
2418 | + >>> config['foo'] |
2419 | + 'baz' |
2420 | + >>> # test to see if this val has changed since last hook |
2421 | + >>> config.changed('foo') |
2422 | + True |
2423 | + >>> # what was the previous value? |
2424 | + >>> config.previous('foo') |
2425 | + 'bar' |
2426 | + >>> # keys/values that we add are preserved across hooks |
2427 | + >>> config['mykey'] |
2428 | + 'myval' |
2429 | + |
2430 | + """ |
2431 | + CONFIG_FILE_NAME = '.juju-persistent-config' |
2432 | + |
2433 | + def __init__(self, *args, **kw): |
2434 | + super(Config, self).__init__(*args, **kw) |
2435 | + self.implicit_save = True |
2436 | + self._prev_dict = None |
2437 | + self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) |
2438 | + if os.path.exists(self.path): |
2439 | + self.load_previous() |
2440 | + atexit(self._implicit_save) |
2441 | + |
2442 | + def load_previous(self, path=None): |
2443 | + """Load previous copy of config from disk. |
2444 | + |
2445 | + In normal usage you don't need to call this method directly - it |
2446 | + is called automatically at object initialization. |
2447 | + |
2448 | + :param path: |
2449 | + |
2450 | + File path from which to load the previous config. If `None`, |
2451 | + config is loaded from the default location. If `path` is |
2452 | + specified, subsequent `save()` calls will write to the same |
2453 | + path. |
2454 | + |
2455 | + """ |
2456 | + self.path = path or self.path |
2457 | + with open(self.path) as f: |
2458 | + self._prev_dict = json.load(f) |
2459 | + for k, v in copy.deepcopy(self._prev_dict).items(): |
2460 | + if k not in self: |
2461 | + self[k] = v |
2462 | + |
2463 | + def changed(self, key): |
2464 | + """Return True if the current value for this key is different from |
2465 | + the previous value. |
2466 | + |
2467 | + """ |
2468 | + if self._prev_dict is None: |
2469 | + return True |
2470 | + return self.previous(key) != self.get(key) |
2471 | + |
2472 | + def previous(self, key): |
2473 | + """Return previous value for this key, or None if there |
2474 | + is no previous value. |
2475 | + |
2476 | + """ |
2477 | + if self._prev_dict: |
2478 | + return self._prev_dict.get(key) |
2479 | + return None |
2480 | + |
2481 | + def save(self): |
2482 | + """Save this config to disk. |
2483 | + |
2484 | + If the charm is using the :mod:`Services Framework <services.base>` |
2485 | + or :meth:'@hook <Hooks.hook>' decorator, this |
2486 | + is called automatically at the end of successful hook execution. |
2487 | + Otherwise, it should be called directly by user code. |
2488 | + |
2489 | + To disable automatic saves, set ``implicit_save=False`` on this |
2490 | + instance. |
2491 | + |
2492 | + """ |
2493 | + with open(self.path, 'w') as f: |
2494 | + json.dump(self, f) |
2495 | + |
2496 | + def _implicit_save(self): |
2497 | + if self.implicit_save: |
2498 | + self.save() |
2499 | + |
2500 | + |
2501 | +@cached |
2502 | +def config(scope=None): |
2503 | + """Juju charm configuration""" |
2504 | + config_cmd_line = ['config-get'] |
2505 | + if scope is not None: |
2506 | + config_cmd_line.append(scope) |
2507 | + config_cmd_line.append('--format=json') |
2508 | + try: |
2509 | + config_data = json.loads( |
2510 | + subprocess.check_output(config_cmd_line).decode('UTF-8')) |
2511 | + if scope is not None: |
2512 | + return config_data |
2513 | + return Config(config_data) |
2514 | + except ValueError: |
2515 | + return None |
2516 | + |
2517 | + |
2518 | +@cached |
2519 | +def relation_get(attribute=None, unit=None, rid=None): |
2520 | + """Get relation information""" |
2521 | + _args = ['relation-get', '--format=json'] |
2522 | + if rid: |
2523 | + _args.append('-r') |
2524 | + _args.append(rid) |
2525 | + _args.append(attribute or '-') |
2526 | + if unit: |
2527 | + _args.append(unit) |
2528 | + try: |
2529 | + return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
2530 | + except ValueError: |
2531 | + return None |
2532 | + except CalledProcessError as e: |
2533 | + if e.returncode == 2: |
2534 | + return None |
2535 | + raise |
2536 | + |
2537 | + |
2538 | +def relation_set(relation_id=None, relation_settings=None, **kwargs): |
2539 | + """Set relation information for the current unit""" |
2540 | + relation_settings = relation_settings if relation_settings else {} |
2541 | + relation_cmd_line = ['relation-set'] |
2542 | + accepts_file = "--file" in subprocess.check_output( |
2543 | + relation_cmd_line + ["--help"], universal_newlines=True) |
2544 | + if relation_id is not None: |
2545 | + relation_cmd_line.extend(('-r', relation_id)) |
2546 | + settings = relation_settings.copy() |
2547 | + settings.update(kwargs) |
2548 | + for key, value in settings.items(): |
2549 | + # Force value to be a string: it always should, but some call |
2550 | + # sites pass in things like dicts or numbers. |
2551 | + if value is not None: |
2552 | + settings[key] = "{}".format(value) |
2553 | + if accepts_file: |
2554 | + # --file was introduced in Juju 1.23.2. Use it by default if |
2555 | + # available, since otherwise we'll break if the relation data is |
2556 | + # too big. Ideally we should tell relation-set to read the data from |
2557 | + # stdin, but that feature is broken in 1.23.2: Bug #1454678. |
2558 | + with tempfile.NamedTemporaryFile(delete=False) as settings_file: |
2559 | + settings_file.write(yaml.safe_dump(settings).encode("utf-8")) |
2560 | + subprocess.check_call( |
2561 | + relation_cmd_line + ["--file", settings_file.name]) |
2562 | + os.remove(settings_file.name) |
2563 | + else: |
2564 | + for key, value in settings.items(): |
2565 | + if value is None: |
2566 | + relation_cmd_line.append('{}='.format(key)) |
2567 | + else: |
2568 | + relation_cmd_line.append('{}={}'.format(key, value)) |
2569 | + subprocess.check_call(relation_cmd_line) |
2570 | + # Flush cache of any relation-gets for local unit |
2571 | + flush(local_unit()) |
2572 | + |
2573 | + |
2574 | +def relation_clear(r_id=None): |
2575 | + ''' Clears any relation data already set on relation r_id ''' |
2576 | + settings = relation_get(rid=r_id, |
2577 | + unit=local_unit()) |
2578 | + for setting in settings: |
2579 | + if setting not in ['public-address', 'private-address']: |
2580 | + settings[setting] = None |
2581 | + relation_set(relation_id=r_id, |
2582 | + **settings) |
2583 | + |
2584 | + |
2585 | +@cached |
2586 | +def relation_ids(reltype=None): |
2587 | + """A list of relation_ids""" |
2588 | + reltype = reltype or relation_type() |
2589 | + relid_cmd_line = ['relation-ids', '--format=json'] |
2590 | + if reltype is not None: |
2591 | + relid_cmd_line.append(reltype) |
2592 | + return json.loads( |
2593 | + subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] |
2594 | + return [] |
2595 | + |
2596 | + |
2597 | +@cached |
2598 | +def related_units(relid=None): |
2599 | + """A list of related units""" |
2600 | + relid = relid or relation_id() |
2601 | + units_cmd_line = ['relation-list', '--format=json'] |
2602 | + if relid is not None: |
2603 | + units_cmd_line.extend(('-r', relid)) |
2604 | + return json.loads( |
2605 | + subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] |
2606 | + |
2607 | + |
2608 | +@cached |
2609 | +def relation_for_unit(unit=None, rid=None): |
2610 | +    """Get the json representation of a unit's relation""" |
2611 | + unit = unit or remote_unit() |
2612 | + relation = relation_get(unit=unit, rid=rid) |
2613 | + for key in relation: |
2614 | + if key.endswith('-list'): |
2615 | + relation[key] = relation[key].split() |
2616 | + relation['__unit__'] = unit |
2617 | + return relation |
2618 | + |
2619 | + |
2620 | +@cached |
2621 | +def relations_for_id(relid=None): |
2622 | + """Get relations of a specific relation ID""" |
2623 | + relation_data = [] |
2624 | + relid = relid or relation_ids() |
2625 | + for unit in related_units(relid): |
2626 | + unit_data = relation_for_unit(unit, relid) |
2627 | + unit_data['__relid__'] = relid |
2628 | + relation_data.append(unit_data) |
2629 | + return relation_data |
2630 | + |
2631 | + |
2632 | +@cached |
2633 | +def relations_of_type(reltype=None): |
2634 | + """Get relations of a specific type""" |
2635 | + relation_data = [] |
2636 | + reltype = reltype or relation_type() |
2637 | + for relid in relation_ids(reltype): |
2638 | + for relation in relations_for_id(relid): |
2639 | + relation['__relid__'] = relid |
2640 | + relation_data.append(relation) |
2641 | + return relation_data |
2642 | + |
2643 | + |
2644 | +@cached |
2645 | +def metadata(): |
2646 | + """Get the current charm metadata.yaml contents as a python object""" |
2647 | + with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: |
2648 | + return yaml.safe_load(md) |
2649 | + |
2650 | + |
2651 | +@cached |
2652 | +def relation_types(): |
2653 | + """Get a list of relation types supported by this charm""" |
2654 | + rel_types = [] |
2655 | + md = metadata() |
2656 | + for key in ('provides', 'requires', 'peers'): |
2657 | + section = md.get(key) |
2658 | + if section: |
2659 | + rel_types.extend(section.keys()) |
2660 | + return rel_types |
2661 | + |
2662 | + |
2663 | +@cached |
2664 | +def peer_relation_id(): |
2665 | + '''Get the peers relation id if a peers relation has been joined, else None.''' |
2666 | + md = metadata() |
2667 | + section = md.get('peers') |
2668 | + if section: |
2669 | + for key in section: |
2670 | + relids = relation_ids(key) |
2671 | + if relids: |
2672 | + return relids[0] |
2673 | + return None |
2674 | + |
2675 | + |
2676 | +@cached |
2677 | +def relation_to_interface(relation_name): |
2678 | + """ |
2679 | + Given the name of a relation, return the interface that relation uses. |
2680 | + |
2681 | + :returns: The interface name, or ``None``. |
2682 | + """ |
2683 | + return relation_to_role_and_interface(relation_name)[1] |
2684 | + |
2685 | + |
2686 | +@cached |
2687 | +def relation_to_role_and_interface(relation_name): |
2688 | + """ |
2689 | + Given the name of a relation, return the role and the name of the interface |
2690 | + that relation uses (where role is one of ``provides``, ``requires``, or ``peers``). |
2691 | + |
2692 | + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. |
2693 | + """ |
2694 | + _metadata = metadata() |
2695 | + for role in ('provides', 'requires', 'peers'): |
2696 | + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') |
2697 | + if interface: |
2698 | + return role, interface |
2699 | + return None, None |
2700 | + |
2701 | + |
2702 | +@cached |
2703 | +def role_and_interface_to_relations(role, interface_name): |
2704 | + """ |
2705 | + Given a role and interface name, return a list of relation names for the |
2706 | + current charm that use that interface under that role (where role is one |
2707 | + of ``provides``, ``requires``, or ``peers``). |
2708 | + |
2709 | + :returns: A list of relation names. |
2710 | + """ |
2711 | + _metadata = metadata() |
2712 | + results = [] |
2713 | + for relation_name, relation in _metadata.get(role, {}).items(): |
2714 | + if relation['interface'] == interface_name: |
2715 | + results.append(relation_name) |
2716 | + return results |
2717 | + |
2718 | + |
2719 | +@cached |
2720 | +def interface_to_relations(interface_name): |
2721 | + """ |
2722 | + Given an interface, return a list of relation names for the current |
2723 | + charm that use that interface. |
2724 | + |
2725 | + :returns: A list of relation names. |
2726 | + """ |
2727 | + results = [] |
2728 | + for role in ('provides', 'requires', 'peers'): |
2729 | + results.extend(role_and_interface_to_relations(role, interface_name)) |
2730 | + return results |
2731 | + |
2732 | + |
2733 | +@cached |
2734 | +def charm_name(): |
2735 | + """Get the name of the current charm as is specified on metadata.yaml""" |
2736 | + return metadata().get('name') |
2737 | + |
2738 | + |
2739 | +@cached |
2740 | +def relations(): |
2741 | + """Get a nested dictionary of relation data for all related units""" |
2742 | + rels = {} |
2743 | + for reltype in relation_types(): |
2744 | + relids = {} |
2745 | + for relid in relation_ids(reltype): |
2746 | + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} |
2747 | + for unit in related_units(relid): |
2748 | + reldata = relation_get(unit=unit, rid=relid) |
2749 | + units[unit] = reldata |
2750 | + relids[relid] = units |
2751 | + rels[reltype] = relids |
2752 | + return rels |
2753 | + |
2754 | + |
2755 | +@cached |
2756 | +def is_relation_made(relation, keys='private-address'): |
2757 | + ''' |
2758 | + Determine whether a relation is established by checking for |
2759 | + presence of key(s). If a list of keys is provided, they |
2760 | + must all be present for the relation to be identified as made |
2761 | + ''' |
2762 | + if isinstance(keys, str): |
2763 | + keys = [keys] |
2764 | + for r_id in relation_ids(relation): |
2765 | + for unit in related_units(r_id): |
2766 | + context = {} |
2767 | + for k in keys: |
2768 | + context[k] = relation_get(k, rid=r_id, |
2769 | + unit=unit) |
2770 | + if None not in context.values(): |
2771 | + return True |
2772 | + return False |
2773 | + |
2774 | + |
2775 | +def open_port(port, protocol="TCP"): |
2776 | + """Open a service network port""" |
2777 | + _args = ['open-port'] |
2778 | + _args.append('{}/{}'.format(port, protocol)) |
2779 | + subprocess.check_call(_args) |
2780 | + |
2781 | + |
2782 | +def close_port(port, protocol="TCP"): |
2783 | + """Close a service network port""" |
2784 | + _args = ['close-port'] |
2785 | + _args.append('{}/{}'.format(port, protocol)) |
2786 | + subprocess.check_call(_args) |
2787 | + |
2788 | + |
2789 | +@cached |
2790 | +def unit_get(attribute): |
2791 | + """Get the unit ID for the remote unit""" |
2792 | + _args = ['unit-get', '--format=json', attribute] |
2793 | + try: |
2794 | + return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
2795 | + except ValueError: |
2796 | + return None |
2797 | + |
2798 | + |
2799 | +def unit_public_ip(): |
2800 | + """Get this unit's public IP address""" |
2801 | + return unit_get('public-address') |
2802 | + |
2803 | + |
2804 | +def unit_private_ip(): |
2805 | + """Get this unit's private IP address""" |
2806 | + return unit_get('private-address') |
2807 | + |
2808 | + |
2809 | +@cached |
2810 | +def storage_get(attribute=None, storage_id=None): |
2811 | + """Get storage attributes""" |
2812 | + _args = ['storage-get', '--format=json'] |
2813 | + if storage_id: |
2814 | + _args.extend(('-s', storage_id)) |
2815 | + if attribute: |
2816 | + _args.append(attribute) |
2817 | + try: |
2818 | + return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
2819 | + except ValueError: |
2820 | + return None |
2821 | + |
2822 | + |
2823 | +@cached |
2824 | +def storage_list(storage_name=None): |
2825 | + """List the storage IDs for the unit""" |
2826 | + _args = ['storage-list', '--format=json'] |
2827 | + if storage_name: |
2828 | + _args.append(storage_name) |
2829 | + try: |
2830 | + return json.loads(subprocess.check_output(_args).decode('UTF-8')) |
2831 | + except ValueError: |
2832 | + return None |
2833 | + except OSError as e: |
2834 | + import errno |
2835 | + if e.errno == errno.ENOENT: |
2836 | + # storage-list does not exist |
2837 | + return [] |
2838 | + raise |
2839 | + |
2840 | + |
2841 | +class UnregisteredHookError(Exception): |
2842 | + """Raised when an undefined hook is called""" |
2843 | + pass |
2844 | + |
2845 | + |
2846 | +class Hooks(object): |
2847 | + """A convenient handler for hook functions. |
2848 | + |
2849 | + Example:: |
2850 | + |
2851 | + hooks = Hooks() |
2852 | + |
2853 | + # register a hook, taking its name from the function name |
2854 | + @hooks.hook() |
2855 | + def install(): |
2856 | + pass # your code here |
2857 | + |
2858 | + # register a hook, providing a custom hook name |
2859 | + @hooks.hook("config-changed") |
2860 | + def config_changed(): |
2861 | + pass # your code here |
2862 | + |
2863 | + if __name__ == "__main__": |
2864 | + # execute a hook based on the name the program is called by |
2865 | + hooks.execute(sys.argv) |
2866 | + """ |
2867 | + |
2868 | + def __init__(self, config_save=None): |
2869 | + super(Hooks, self).__init__() |
2870 | + self._hooks = {} |
2871 | + |
2872 | + # For unknown reasons, we allow the Hooks constructor to override |
2873 | + # config().implicit_save. |
2874 | + if config_save is not None: |
2875 | + config().implicit_save = config_save |
2876 | + |
2877 | + def register(self, name, function): |
2878 | + """Register a hook""" |
2879 | + self._hooks[name] = function |
2880 | + |
2881 | + def execute(self, args): |
2882 | + """Execute a registered hook based on args[0]""" |
2883 | + _run_atstart() |
2884 | + hook_name = os.path.basename(args[0]) |
2885 | + if hook_name in self._hooks: |
2886 | + try: |
2887 | + self._hooks[hook_name]() |
2888 | + except SystemExit as x: |
2889 | + if x.code is None or x.code == 0: |
2890 | + _run_atexit() |
2891 | + raise |
2892 | + _run_atexit() |
2893 | + else: |
2894 | + raise UnregisteredHookError(hook_name) |
2895 | + |
2896 | + def hook(self, *hook_names): |
2897 | + """Decorator, registering them as hooks""" |
2898 | + def wrapper(decorated): |
2899 | + for hook_name in hook_names: |
2900 | + self.register(hook_name, decorated) |
2901 | + else: |
2902 | + self.register(decorated.__name__, decorated) |
2903 | + if '_' in decorated.__name__: |
2904 | + self.register( |
2905 | + decorated.__name__.replace('_', '-'), decorated) |
2906 | + return decorated |
2907 | + return wrapper |
2908 | + |
2909 | + |
2910 | +def charm_dir(): |
2911 | + """Return the root directory of the current charm""" |
2912 | + return os.environ.get('CHARM_DIR') |
2913 | + |
2914 | + |
2915 | +@cached |
2916 | +def action_get(key=None): |
2917 | + """Gets the value of an action parameter, or all key/value param pairs""" |
2918 | + cmd = ['action-get'] |
2919 | + if key is not None: |
2920 | + cmd.append(key) |
2921 | + cmd.append('--format=json') |
2922 | + action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
2923 | + return action_data |
2924 | + |
2925 | + |
2926 | +def action_set(values): |
2927 | + """Sets the values to be returned after the action finishes""" |
2928 | + cmd = ['action-set'] |
2929 | + for k, v in list(values.items()): |
2930 | + cmd.append('{}={}'.format(k, v)) |
2931 | + subprocess.check_call(cmd) |
2932 | + |
2933 | + |
2934 | +def action_fail(message): |
2935 | + """Sets the action status to failed and sets the error message. |
2936 | + |
2937 | + The results set by action_set are preserved.""" |
2938 | + subprocess.check_call(['action-fail', message]) |
2939 | + |
2940 | + |
2941 | +def action_name(): |
2942 | + """Get the name of the currently executing action.""" |
2943 | + return os.environ.get('JUJU_ACTION_NAME') |
2944 | + |
2945 | + |
2946 | +def action_uuid(): |
2947 | + """Get the UUID of the currently executing action.""" |
2948 | + return os.environ.get('JUJU_ACTION_UUID') |
2949 | + |
2950 | + |
2951 | +def action_tag(): |
2952 | + """Get the tag for the currently executing action.""" |
2953 | + return os.environ.get('JUJU_ACTION_TAG') |
2954 | + |
2955 | + |
2956 | +def status_set(workload_state, message): |
2957 | + """Set the workload state with a message |
2958 | + |
2959 | + Use status-set to set the workload state with a message which is visible |
2960 | + to the user via juju status. If the status-set command is not found then |
2961 | +    assume this is juju < 1.23 and juju-log the message instead. |
2962 | + |
2963 | + workload_state -- valid juju workload state. |
2964 | + message -- status update message |
2965 | + """ |
2966 | + valid_states = ['maintenance', 'blocked', 'waiting', 'active'] |
2967 | + if workload_state not in valid_states: |
2968 | + raise ValueError( |
2969 | + '{!r} is not a valid workload state'.format(workload_state) |
2970 | + ) |
2971 | + cmd = ['status-set', workload_state, message] |
2972 | + try: |
2973 | + ret = subprocess.call(cmd) |
2974 | + if ret == 0: |
2975 | + return |
2976 | + except OSError as e: |
2977 | + if e.errno != errno.ENOENT: |
2978 | + raise |
2979 | + log_message = 'status-set failed: {} {}'.format(workload_state, |
2980 | + message) |
2981 | + log(log_message, level='INFO') |
2982 | + |
2983 | + |
2984 | +def status_get(): |
2985 | + """Retrieve the previously set juju workload state and message |
2986 | + |
2987 | + If the status-get command is not found then assume this is juju < 1.23 and |
2988 | + return 'unknown', "" |
2989 | + |
2990 | + """ |
2991 | + cmd = ['status-get', "--format=json", "--include-data"] |
2992 | + try: |
2993 | + raw_status = subprocess.check_output(cmd) |
2994 | + except OSError as e: |
2995 | + if e.errno == errno.ENOENT: |
2996 | + return ('unknown', "") |
2997 | + else: |
2998 | + raise |
2999 | + else: |
3000 | + status = json.loads(raw_status.decode("UTF-8")) |
3001 | + return (status["status"], status["message"]) |
3002 | + |
3003 | + |
3004 | +def translate_exc(from_exc, to_exc): |
3005 | + def inner_translate_exc1(f): |
3006 | + @wraps(f) |
3007 | + def inner_translate_exc2(*args, **kwargs): |
3008 | + try: |
3009 | + return f(*args, **kwargs) |
3010 | + except from_exc: |
3011 | + raise to_exc |
3012 | + |
3013 | + return inner_translate_exc2 |
3014 | + |
3015 | + return inner_translate_exc1 |
3016 | + |
3017 | + |
3018 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3019 | +def is_leader(): |
3020 | + """Does the current unit hold the juju leadership |
3021 | + |
3022 | + Uses juju to determine whether the current unit is the leader of its peers |
3023 | + """ |
3024 | + cmd = ['is-leader', '--format=json'] |
3025 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
3026 | + |
3027 | + |
3028 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3029 | +def leader_get(attribute=None): |
3030 | + """Juju leader get value(s)""" |
3031 | + cmd = ['leader-get', '--format=json'] + [attribute or '-'] |
3032 | + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) |
3033 | + |
3034 | + |
3035 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3036 | +def leader_set(settings=None, **kwargs): |
3037 | + """Juju leader set value(s)""" |
3038 | + # Don't log secrets. |
3039 | + # log("Juju leader-set '%s'" % (settings), level=DEBUG) |
3040 | + cmd = ['leader-set'] |
3041 | + settings = settings or {} |
3042 | + settings.update(kwargs) |
3043 | + for k, v in settings.items(): |
3044 | + if v is None: |
3045 | + cmd.append('{}='.format(k)) |
3046 | + else: |
3047 | + cmd.append('{}={}'.format(k, v)) |
3048 | + subprocess.check_call(cmd) |
3049 | + |
3050 | + |
3051 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3052 | +def payload_register(ptype, klass, pid): |
3053 | + """ is used while a hook is running to let Juju know that a |
3054 | + payload has been started.""" |
3055 | + cmd = ['payload-register'] |
3056 | + for x in [ptype, klass, pid]: |
3057 | + cmd.append(x) |
3058 | + subprocess.check_call(cmd) |
3059 | + |
3060 | + |
3061 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3062 | +def payload_unregister(klass, pid): |
3063 | + """ is used while a hook is running to let Juju know |
3064 | + that a payload has been manually stopped. The <class> and <id> provided |
3065 | + must match a payload that has been previously registered with juju using |
3066 | + payload-register.""" |
3067 | + cmd = ['payload-unregister'] |
3068 | + for x in [klass, pid]: |
3069 | + cmd.append(x) |
3070 | + subprocess.check_call(cmd) |
3071 | + |
3072 | + |
3073 | +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) |
3074 | +def payload_status_set(klass, pid, status): |
3075 | + """is used to update the current status of a registered payload. |
3076 | + The <class> and <id> provided must match a payload that has been previously |
3077 | + registered with juju using payload-register. The <status> must be one of the |
3078 | + following: starting, started, stopping, stopped""" |
3079 | + cmd = ['payload-status-set'] |
3080 | + for x in [klass, pid, status]: |
3081 | + cmd.append(x) |
3082 | + subprocess.check_call(cmd) |
3083 | + |
3084 | + |
3085 | +@cached |
3086 | +def juju_version(): |
3087 | + """Full version string (eg. '1.23.3.1-trusty-amd64')""" |
3088 | + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 |
3089 | + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] |
3090 | + return subprocess.check_output([jujud, 'version'], |
3091 | + universal_newlines=True).strip() |
3092 | + |
3093 | + |
3094 | +@cached |
3095 | +def has_juju_version(minimum_version): |
3096 | + """Return True if the Juju version is at least the provided version""" |
3097 | + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) |
3098 | + |
3099 | + |
3100 | +_atexit = [] |
3101 | +_atstart = [] |
3102 | + |
3103 | + |
3104 | +def atstart(callback, *args, **kwargs): |
3105 | + '''Schedule a callback to run before the main hook. |
3106 | + |
3107 | + Callbacks are run in the order they were added. |
3108 | + |
3109 | + This is useful for modules and classes to perform initialization |
3110 | + and inject behavior. In particular: |
3111 | + |
3112 | + - Run common code before all of your hooks, such as logging |
3113 | + the hook name or interesting relation data. |
3114 | + - Defer object or module initialization that requires a hook |
3115 | + context until we know there actually is a hook context, |
3116 | + making testing easier. |
3117 | + - Rather than requiring charm authors to include boilerplate to |
3118 | + invoke your helper's behavior, have it run automatically if |
3119 | + your object is instantiated or module imported. |
3120 | + |
3121 | + This is not at all useful after your hook framework has been launched. |
3122 | + ''' |
3123 | + global _atstart |
3124 | + _atstart.append((callback, args, kwargs)) |
3125 | + |
3126 | + |
3127 | +def atexit(callback, *args, **kwargs): |
3128 | + '''Schedule a callback to run on successful hook completion. |
3129 | + |
3130 | + Callbacks are run in the reverse order that they were added.''' |
3131 | + _atexit.append((callback, args, kwargs)) |
3132 | + |
3133 | + |
3134 | +def _run_atstart(): |
3135 | + '''Hook frameworks must invoke this before running the main hook body.''' |
3136 | + global _atstart |
3137 | + for callback, args, kwargs in _atstart: |
3138 | + callback(*args, **kwargs) |
3139 | + del _atstart[:] |
3140 | + |
3141 | + |
3142 | +def _run_atexit(): |
3143 | + '''Hook frameworks must invoke this after the main hook body has |
3144 | + successfully completed. Do not invoke it if the hook fails.''' |
3145 | + global _atexit |
3146 | + for callback, args, kwargs in reversed(_atexit): |
3147 | + callback(*args, **kwargs) |
3148 | + del _atexit[:] |
3149 | |
3150 | === modified file 'unit_tests/test_actions.py' |
3151 | --- unit_tests/test_actions.py 2016-01-13 15:13:10 +0000 |
3152 | +++ unit_tests/test_actions.py 2016-03-05 15:43:48 +0000 |
3153 | @@ -5,7 +5,8 @@ |
3154 | |
3155 | with patch('actions.hooks.keystone_utils.is_paused') as is_paused: |
3156 | with patch('actions.hooks.keystone_utils.register_configs') as configs: |
3157 | - import actions.actions |
3158 | + with patch('actions.hooks.keystone_utils.os_release') as os_release: |
3159 | + import actions.actions |
3160 | |
3161 | |
3162 | class PauseTestCase(CharmTestCase): |
3163 | @@ -15,7 +16,8 @@ |
3164 | actions.actions, ["service_pause", "HookData", "kv", |
3165 | "assess_status"]) |
3166 | |
3167 | - def test_pauses_services(self): |
3168 | + @patch('actions.hooks.keystone_utils.os_release') |
3169 | + def test_pauses_services(self, os_release): |
3170 | """Pause action pauses all Keystone services.""" |
3171 | pause_calls = [] |
3172 | |
3173 | @@ -29,7 +31,8 @@ |
3174 | self.assertItemsEqual( |
3175 | pause_calls, ['haproxy', 'keystone', 'apache2']) |
3176 | |
3177 | - def test_bails_out_early_on_error(self): |
3178 | + @patch('actions.hooks.keystone_utils.os_release') |
3179 | + def test_bails_out_early_on_error(self, os_release): |
3180 | """Pause action fails early if there are errors stopping a service.""" |
3181 | pause_calls = [] |
3182 | |
3183 | @@ -46,7 +49,8 @@ |
3184 | actions.actions.pause, []) |
3185 | self.assertEqual(pause_calls, ['haproxy']) |
3186 | |
3187 | - def test_pause_sets_value(self): |
3188 | + @patch('actions.hooks.keystone_utils.os_release') |
3189 | + def test_pause_sets_value(self, os_release): |
3190 | """Pause action sets the unit-paused value to True.""" |
3191 | self.HookData()().return_value = True |
3192 | |
3193 | @@ -61,7 +65,8 @@ |
3194 | actions.actions, ["service_resume", "HookData", "kv", |
3195 | "assess_status"]) |
3196 | |
3197 | - def test_resumes_services(self): |
3198 | + @patch('actions.hooks.keystone_utils.os_release') |
3199 | + def test_resumes_services(self, os_release): |
3200 | """Resume action resumes all Keystone services.""" |
3201 | resume_calls = [] |
3202 | |
3203 | @@ -73,7 +78,8 @@ |
3204 | actions.actions.resume([]) |
3205 | self.assertEqual(resume_calls, ['haproxy', 'keystone', 'apache2']) |
3206 | |
3207 | - def test_bails_out_early_on_error(self): |
3208 | + @patch('actions.hooks.keystone_utils.os_release') |
3209 | + def test_bails_out_early_on_error(self, os_release): |
3210 | """Resume action fails early if there are errors starting a service.""" |
3211 | resume_calls = [] |
3212 | |
3213 | @@ -90,7 +96,8 @@ |
3214 | actions.actions.resume, []) |
3215 | self.assertEqual(resume_calls, ['haproxy']) |
3216 | |
3217 | - def test_resume_sets_value(self): |
3218 | + @patch('actions.hooks.keystone_utils.os_release') |
3219 | + def test_resume_sets_value(self, os_release): |
3220 | """Resume action sets the unit-paused value to False.""" |
3221 | self.HookData()().return_value = True |
3222 | |
3223 | |
3224 | === modified file 'unit_tests/test_actions_git_reinstall.py' |
3225 | --- unit_tests/test_actions_git_reinstall.py 2015-10-30 23:30:09 +0000 |
3226 | +++ unit_tests/test_actions_git_reinstall.py 2016-03-05 15:43:48 +0000 |
3227 | @@ -1,7 +1,8 @@ |
3228 | from mock import patch |
3229 | |
3230 | with patch('hooks.keystone_utils.register_configs') as register_configs: |
3231 | - import git_reinstall |
3232 | + with patch('hooks.keystone_utils.os_release') as os_release: |
3233 | + import git_reinstall |
3234 | |
3235 | from test_utils import ( |
3236 | CharmTestCase |
3237 | |
3238 | === modified file 'unit_tests/test_actions_openstack_upgrade.py' |
3239 | --- unit_tests/test_actions_openstack_upgrade.py 2015-10-19 13:33:33 +0000 |
3240 | +++ unit_tests/test_actions_openstack_upgrade.py 2016-03-05 15:43:48 +0000 |
3241 | @@ -3,9 +3,12 @@ |
3242 | |
3243 | os.environ['JUJU_UNIT_NAME'] = 'keystone' |
3244 | |
3245 | +# with patch('charmhelpers.contrib.openstack.utils.os_release') as os_release: |
3246 | +# with patch('keystone_hooks.os_release') as os_release: |
3247 | with patch('keystone_utils.register_configs') as register_configs: |
3248 | - import openstack_upgrade |
3249 | - import keystone_hooks as hooks |
3250 | + with patch('keystone_utils.os_release') as os_release: |
3251 | + import openstack_upgrade |
3252 | + import keystone_hooks as hooks |
3253 | |
3254 | from test_utils import ( |
3255 | CharmTestCase |
3256 | @@ -23,13 +26,14 @@ |
3257 | super(TestKeystoneUpgradeActions, self).setUp(openstack_upgrade, |
3258 | TO_PATCH) |
3259 | |
3260 | + @patch.object(hooks, 'os_release') |
3261 | @patch.object(hooks, 'register_configs') |
3262 | @patch('charmhelpers.contrib.openstack.utils.config') |
3263 | @patch('charmhelpers.contrib.openstack.utils.action_set') |
3264 | @patch('charmhelpers.contrib.openstack.utils.git_install_requested') |
3265 | @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') |
3266 | def test_openstack_upgrade_true(self, upgrade_avail, git_requested, |
3267 | - action_set, config, reg_configs): |
3268 | + action_set, config, reg_configs, os_rel): |
3269 | git_requested.return_value = False |
3270 | upgrade_avail.return_value = True |
3271 | config.return_value = True |
3272 | @@ -40,13 +44,14 @@ |
3273 | self.os.execl.assert_called_with('./hooks/config-changed-postupgrade', |
3274 | '') |
3275 | |
3276 | + @patch.object(hooks, 'os_release') |
3277 | @patch.object(hooks, 'register_configs') |
3278 | @patch('charmhelpers.contrib.openstack.utils.config') |
3279 | @patch('charmhelpers.contrib.openstack.utils.action_set') |
3280 | @patch('charmhelpers.contrib.openstack.utils.git_install_requested') |
3281 | @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') |
3282 | def test_openstack_upgrade_false(self, upgrade_avail, git_requested, |
3283 | - action_set, config, reg_configs): |
3284 | + action_set, config, reg_configs, os_rel): |
3285 | git_requested.return_value = False |
3286 | upgrade_avail.return_value = True |
3287 | config.return_value = False |
3288 | |
3289 | === modified file 'unit_tests/test_keystone_hooks.py' |
3290 | --- unit_tests/test_keystone_hooks.py 2016-01-12 11:09:46 +0000 |
3291 | +++ unit_tests/test_keystone_hooks.py 2016-03-05 15:43:48 +0000 |
3292 | @@ -73,6 +73,7 @@ |
3293 | 'git_install', |
3294 | 'is_service_present', |
3295 | 'delete_service_entry', |
3296 | + 'os_release', |
3297 | ] |
3298 | |
3299 | |
3300 | @@ -83,9 +84,10 @@ |
3301 | self.config.side_effect = self.test_config.get |
3302 | self.ssh_user = 'juju_keystone' |
3303 | |
3304 | + @patch.object(utils, 'os_release') |
3305 | @patch.object(utils, 'git_install_requested') |
3306 | @patch.object(unison, 'ensure_user') |
3307 | - def test_install_hook(self, ensure_user, git_requested): |
3308 | + def test_install_hook(self, ensure_user, git_requested, os_release): |
3309 | git_requested.return_value = False |
3310 | repo = 'cloud:precise-grizzly' |
3311 | self.test_config.set('openstack-origin', repo) |
3312 | @@ -100,9 +102,10 @@ |
3313 | 'python-six', 'unison', 'uuid'], fatal=True) |
3314 | self.git_install.assert_called_with(None) |
3315 | |
3316 | + @patch.object(utils, 'os_release') |
3317 | @patch.object(utils, 'git_install_requested') |
3318 | @patch.object(unison, 'ensure_user') |
3319 | - def test_install_hook_git(self, ensure_user, git_requested): |
3320 | + def test_install_hook_git(self, ensure_user, git_requested, os_release): |
3321 | git_requested.return_value = True |
3322 | repo = 'cloud:trusty-juno' |
3323 | openstack_origin_git = { |
3324 | @@ -135,6 +138,7 @@ |
3325 | |
3326 | mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils' |
3327 | |
3328 | + @patch.object(utils, 'os_release') |
3329 | @patch.object(hooks, 'config') |
3330 | @patch('%s.config' % (mod_ch_openstack_utils)) |
3331 | @patch('%s.relation_set' % (mod_ch_openstack_utils)) |
3332 | @@ -143,7 +147,7 @@ |
3333 | @patch('%s.sync_db_with_multi_ipv6_addresses' % (mod_ch_openstack_utils)) |
3334 | def test_db_joined(self, mock_sync_db_with_multi, mock_get_ipv6_addr, |
3335 | mock_relation_ids, mock_relation_set, mock_config, |
3336 | - mock_hooks_config): |
3337 | + mock_hooks_config, os_release): |
3338 | |
3339 | cfg_dict = {'prefer-ipv6': False, |
3340 | 'database': 'keystone', |
3341 | @@ -317,6 +321,7 @@ |
3342 | mock_ensure_ssl_cert_master, mock_log, |
3343 | mock_peer_store, mock_peer_retrieve, |
3344 | mock_relation_ids): |
3345 | + self.os_release.return_value = 'kilo' |
3346 | mock_relation_ids.return_value = ['peer/0'] |
3347 | |
3348 | peer_settings = {} |
3349 | @@ -907,6 +912,7 @@ |
3350 | cmd = ['a2dissite', 'openstack_https_frontend'] |
3351 | self.check_call.assert_called_with(cmd) |
3352 | |
3353 | + @patch.object(utils, 'os_release') |
3354 | @patch.object(utils, 'git_install_requested') |
3355 | @patch.object(hooks, 'is_db_ready') |
3356 | @patch.object(hooks, 'is_db_initialised') |
3357 | @@ -926,7 +932,8 @@ |
3358 | mock_log, |
3359 | mock_is_db_initialised, |
3360 | mock_is_db_ready, |
3361 | - git_requested): |
3362 | + git_requested, |
3363 | + os_release): |
3364 | mock_is_db_initialised.return_value = True |
3365 | mock_is_db_ready.return_value = True |
3366 | mock_is_elected_leader.return_value = False |
3367 | @@ -949,6 +956,7 @@ |
3368 | 'Firing identity_changed hook for all related services.') |
3369 | self.assertTrue(self.ensure_initial_admin.called) |
3370 | |
3371 | + @patch.object(utils, 'os_release') |
3372 | @patch.object(utils, 'git_install_requested') |
3373 | @patch('keystone_utils.log') |
3374 | @patch('keystone_utils.relation_ids') |
3375 | @@ -959,7 +967,8 @@ |
3376 | mock_update_hash_from_path, |
3377 | mock_ensure_ssl_cert_master, |
3378 | mock_relation_ids, |
3379 | - mock_log, git_requested): |
3380 | + mock_log, git_requested, |
3381 | + os_release): |
3382 | mock_relation_ids.return_value = [] |
3383 | mock_ensure_ssl_cert_master.return_value = False |
3384 | # Ensure always returns diff |
3385 | |
3386 | === modified file 'unit_tests/test_keystone_utils.py' |
3387 | --- unit_tests/test_keystone_utils.py 2016-02-19 14:49:59 +0000 |
3388 | +++ unit_tests/test_keystone_utils.py 2016-03-05 15:43:48 +0000 |
3389 | @@ -1,7 +1,6 @@ |
3390 | from mock import patch, call, MagicMock, Mock |
3391 | from test_utils import CharmTestCase |
3392 | import os |
3393 | -import manager |
3394 | |
3395 | os.environ['JUJU_UNIT_NAME'] = 'keystone' |
3396 | with patch('charmhelpers.core.hookenv.config') as config: |
3397 | @@ -172,10 +171,11 @@ |
3398 | self.subprocess.check_output.assert_called_with(cmd) |
3399 | self.service_start.assert_called_with('keystone') |
3400 | |
3401 | + @patch.object(utils, 'get_manager') |
3402 | @patch.object(utils, 'resolve_address') |
3403 | @patch.object(utils, 'b64encode') |
3404 | def test_add_service_to_keystone_clustered_https_none_values( |
3405 | - self, b64encode, _resolve_address): |
3406 | + self, b64encode, _resolve_address, _get_manager): |
3407 | relation_id = 'identity-service:0' |
3408 | remote_unit = 'unit/0' |
3409 | _resolve_address.return_value = '10.10.10.10' |
3410 | @@ -214,7 +214,7 @@ |
3411 | @patch.object(utils, 'resolve_address') |
3412 | @patch.object(utils, 'ensure_valid_service') |
3413 | @patch.object(utils, 'add_endpoint') |
3414 | - @patch.object(manager, 'KeystoneManager') |
3415 | + @patch.object(utils, 'get_manager') |
3416 | def test_add_service_to_keystone_no_clustered_no_https_complete_values( |
3417 | self, KeystoneManager, add_endpoint, ensure_valid_service, |
3418 | _resolve_address): |
3419 | @@ -253,9 +253,12 @@ |
3420 | internalurl='192.168.1.2') |
3421 | self.assertTrue(self.get_admin_token.called) |
3422 | self.get_service_password.assert_called_with('keystone') |
3423 | - self.create_user.assert_called_with('keystone', 'password', 'tenant') |
3424 | - self.grant_role.assert_called_with('keystone', 'admin', 'tenant') |
3425 | - self.create_role.assert_called_with('role1', 'keystone', 'tenant') |
3426 | + self.create_user.assert_called_with('keystone', 'password', 'tenant', |
3427 | + None) |
3428 | + self.grant_role.assert_called_with('keystone', 'Admin', 'tenant', |
3429 | + 'default') |
3430 | + self.create_role.assert_called_with('role1', 'keystone', 'tenant', |
3431 | + None) |
3432 | |
3433 | relation_data = {'auth_host': '10.0.0.3', 'service_host': '10.0.0.3', |
3434 | 'admin_token': 'token', 'service_port': 81, |
3435 | @@ -266,7 +269,7 @@ |
3436 | 'ssl_cert': '__null__', 'ssl_key': '__null__', |
3437 | 'ca_cert': '__null__', |
3438 | 'auth_protocol': 'http', 'service_protocol': 'http', |
3439 | - 'service_tenant_id': 'tenant_id'} |
3440 | + 'service_tenant_id': 'tenant_id', 'api_version': 2} |
3441 | |
3442 | filtered = {} |
3443 | for k, v in relation_data.iteritems(): |
3444 | @@ -284,7 +287,7 @@ |
3445 | @patch('charmhelpers.contrib.openstack.ip.config') |
3446 | @patch.object(utils, 'ensure_valid_service') |
3447 | @patch.object(utils, 'add_endpoint') |
3448 | - @patch.object(manager, 'KeystoneManager') |
3449 | + @patch.object(utils, 'get_manager') |
3450 | def test_add_service_to_keystone_nosubset( |
3451 | self, KeystoneManager, add_endpoint, ensure_valid_service, |
3452 | ip_config): |
3453 | @@ -317,8 +320,9 @@ |
3454 | mock_grant_role, |
3455 | mock_user_exists): |
3456 | mock_user_exists.return_value = False |
3457 | - utils.create_user_credentials('userA', 'tenantA', 'passA') |
3458 | - mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA')]) |
3459 | + utils.create_user_credentials('userA', 'passA', tenant='tenantA') |
3460 | + mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA', |
3461 | + None)]) |
3462 | mock_create_role.assert_has_calls([]) |
3463 | mock_grant_role.assert_has_calls([]) |
3464 | |
3465 | @@ -329,11 +333,14 @@ |
3466 | def test_create_user_credentials(self, mock_create_user, mock_create_role, |
3467 | mock_grant_role, mock_user_exists): |
3468 | mock_user_exists.return_value = False |
3469 | - utils.create_user_credentials('userA', 'tenantA', 'passA', |
3470 | + utils.create_user_credentials('userA', 'passA', tenant='tenantA', |
3471 | grants=['roleA'], new_roles=['roleB']) |
3472 | - mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA')]) |
3473 | - mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA')]) |
3474 | - mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA')]) |
3475 | + mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA', |
3476 | + None)]) |
3477 | + mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA', |
3478 | + None)]) |
3479 | + mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA', |
3480 | + None)]) |
3481 | |
3482 | @patch.object(utils, 'update_user_password') |
3483 | @patch.object(utils, 'user_exists') |
3484 | @@ -346,11 +353,13 @@ |
3485 | mock_user_exists, |
3486 | mock_update_user_password): |
3487 | mock_user_exists.return_value = True |
3488 | - utils.create_user_credentials('userA', 'tenantA', 'passA', |
3489 | + utils.create_user_credentials('userA', 'passA', tenant='tenantA', |
3490 | grants=['roleA'], new_roles=['roleB']) |
3491 | mock_create_user.assert_has_calls([]) |
3492 | - mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA')]) |
3493 | - mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA')]) |
3494 | + mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA', |
3495 | + None)]) |
3496 | + mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA', |
3497 | + None)]) |
3498 | mock_update_user_password.assert_has_calls([call('userA', 'passA')]) |
3499 | |
3500 | @patch.object(utils, 'get_service_password') |
3501 | @@ -358,10 +367,12 @@ |
3502 | def test_create_service_credentials(self, mock_create_user_credentials, |
3503 | mock_get_service_password): |
3504 | mock_get_service_password.return_value = 'passA' |
3505 | - cfg = {'service-tenant': 'tenantA', 'admin-role': 'Admin'} |
3506 | + cfg = {'service-tenant': 'tenantA', 'admin-role': 'Admin', |
3507 | + 'preferred-api-version': 2} |
3508 | self.config.side_effect = lambda key: cfg.get(key, None) |
3509 | - calls = [call('serviceA', 'tenantA', 'passA', grants=['Admin'], |
3510 | - new_roles=None)] |
3511 | + calls = [call('serviceA', 'passA', domain=None, grants=['Admin'], |
3512 | + new_roles=None, tenant='tenantA')] |
3513 | + |
3514 | utils.create_service_credentials('serviceA') |
3515 | mock_create_user_credentials.assert_has_calls(calls) |
3516 | |
3517 | @@ -594,7 +605,8 @@ |
3518 | internal_ip='10.0.0.1', |
3519 | admin_ip='10.0.0.1', |
3520 | auth_port=35357, |
3521 | - region='RegionOne' |
3522 | + region='RegionOne', |
3523 | + api_version=2, |
3524 | ) |
3525 | |
3526 | @patch.object(utils, 'peer_units') |
3527 | @@ -704,21 +716,21 @@ |
3528 | self.assertEquals(render.call_args_list, expected) |
3529 | service_restart.assert_called_with('keystone') |
3530 | |
3531 | - @patch.object(manager, 'KeystoneManager') |
3532 | + @patch.object(utils, 'get_manager') |
3533 | def test_is_service_present(self, KeystoneManager): |
3534 | mock_keystone = MagicMock() |
3535 | mock_keystone.resolve_service_id.return_value = 'sid1' |
3536 | KeystoneManager.return_value = mock_keystone |
3537 | self.assertTrue(utils.is_service_present('bob', 'bill')) |
3538 | |
3539 | - @patch.object(manager, 'KeystoneManager') |
3540 | + @patch.object(utils, 'get_manager') |
3541 | def test_is_service_present_false(self, KeystoneManager): |
3542 | mock_keystone = MagicMock() |
3543 | mock_keystone.resolve_service_id.return_value = None |
3544 | KeystoneManager.return_value = mock_keystone |
3545 | self.assertFalse(utils.is_service_present('bob', 'bill')) |
3546 | |
3547 | - @patch.object(manager, 'KeystoneManager') |
3548 | + @patch.object(utils, 'get_manager') |
3549 | def test_delete_service_entry(self, KeystoneManager): |
3550 | mock_keystone = MagicMock() |
3551 | mock_keystone.resolve_service_id.return_value = 'sid1' |