Merge lp:~openstack-charmers/charms/trusty/keystone/keystonev3 into lp:~openstack-charmers-archive/charms/trusty/keystone/next
- Trusty Tahr (14.04)
- keystonev3
- Merge into next
Status: | Work in progress |
---|---|
Proposed branch: | lp:~openstack-charmers/charms/trusty/keystone/keystonev3 |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/keystone/next |
Diff against target: |
3551 lines (+2443/-211) (has conflicts) 20 files modified
charm-helpers-tests.yaml (+2/-0) charmhelpers/contrib/openstack/neutron.py (+13/-0) charmhelpers/contrib/storage/linux/ceph.py (+38/-12) config.yaml (+6/-0) hooks/keystone_context.py (+8/-2) hooks/keystone_hooks.py (+6/-0) hooks/keystone_utils.py (+254/-98) hooks/manager.py (+183/-3) templates/liberty/policy.json (+382/-0) templates/liberty/policy.json.v2 (+184/-0) tests/basic_deployment.py (+208/-44) tests/charmhelpers/contrib/openstack/amulet/utils.py (+34/-11) tests/charmhelpers/core/__init__.py (+15/-0) tests/charmhelpers/core/decorators.py (+57/-0) tests/charmhelpers/core/hookenv.py (+978/-0) unit_tests/test_actions.py (+14/-7) unit_tests/test_actions_git_reinstall.py (+2/-1) unit_tests/test_actions_openstack_upgrade.py (+9/-4) unit_tests/test_keystone_hooks.py (+14/-5) unit_tests/test_keystone_utils.py (+36/-24) Text conflict in charmhelpers/contrib/openstack/neutron.py Text conflict in hooks/keystone_utils.py |
To merge this branch: | bzr merge lp:~openstack-charmers/charms/trusty/keystone/keystonev3 |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email:
|
Commit message
Description of the change
- 188. By Liam Young
-
Update unit tests to mock new method used for accessing keystone manager
- 189. By Liam Young
-
Lint fixes and unit tests for manager
- 190. By Liam Young
-
Fix charm-helper src
- 191. By Liam Young
-
Fix readme
- 192. By Liam Young
-
Update charm to render policy.json which is needed to enforce keystone v3 features like domains
- 193. By Liam Young
-
Fix getting domain info when keystone may be offline
- 194. By Liam Young
-
Setup cloud_admin user
- 195. By Liam Young
-
v2 fixes
- 196. By Liam Young
-
Fix endpoint generation
- 197. By Liam Young
-
Transitioning to dynamic keystone ep api version discovery
- 198. By Liam Young
-
Lint tidy
- 199. By Liam Young
-
Update amulet tests
- 200. By Liam Young
-
More amulet updates
- 201. By Liam Young
-
Get v3 tests on openstack release > liberty
- 202. By Liam Young
-
POLICY_JSON is only managed for liberty and above, so don't try to write it for earlier releases
- 203. By Liam Young
-
More amulet fixes
- 204. By Liam Young
-
Fixes for pure v3 deploy
- 205. By Liam Young
-
More fixes
- 206. By Liam Young
-
More fixes
- 207. By Liam Young
-
Catch exception when keystone is down
- 208. By Liam Young
-
Import problems
- 209. By Liam Young
-
Fix api v2
- 210. By Liam Young
-
Fix lint
- 211. By Liam Young
-
Fix typo
- 212. By Liam Young
-
Remove cloud_admin user
- 213. By Liam Young
-
Remove cloud_admin user from tests
- 214. By Liam Young
-
Need to specify domain for user lookups with v3
- 215. By Liam Young
-
Role needs to be created after user or grant fails
- 216. By Liam Young
-
Fix unit_tests
Unmerged revisions
- 216. By Liam Young
-
Fix unit_tests
- 215. By Liam Young
-
Role needs to be created after user or grant fails
- 214. By Liam Young
-
Need to specify domain for user lookups with v3
- 213. By Liam Young
-
Remove cloud_admin user from tests
- 212. By Liam Young
-
Remove cloud_admin user
- 211. By Liam Young
-
Fix typo
- 210. By Liam Young
-
Fix lint
- 209. By Liam Young
-
Fix api v2
- 208. By Liam Young
-
Import problems
- 207. By Liam Young
-
Catch exception when keystone is down
Preview Diff
1 | === modified file 'charm-helpers-tests.yaml' | |||
2 | --- charm-helpers-tests.yaml 2016-02-19 14:47:52 +0000 | |||
3 | +++ charm-helpers-tests.yaml 2016-03-05 15:43:48 +0000 | |||
4 | @@ -3,3 +3,5 @@ | |||
5 | 3 | include: | 3 | include: |
6 | 4 | - contrib.amulet | 4 | - contrib.amulet |
7 | 5 | - contrib.openstack.amulet | 5 | - contrib.openstack.amulet |
8 | 6 | - core.hookenv | ||
9 | 7 | - core.decorators | ||
10 | 6 | 8 | ||
11 | === modified file 'charmhelpers/contrib/openstack/neutron.py' | |||
12 | --- charmhelpers/contrib/openstack/neutron.py 2016-02-19 14:49:59 +0000 | |||
13 | +++ charmhelpers/contrib/openstack/neutron.py 2016-03-05 15:43:48 +0000 | |||
14 | @@ -233,6 +233,7 @@ | |||
15 | 233 | 'neutron-plugin-ml2'] | 233 | 'neutron-plugin-ml2'] |
16 | 234 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards | 234 | # NOTE: patch in vmware renames nvp->nsx for icehouse onwards |
17 | 235 | plugins['nvp'] = plugins['nsx'] | 235 | plugins['nvp'] = plugins['nsx'] |
18 | 236 | <<<<<<< TREE | ||
19 | 236 | if release >= 'kilo': | 237 | if release >= 'kilo': |
20 | 237 | plugins['midonet']['driver'] = ( | 238 | plugins['midonet']['driver'] = ( |
21 | 238 | 'neutron.plugins.midonet.plugin.MidonetPluginV2') | 239 | 'neutron.plugins.midonet.plugin.MidonetPluginV2') |
22 | @@ -245,6 +246,18 @@ | |||
23 | 245 | 'python-neutron-plugin-midonet') | 246 | 'python-neutron-plugin-midonet') |
24 | 246 | plugins['midonet']['server_packages'].append( | 247 | plugins['midonet']['server_packages'].append( |
25 | 247 | 'python-networking-midonet') | 248 | 'python-networking-midonet') |
26 | 249 | ======= | ||
27 | 250 | if release >= 'kilo': | ||
28 | 251 | plugins['midonet']['driver'] = ( | ||
29 | 252 | 'neutron.plugins.midonet.plugin.MidonetPluginV2') | ||
30 | 253 | if release >= 'liberty': | ||
31 | 254 | plugins['midonet']['driver'] = ( | ||
32 | 255 | 'midonet.neutron.plugin_v1.MidonetPluginV2') | ||
33 | 256 | plugins['midonet']['server_packages'].remove( | ||
34 | 257 | 'python-neutron-plugin-midonet') | ||
35 | 258 | plugins['midonet']['server_packages'].append( | ||
36 | 259 | 'python-networking-midonet') | ||
37 | 260 | >>>>>>> MERGE-SOURCE | ||
38 | 248 | return plugins | 261 | return plugins |
39 | 249 | 262 | ||
40 | 250 | 263 | ||
41 | 251 | 264 | ||
42 | === modified file 'charmhelpers/contrib/storage/linux/ceph.py' | |||
43 | --- charmhelpers/contrib/storage/linux/ceph.py 2016-01-04 21:27:51 +0000 | |||
44 | +++ charmhelpers/contrib/storage/linux/ceph.py 2016-03-05 15:43:48 +0000 | |||
45 | @@ -120,6 +120,7 @@ | |||
46 | 120 | """ | 120 | """ |
47 | 121 | A custom error to inform the caller that a pool creation failed. Provides an error message | 121 | A custom error to inform the caller that a pool creation failed. Provides an error message |
48 | 122 | """ | 122 | """ |
49 | 123 | |||
50 | 123 | def __init__(self, message): | 124 | def __init__(self, message): |
51 | 124 | super(PoolCreationError, self).__init__(message) | 125 | super(PoolCreationError, self).__init__(message) |
52 | 125 | 126 | ||
53 | @@ -129,6 +130,7 @@ | |||
54 | 129 | An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. | 130 | An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool. |
55 | 130 | Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). | 131 | Do not call create() on this base class as it will not do anything. Instantiate a child class and call create(). |
56 | 131 | """ | 132 | """ |
57 | 133 | |||
58 | 132 | def __init__(self, service, name): | 134 | def __init__(self, service, name): |
59 | 133 | self.service = service | 135 | self.service = service |
60 | 134 | self.name = name | 136 | self.name = name |
61 | @@ -180,36 +182,41 @@ | |||
62 | 180 | :return: int. The number of pgs to use. | 182 | :return: int. The number of pgs to use. |
63 | 181 | """ | 183 | """ |
64 | 182 | validator(value=pool_size, valid_type=int) | 184 | validator(value=pool_size, valid_type=int) |
67 | 183 | osds = get_osds(self.service) | 185 | osd_list = get_osds(self.service) |
68 | 184 | if not osds: | 186 | if not osd_list: |
69 | 185 | # NOTE(james-page): Default to 200 for older ceph versions | 187 | # NOTE(james-page): Default to 200 for older ceph versions |
70 | 186 | # which don't support OSD query from cli | 188 | # which don't support OSD query from cli |
71 | 187 | return 200 | 189 | return 200 |
72 | 188 | 190 | ||
73 | 191 | osd_list_length = len(osd_list) | ||
74 | 189 | # Calculate based on Ceph best practices | 192 | # Calculate based on Ceph best practices |
76 | 190 | if osds < 5: | 193 | if osd_list_length < 5: |
77 | 191 | return 128 | 194 | return 128 |
79 | 192 | elif 5 < osds < 10: | 195 | elif 5 < osd_list_length < 10: |
80 | 193 | return 512 | 196 | return 512 |
82 | 194 | elif 10 < osds < 50: | 197 | elif 10 < osd_list_length < 50: |
83 | 195 | return 4096 | 198 | return 4096 |
84 | 196 | else: | 199 | else: |
86 | 197 | estimate = (osds * 100) / pool_size | 200 | estimate = (osd_list_length * 100) / pool_size |
87 | 198 | # Return the next nearest power of 2 | 201 | # Return the next nearest power of 2 |
88 | 199 | index = bisect.bisect_right(powers_of_two, estimate) | 202 | index = bisect.bisect_right(powers_of_two, estimate) |
89 | 200 | return powers_of_two[index] | 203 | return powers_of_two[index] |
90 | 201 | 204 | ||
91 | 202 | 205 | ||
92 | 203 | class ReplicatedPool(Pool): | 206 | class ReplicatedPool(Pool): |
94 | 204 | def __init__(self, service, name, replicas=2): | 207 | def __init__(self, service, name, pg_num=None, replicas=2): |
95 | 205 | super(ReplicatedPool, self).__init__(service=service, name=name) | 208 | super(ReplicatedPool, self).__init__(service=service, name=name) |
96 | 206 | self.replicas = replicas | 209 | self.replicas = replicas |
97 | 210 | if pg_num is None: | ||
98 | 211 | self.pg_num = self.get_pgs(self.replicas) | ||
99 | 212 | else: | ||
100 | 213 | self.pg_num = pg_num | ||
101 | 207 | 214 | ||
102 | 208 | def create(self): | 215 | def create(self): |
103 | 209 | if not pool_exists(self.service, self.name): | 216 | if not pool_exists(self.service, self.name): |
104 | 210 | # Create it | 217 | # Create it |
107 | 211 | pgs = self.get_pgs(self.replicas) | 218 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', |
108 | 212 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)] | 219 | self.name, str(self.pg_num)] |
109 | 213 | try: | 220 | try: |
110 | 214 | check_call(cmd) | 221 | check_call(cmd) |
111 | 215 | except CalledProcessError: | 222 | except CalledProcessError: |
112 | @@ -241,7 +248,7 @@ | |||
113 | 241 | 248 | ||
114 | 242 | pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) | 249 | pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m'])) |
115 | 243 | # Create it | 250 | # Create it |
117 | 244 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), | 251 | cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs), |
118 | 245 | 'erasure', self.erasure_code_profile] | 252 | 'erasure', self.erasure_code_profile] |
119 | 246 | try: | 253 | try: |
120 | 247 | check_call(cmd) | 254 | check_call(cmd) |
121 | @@ -322,7 +329,8 @@ | |||
122 | 322 | :return: None. Can raise CalledProcessError | 329 | :return: None. Can raise CalledProcessError |
123 | 323 | """ | 330 | """ |
124 | 324 | # Set a byte quota on a RADOS pool in ceph. | 331 | # Set a byte quota on a RADOS pool in ceph. |
126 | 325 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes] | 332 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, |
127 | 333 | 'max_bytes', str(max_bytes)] | ||
128 | 326 | try: | 334 | try: |
129 | 327 | check_call(cmd) | 335 | check_call(cmd) |
130 | 328 | except CalledProcessError: | 336 | except CalledProcessError: |
131 | @@ -343,7 +351,25 @@ | |||
132 | 343 | raise | 351 | raise |
133 | 344 | 352 | ||
134 | 345 | 353 | ||
136 | 346 | def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host', | 354 | def remove_erasure_profile(service, profile_name): |
137 | 355 | """ | ||
138 | 356 | Create a new erasure code profile if one does not already exist for it. Updates | ||
139 | 357 | the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ | ||
140 | 358 | for more details | ||
141 | 359 | :param service: six.string_types. The Ceph user name to run the command under | ||
142 | 360 | :param profile_name: six.string_types | ||
143 | 361 | :return: None. Can raise CalledProcessError | ||
144 | 362 | """ | ||
145 | 363 | cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm', | ||
146 | 364 | profile_name] | ||
147 | 365 | try: | ||
148 | 366 | check_call(cmd) | ||
149 | 367 | except CalledProcessError: | ||
150 | 368 | raise | ||
151 | 369 | |||
152 | 370 | |||
153 | 371 | def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', | ||
154 | 372 | failure_domain='host', | ||
155 | 347 | data_chunks=2, coding_chunks=1, | 373 | data_chunks=2, coding_chunks=1, |
156 | 348 | locality=None, durability_estimator=None): | 374 | locality=None, durability_estimator=None): |
157 | 349 | """ | 375 | """ |
158 | 350 | 376 | ||
159 | === modified file 'config.yaml' | |||
160 | --- config.yaml 2016-02-18 09:59:57 +0000 | |||
161 | +++ config.yaml 2016-03-05 15:43:48 +0000 | |||
162 | @@ -298,6 +298,12 @@ | |||
163 | 298 | description: | | 298 | description: | |
164 | 299 | A comma-separated list of nagios servicegroups. | 299 | A comma-separated list of nagios servicegroups. |
165 | 300 | If left empty, the nagios_context will be used as the servicegroup | 300 | If left empty, the nagios_context will be used as the servicegroup |
166 | 301 | preferred-api-version: | ||
167 | 302 | default: 2 | ||
168 | 303 | type: int | ||
169 | 304 | description: | | ||
170 | 305 | Use this keystone api version for keystone endpoints and advertise this | ||
171 | 306 | version to identity client charms | ||
172 | 301 | action-managed-upgrade: | 307 | action-managed-upgrade: |
173 | 302 | type: boolean | 308 | type: boolean |
174 | 303 | default: False | 309 | default: False |
175 | 304 | 310 | ||
176 | === modified file 'hooks/keystone_context.py' | |||
177 | --- hooks/keystone_context.py 2016-02-18 09:59:57 +0000 | |||
178 | +++ hooks/keystone_context.py 2016-03-05 15:43:48 +0000 | |||
179 | @@ -190,9 +190,15 @@ | |||
180 | 190 | from keystone_utils import ( | 190 | from keystone_utils import ( |
181 | 191 | api_port, set_admin_token, endpoint_url, resolve_address, | 191 | api_port, set_admin_token, endpoint_url, resolve_address, |
182 | 192 | PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths, | 192 | PUBLIC, ADMIN, PKI_CERTS_DIR, ensure_pki_cert_paths, |
183 | 193 | get_admin_domain_id | ||
184 | 193 | ) | 194 | ) |
185 | 194 | ctxt = {} | 195 | ctxt = {} |
186 | 195 | ctxt['token'] = set_admin_token(config('admin-token')) | 196 | ctxt['token'] = set_admin_token(config('admin-token')) |
187 | 197 | ctxt['api_version'] = int(config('preferred-api-version')) | ||
188 | 198 | ctxt['admin_role'] = config('admin-role') | ||
189 | 199 | if ctxt['api_version'] > 2: | ||
190 | 200 | ctxt['admin_domain_id'] = \ | ||
191 | 201 | get_admin_domain_id() or 'admin_domain_id' | ||
192 | 196 | ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'), | 202 | ctxt['admin_port'] = determine_api_port(api_port('keystone-admin'), |
193 | 197 | singlenode_mode=True) | 203 | singlenode_mode=True) |
194 | 198 | ctxt['public_port'] = determine_api_port(api_port('keystone-public'), | 204 | ctxt['public_port'] = determine_api_port(api_port('keystone-public'), |
195 | @@ -233,10 +239,10 @@ | |||
196 | 233 | # correct auth URL. | 239 | # correct auth URL. |
197 | 234 | ctxt['public_endpoint'] = endpoint_url( | 240 | ctxt['public_endpoint'] = endpoint_url( |
198 | 235 | resolve_address(PUBLIC), | 241 | resolve_address(PUBLIC), |
200 | 236 | api_port('keystone-public')).rstrip('v2.0') | 242 | api_port('keystone-public')).replace('v2.0', '') |
201 | 237 | ctxt['admin_endpoint'] = endpoint_url( | 243 | ctxt['admin_endpoint'] = endpoint_url( |
202 | 238 | resolve_address(ADMIN), | 244 | resolve_address(ADMIN), |
204 | 239 | api_port('keystone-admin')).rstrip('v2.0') | 245 | api_port('keystone-admin')).replace('v2.0', '') |
205 | 240 | 246 | ||
206 | 241 | return ctxt | 247 | return ctxt |
207 | 242 | 248 | ||
208 | 243 | 249 | ||
209 | === modified file 'hooks/keystone_hooks.py' | |||
210 | --- hooks/keystone_hooks.py 2016-01-19 16:54:03 +0000 | |||
211 | +++ hooks/keystone_hooks.py 2016-03-05 15:43:48 +0000 | |||
212 | @@ -47,6 +47,7 @@ | |||
213 | 47 | git_install_requested, | 47 | git_install_requested, |
214 | 48 | openstack_upgrade_available, | 48 | openstack_upgrade_available, |
215 | 49 | sync_db_with_multi_ipv6_addresses, | 49 | sync_db_with_multi_ipv6_addresses, |
216 | 50 | os_release, | ||
217 | 50 | ) | 51 | ) |
218 | 51 | 52 | ||
219 | 52 | from keystone_utils import ( | 53 | from keystone_utils import ( |
220 | @@ -64,6 +65,7 @@ | |||
221 | 64 | services, | 65 | services, |
222 | 65 | CLUSTER_RES, | 66 | CLUSTER_RES, |
223 | 66 | KEYSTONE_CONF, | 67 | KEYSTONE_CONF, |
224 | 68 | POLICY_JSON, | ||
225 | 67 | SSH_USER, | 69 | SSH_USER, |
226 | 68 | setup_ipv6, | 70 | setup_ipv6, |
227 | 69 | send_notifications, | 71 | send_notifications, |
228 | @@ -309,6 +311,8 @@ | |||
229 | 309 | else: | 311 | else: |
230 | 310 | CONFIGS.write(KEYSTONE_CONF) | 312 | CONFIGS.write(KEYSTONE_CONF) |
231 | 311 | leader_init_db_if_ready(use_current_context=True) | 313 | leader_init_db_if_ready(use_current_context=True) |
232 | 314 | if os_release('keystone-common') >= 'liberty': | ||
233 | 315 | CONFIGS.write(POLICY_JSON) | ||
234 | 312 | 316 | ||
235 | 313 | 317 | ||
236 | 314 | @hooks.hook('pgsql-db-relation-changed') | 318 | @hooks.hook('pgsql-db-relation-changed') |
237 | @@ -320,6 +324,8 @@ | |||
238 | 320 | else: | 324 | else: |
239 | 321 | CONFIGS.write(KEYSTONE_CONF) | 325 | CONFIGS.write(KEYSTONE_CONF) |
240 | 322 | leader_init_db_if_ready(use_current_context=True) | 326 | leader_init_db_if_ready(use_current_context=True) |
241 | 327 | if os_release('keystone-common') >= 'liberty': | ||
242 | 328 | CONFIGS.write(POLICY_JSON) | ||
243 | 323 | 329 | ||
244 | 324 | 330 | ||
245 | 325 | @hooks.hook('identity-service-relation-changed') | 331 | @hooks.hook('identity-service-relation-changed') |
246 | 326 | 332 | ||
247 | === modified file 'hooks/keystone_utils.py' | |||
248 | --- hooks/keystone_utils.py 2016-02-19 14:49:59 +0000 | |||
249 | +++ hooks/keystone_utils.py 2016-03-05 15:43:48 +0000 | |||
250 | @@ -166,6 +166,7 @@ | |||
251 | 166 | KEYSTONE_CONF_DIR = os.path.dirname(KEYSTONE_CONF) | 166 | KEYSTONE_CONF_DIR = os.path.dirname(KEYSTONE_CONF) |
252 | 167 | STORED_PASSWD = "/var/lib/keystone/keystone.passwd" | 167 | STORED_PASSWD = "/var/lib/keystone/keystone.passwd" |
253 | 168 | STORED_TOKEN = "/var/lib/keystone/keystone.token" | 168 | STORED_TOKEN = "/var/lib/keystone/keystone.token" |
254 | 169 | STORED_ADMIN_DOMAIN_ID = "/var/lib/keystone/keystone.admin_domain_id" | ||
255 | 169 | SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd' | 170 | SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd' |
256 | 170 | 171 | ||
257 | 171 | HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' | 172 | HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' |
258 | @@ -184,6 +185,10 @@ | |||
259 | 184 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 185 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
260 | 185 | SSL_SYNC_SEMAPHORE = threading.Semaphore() | 186 | SSL_SYNC_SEMAPHORE = threading.Semaphore() |
261 | 186 | SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH] | 187 | SSL_DIRS = [SSL_DIR, APACHE_SSL_DIR, CA_CERT_PATH] |
262 | 188 | ADMIN_DOMAIN = 'admin_domain' | ||
263 | 189 | DEFAULT_DOMAIN = 'Default' | ||
264 | 190 | POLICY_JSON = '/etc/keystone/policy.json' | ||
265 | 191 | |||
266 | 187 | BASE_RESOURCE_MAP = OrderedDict([ | 192 | BASE_RESOURCE_MAP = OrderedDict([ |
267 | 188 | (KEYSTONE_CONF, { | 193 | (KEYSTONE_CONF, { |
268 | 189 | 'services': BASE_SERVICES, | 194 | 'services': BASE_SERVICES, |
269 | @@ -212,6 +217,10 @@ | |||
270 | 212 | 'contexts': [keystone_context.ApacheSSLContext()], | 217 | 'contexts': [keystone_context.ApacheSSLContext()], |
271 | 213 | 'services': ['apache2'], | 218 | 'services': ['apache2'], |
272 | 214 | }), | 219 | }), |
273 | 220 | (POLICY_JSON, { | ||
274 | 221 | 'contexts': [keystone_context.KeystoneContext()], | ||
275 | 222 | 'services': BASE_SERVICES, | ||
276 | 223 | }), | ||
277 | 215 | ]) | 224 | ]) |
278 | 216 | 225 | ||
279 | 217 | valid_services = { | 226 | valid_services = { |
280 | @@ -329,6 +338,8 @@ | |||
281 | 329 | """ | 338 | """ |
282 | 330 | resource_map = deepcopy(BASE_RESOURCE_MAP) | 339 | resource_map = deepcopy(BASE_RESOURCE_MAP) |
283 | 331 | 340 | ||
284 | 341 | if os_release('keystone') < 'liberty': | ||
285 | 342 | resource_map.pop(POLICY_JSON) | ||
286 | 332 | if os.path.exists('/etc/apache2/conf-available'): | 343 | if os.path.exists('/etc/apache2/conf-available'): |
287 | 333 | resource_map.pop(APACHE_CONF) | 344 | resource_map.pop(APACHE_CONF) |
288 | 334 | else: | 345 | else: |
289 | @@ -452,18 +463,30 @@ | |||
290 | 452 | # OLD | 463 | # OLD |
291 | 453 | 464 | ||
292 | 454 | 465 | ||
294 | 455 | def get_local_endpoint(): | 466 | def get_api_suffix(): |
295 | 467 | if get_api_version() == 2: | ||
296 | 468 | api_suffix = 'v2.0' | ||
297 | 469 | else: | ||
298 | 470 | api_suffix = 'v3' | ||
299 | 471 | return api_suffix | ||
300 | 472 | |||
301 | 473 | |||
302 | 474 | def get_local_endpoint(api_suffix=None): | ||
303 | 456 | """Returns the URL for the local end-point bypassing haproxy/ssl""" | 475 | """Returns the URL for the local end-point bypassing haproxy/ssl""" |
304 | 476 | if not api_suffix: | ||
305 | 477 | api_suffix = get_api_suffix() | ||
306 | 457 | if config('prefer-ipv6'): | 478 | if config('prefer-ipv6'): |
307 | 458 | ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0] | 479 | ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0] |
309 | 459 | endpoint_url = 'http://[%s]:{}/v2.0/' % ipv6_addr | 480 | endpoint_url = 'http://[%s]:{}/{}/' % ipv6_addr |
310 | 460 | local_endpoint = endpoint_url.format( | 481 | local_endpoint = endpoint_url.format( |
311 | 461 | determine_api_port(api_port('keystone-admin'), | 482 | determine_api_port(api_port('keystone-admin'), |
313 | 462 | singlenode_mode=True)) | 483 | singlenode_mode=True), |
314 | 484 | api_suffix) | ||
315 | 463 | else: | 485 | else: |
317 | 464 | local_endpoint = 'http://localhost:{}/v2.0/'.format( | 486 | local_endpoint = 'http://localhost:{}/{}/'.format( |
318 | 465 | determine_api_port(api_port('keystone-admin'), | 487 | determine_api_port(api_port('keystone-admin'), |
320 | 466 | singlenode_mode=True)) | 488 | singlenode_mode=True), |
321 | 489 | api_suffix) | ||
322 | 467 | 490 | ||
323 | 468 | return local_endpoint | 491 | return local_endpoint |
324 | 469 | 492 | ||
325 | @@ -506,18 +529,14 @@ | |||
326 | 506 | 529 | ||
327 | 507 | 530 | ||
328 | 508 | def is_service_present(service_name, service_type): | 531 | def is_service_present(service_name, service_type): |
332 | 509 | import manager | 532 | manager = get_manager() |
330 | 510 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
331 | 511 | token=get_admin_token()) | ||
333 | 512 | service_id = manager.resolve_service_id(service_name, service_type) | 533 | service_id = manager.resolve_service_id(service_name, service_type) |
334 | 513 | return service_id is not None | 534 | return service_id is not None |
335 | 514 | 535 | ||
336 | 515 | 536 | ||
337 | 516 | def delete_service_entry(service_name, service_type): | 537 | def delete_service_entry(service_name, service_type): |
338 | 517 | """ Delete a service from keystone""" | 538 | """ Delete a service from keystone""" |
342 | 518 | import manager | 539 | manager = get_manager() |
340 | 519 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
341 | 520 | token=get_admin_token()) | ||
343 | 521 | service_id = manager.resolve_service_id(service_name, service_type) | 540 | service_id = manager.resolve_service_id(service_name, service_type) |
344 | 522 | if service_id: | 541 | if service_id: |
345 | 523 | manager.api.services.delete(service_id) | 542 | manager.api.services.delete(service_id) |
346 | @@ -526,28 +545,34 @@ | |||
347 | 526 | 545 | ||
348 | 527 | def create_service_entry(service_name, service_type, service_desc, owner=None): | 546 | def create_service_entry(service_name, service_type, service_desc, owner=None): |
349 | 528 | """ Add a new service entry to keystone if one does not already exist """ | 547 | """ Add a new service entry to keystone if one does not already exist """ |
353 | 529 | import manager | 548 | manager = get_manager() |
351 | 530 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
352 | 531 | token=get_admin_token()) | ||
354 | 532 | for service in [s._info for s in manager.api.services.list()]: | 549 | for service in [s._info for s in manager.api.services.list()]: |
355 | 533 | if service['name'] == service_name: | 550 | if service['name'] == service_name: |
356 | 534 | log("Service entry for '%s' already exists." % service_name, | 551 | log("Service entry for '%s' already exists." % service_name, |
357 | 535 | level=DEBUG) | 552 | level=DEBUG) |
358 | 536 | return | 553 | return |
359 | 537 | 554 | ||
362 | 538 | manager.api.services.create(name=service_name, | 555 | manager.api.services.create(service_name, |
363 | 539 | service_type=service_type, | 556 | service_type, |
364 | 540 | description=service_desc) | 557 | description=service_desc) |
365 | 541 | log("Created new service entry '%s'" % service_name, level=DEBUG) | 558 | log("Created new service entry '%s'" % service_name, level=DEBUG) |
366 | 542 | 559 | ||
367 | 543 | 560 | ||
368 | 544 | def create_endpoint_template(region, service, publicurl, adminurl, | 561 | def create_endpoint_template(region, service, publicurl, adminurl, |
369 | 545 | internalurl): | 562 | internalurl): |
370 | 563 | manager = get_manager() | ||
371 | 564 | if manager.api_version == 2: | ||
372 | 565 | create_endpoint_template_v2(manager, region, service, publicurl, | ||
373 | 566 | adminurl, internalurl) | ||
374 | 567 | else: | ||
375 | 568 | create_endpoint_template_v3(manager, region, service, publicurl, | ||
376 | 569 | adminurl, internalurl) | ||
377 | 570 | |||
378 | 571 | |||
379 | 572 | def create_endpoint_template_v2(manager, region, service, publicurl, adminurl, | ||
380 | 573 | internalurl): | ||
381 | 546 | """ Create a new endpoint template for service if one does not already | 574 | """ Create a new endpoint template for service if one does not already |
382 | 547 | exist matching name *and* region """ | 575 | exist matching name *and* region """ |
383 | 548 | import manager | ||
384 | 549 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
385 | 550 | token=get_admin_token()) | ||
386 | 551 | service_id = manager.resolve_service_id(service) | 576 | service_id = manager.resolve_service_id(service) |
387 | 552 | for ep in [e._info for e in manager.api.endpoints.list()]: | 577 | for ep in [e._info for e in manager.api.endpoints.list()]: |
388 | 553 | if ep['service_id'] == service_id and ep['region'] == region: | 578 | if ep['service_id'] == service_id and ep['region'] == region: |
389 | @@ -566,34 +591,91 @@ | |||
390 | 566 | log("Updating endpoint template with new endpoint urls.") | 591 | log("Updating endpoint template with new endpoint urls.") |
391 | 567 | manager.api.endpoints.delete(ep['id']) | 592 | manager.api.endpoints.delete(ep['id']) |
392 | 568 | 593 | ||
398 | 569 | manager.api.endpoints.create(region=region, | 594 | manager.create_endpoints(region=region, |
399 | 570 | service_id=service_id, | 595 | service_id=service_id, |
400 | 571 | publicurl=publicurl, | 596 | publicurl=publicurl, |
401 | 572 | adminurl=adminurl, | 597 | adminurl=adminurl, |
402 | 573 | internalurl=internalurl) | 598 | internalurl=internalurl) |
403 | 574 | log("Created new endpoint template for '%s' in '%s'" % (region, service), | 599 | log("Created new endpoint template for '%s' in '%s'" % (region, service), |
404 | 575 | level=DEBUG) | 600 | level=DEBUG) |
405 | 576 | 601 | ||
406 | 577 | 602 | ||
407 | 603 | def create_endpoint_template_v3(manager, region, service, publicurl, adminurl, | ||
408 | 604 | internalurl): | ||
409 | 605 | service_id = manager.resolve_service_id(service) | ||
410 | 606 | endpoints = { | ||
411 | 607 | 'public': publicurl, | ||
412 | 608 | 'admin': adminurl, | ||
413 | 609 | 'internal': internalurl, | ||
414 | 610 | } | ||
415 | 611 | for ep_type in endpoints.keys(): | ||
416 | 612 | # Delete endpoint if its has changed | ||
417 | 613 | ep_deleted = manager.delete_old_endpoint_v3( | ||
418 | 614 | ep_type, | ||
419 | 615 | service_id, | ||
420 | 616 | region, | ||
421 | 617 | endpoints[ep_type] | ||
422 | 618 | ) | ||
423 | 619 | ep_exists = manager.find_endpoint_v3( | ||
424 | 620 | ep_type, | ||
425 | 621 | service_id, | ||
426 | 622 | region | ||
427 | 623 | ) | ||
428 | 624 | if ep_deleted or not ep_exists: | ||
429 | 625 | manager.api.endpoints.create( | ||
430 | 626 | service_id, | ||
431 | 627 | endpoints[ep_type], | ||
432 | 628 | interface=ep_type, | ||
433 | 629 | region=region | ||
434 | 630 | ) | ||
435 | 631 | |||
436 | 632 | |||
437 | 578 | def create_tenant(name): | 633 | def create_tenant(name): |
438 | 579 | """Creates a tenant if it does not already exist""" | 634 | """Creates a tenant if it does not already exist""" |
446 | 580 | import manager | 635 | manager = get_manager() |
447 | 581 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | 636 | tenants = manager.resolve_tenant_id(name) |
448 | 582 | token=get_admin_token()) | 637 | if not tenants: |
449 | 583 | tenants = [t._info for t in manager.api.tenants.list()] | 638 | manager.create_tenant(tenant_name=name, |
450 | 584 | if not tenants or name not in [t['name'] for t in tenants]: | 639 | description='Created by Juju') |
444 | 585 | manager.api.tenants.create(tenant_name=name, | ||
445 | 586 | description='Created by Juju') | ||
451 | 587 | log("Created new tenant: %s" % name, level=DEBUG) | 640 | log("Created new tenant: %s" % name, level=DEBUG) |
452 | 588 | return | 641 | return |
453 | 589 | 642 | ||
454 | 590 | log("Tenant '%s' already exists." % name, level=DEBUG) | 643 | log("Tenant '%s' already exists." % name, level=DEBUG) |
455 | 591 | 644 | ||
456 | 592 | 645 | ||
461 | 593 | def user_exists(name): | 646 | def create_or_show_domain(name): |
462 | 594 | import manager | 647 | """Creates a tenant if it does not already exist""" |
463 | 595 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | 648 | manager = get_manager() |
464 | 596 | token=get_admin_token()) | 649 | domain_id = manager.resolve_domain_id(name) |
465 | 650 | if domain_id: | ||
466 | 651 | log("Domain '%s' already exists." % name, level=DEBUG) | ||
467 | 652 | else: | ||
468 | 653 | manager.create_domain(domain_name=name, | ||
469 | 654 | description='Created by Juju') | ||
470 | 655 | log("Created new domain: %s" % name, level=DEBUG) | ||
471 | 656 | domain_id = manager.resolve_domain_id(name) | ||
472 | 657 | return domain_id | ||
473 | 658 | |||
474 | 659 | |||
475 | 660 | def user_exists(name, domain=None): | ||
476 | 661 | manager = get_manager() | ||
477 | 662 | if domain: | ||
478 | 663 | domain_id = manager.resolve_domain_id(domain) | ||
479 | 664 | for user in manager.api.users.list(): | ||
480 | 665 | if user.name == name: | ||
481 | 666 | # In v3 Domains are seperate user namespaces so need to check that | ||
482 | 667 | # the domain matched if provided | ||
483 | 668 | if domain: | ||
484 | 669 | if domain_id == user.domain_id: | ||
485 | 670 | return True | ||
486 | 671 | else: | ||
487 | 672 | return True | ||
488 | 673 | |||
489 | 674 | return False | ||
490 | 675 | |||
491 | 676 | |||
492 | 677 | def old_user_exists(name): | ||
493 | 678 | manager = get_manager() | ||
494 | 597 | users = [u._info for u in manager.api.users.list()] | 679 | users = [u._info for u in manager.api.users.list()] |
495 | 598 | if not users or name not in [u['name'] for u in users]: | 680 | if not users or name not in [u['name'] for u in users]: |
496 | 599 | return False | 681 | return False |
497 | @@ -601,32 +683,44 @@ | |||
498 | 601 | return True | 683 | return True |
499 | 602 | 684 | ||
500 | 603 | 685 | ||
502 | 604 | def create_user(name, password, tenant): | 686 | def create_user(name, password, tenant=None, domain=None): |
503 | 605 | """Creates a user if it doesn't already exist, as a member of tenant""" | 687 | """Creates a user if it doesn't already exist, as a member of tenant""" |
508 | 606 | import manager | 688 | manager = get_manager() |
509 | 607 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | 689 | if user_exists(name, domain=domain): |
506 | 608 | token=get_admin_token()) | ||
507 | 609 | if user_exists(name): | ||
510 | 610 | log("A user named '%s' already exists" % name, level=DEBUG) | 690 | log("A user named '%s' already exists" % name, level=DEBUG) |
511 | 611 | return | 691 | return |
512 | 612 | 692 | ||
521 | 613 | tenant_id = manager.resolve_tenant_id(tenant) | 693 | tenant_id = None |
522 | 614 | if not tenant_id: | 694 | if tenant: |
523 | 615 | error_out('Could not resolve tenant_id for tenant %s' % tenant) | 695 | tenant_id = manager.resolve_tenant_id(tenant) |
524 | 616 | 696 | if not tenant_id: | |
525 | 617 | manager.api.users.create(name=name, | 697 | error_out('Could not resolve tenant_id for tenant %s' % tenant) |
526 | 618 | password=password, | 698 | |
527 | 619 | email='juju@localhost', | 699 | domain_id = None |
528 | 620 | tenant_id=tenant_id) | 700 | if domain: |
529 | 701 | domain_id = manager.resolve_domain_id(domain) | ||
530 | 702 | if not domain_id: | ||
531 | 703 | error_out('Could not resolve domain_id for domain %s' % domain) | ||
532 | 704 | |||
533 | 705 | manager.create_user(name=name, | ||
534 | 706 | password=password, | ||
535 | 707 | email='juju@localhost', | ||
536 | 708 | tenant_id=tenant_id, | ||
537 | 709 | domain_id=domain_id) | ||
538 | 621 | log("Created new user '%s' tenant: %s" % (name, tenant_id), | 710 | log("Created new user '%s' tenant: %s" % (name, tenant_id), |
539 | 622 | level=DEBUG) | 711 | level=DEBUG) |
540 | 623 | 712 | ||
541 | 624 | 713 | ||
543 | 625 | def create_role(name, user=None, tenant=None): | 714 | def get_manager(api_version=None): |
544 | 715 | """Return a keystonemanager for the correct API version""" | ||
545 | 716 | from manager import get_keystone_manager | ||
546 | 717 | return get_keystone_manager(get_local_endpoint(), get_admin_token(), | ||
547 | 718 | api_version) | ||
548 | 719 | |||
549 | 720 | |||
550 | 721 | def create_role(name, user=None, tenant=None, domain=None): | ||
551 | 626 | """Creates a role if it doesn't already exist. grants role to user""" | 722 | """Creates a role if it doesn't already exist. grants role to user""" |
555 | 627 | import manager | 723 | manager = get_manager() |
553 | 628 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
554 | 629 | token=get_admin_token()) | ||
556 | 630 | roles = [r._info for r in manager.api.roles.list()] | 724 | roles = [r._info for r in manager.api.roles.list()] |
557 | 631 | if not roles or name not in [r['name'] for r in roles]: | 725 | if not roles or name not in [r['name'] for r in roles]: |
558 | 632 | manager.api.roles.create(name=name) | 726 | manager.api.roles.create(name=name) |
559 | @@ -640,31 +734,36 @@ | |||
560 | 640 | # NOTE(adam_g): Keystone client requires id's for add_user_role, not names | 734 | # NOTE(adam_g): Keystone client requires id's for add_user_role, not names |
561 | 641 | user_id = manager.resolve_user_id(user) | 735 | user_id = manager.resolve_user_id(user) |
562 | 642 | role_id = manager.resolve_role_id(name) | 736 | role_id = manager.resolve_role_id(name) |
573 | 643 | tenant_id = manager.resolve_tenant_id(tenant) | 737 | |
574 | 644 | 738 | if None in [user_id, role_id]: | |
575 | 645 | if None in [user_id, role_id, tenant_id]: | 739 | error_out("Could not resolve [%s, %s]" % |
576 | 646 | error_out("Could not resolve [%s, %s, %s]" % | 740 | (user_id, role_id)) |
577 | 647 | (user_id, role_id, tenant_id)) | 741 | |
578 | 648 | 742 | grant_role(user, name, tenant, domain) | |
579 | 649 | grant_role(user, name, tenant) | 743 | |
580 | 650 | 744 | ||
581 | 651 | 745 | def grant_role(user, role, tenant=None, domain=None): | |
572 | 652 | def grant_role(user, role, tenant): | ||
582 | 653 | """Grant user and tenant a specific role""" | 746 | """Grant user and tenant a specific role""" |
586 | 654 | import manager | 747 | manager = get_manager() |
584 | 655 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
585 | 656 | token=get_admin_token()) | ||
587 | 657 | log("Granting user '%s' role '%s' on tenant '%s'" % | 748 | log("Granting user '%s' role '%s' on tenant '%s'" % |
588 | 658 | (user, role, tenant)) | 749 | (user, role, tenant)) |
589 | 659 | user_id = manager.resolve_user_id(user) | 750 | user_id = manager.resolve_user_id(user) |
590 | 660 | role_id = manager.resolve_role_id(role) | 751 | role_id = manager.resolve_role_id(role) |
594 | 661 | tenant_id = manager.resolve_tenant_id(tenant) | 752 | tenant_id = None |
595 | 662 | 753 | if tenant: | |
596 | 663 | cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id) | 754 | tenant_id = manager.resolve_tenant_id(tenant) |
597 | 755 | |||
598 | 756 | domain_id = None | ||
599 | 757 | if domain: | ||
600 | 758 | domain_id = manager.resolve_domain_id(domain) | ||
601 | 759 | |||
602 | 760 | cur_roles = manager.roles_for_user(user_id, tenant_id=tenant_id, | ||
603 | 761 | domain_id=domain_id) | ||
604 | 664 | if not cur_roles or role_id not in [r.id for r in cur_roles]: | 762 | if not cur_roles or role_id not in [r.id for r in cur_roles]: |
608 | 665 | manager.api.roles.add_user_role(user=user_id, | 763 | manager.add_user_role(user=user_id, |
609 | 666 | role=role_id, | 764 | role=role_id, |
610 | 667 | tenant=tenant_id) | 765 | tenant=tenant_id, |
611 | 766 | domain=domain_id) | ||
612 | 668 | log("Granted user '%s' role '%s' on tenant '%s'" % | 767 | log("Granted user '%s' role '%s' on tenant '%s'" % |
613 | 669 | (user, role, tenant), level=DEBUG) | 768 | (user, role, tenant), level=DEBUG) |
614 | 670 | else: | 769 | else: |
615 | @@ -677,6 +776,11 @@ | |||
616 | 677 | fd.writelines("%s\n" % passwd) | 776 | fd.writelines("%s\n" % passwd) |
617 | 678 | 777 | ||
618 | 679 | 778 | ||
619 | 779 | def store_admin_domain_id(domain_id): | ||
620 | 780 | with open(STORED_ADMIN_DOMAIN_ID, 'w+') as fd: | ||
621 | 781 | fd.writelines("%s\n" % domain_id) | ||
622 | 782 | |||
623 | 783 | |||
624 | 680 | def get_admin_passwd(): | 784 | def get_admin_passwd(): |
625 | 681 | passwd = config("admin-password") | 785 | passwd = config("admin-password") |
626 | 682 | if passwd and passwd.lower() != "none": | 786 | if passwd and passwd.lower() != "none": |
627 | @@ -708,6 +812,13 @@ | |||
628 | 708 | return passwd | 812 | return passwd |
629 | 709 | 813 | ||
630 | 710 | 814 | ||
631 | 815 | def get_api_version(): | ||
632 | 816 | api_version = config('preferred-api-version') | ||
633 | 817 | if api_version not in [2, 3]: | ||
634 | 818 | raise ValueError('Bad preferred-api-version') | ||
635 | 819 | return api_version | ||
636 | 820 | |||
637 | 821 | |||
638 | 711 | def ensure_initial_admin(config): | 822 | def ensure_initial_admin(config): |
639 | 712 | # Allow retry on fail since leader may not be ready yet. | 823 | # Allow retry on fail since leader may not be ready yet. |
640 | 713 | # NOTE(hopem): ks client may not be installed at module import time so we | 824 | # NOTE(hopem): ks client may not be installed at module import time so we |
641 | @@ -734,13 +845,27 @@ | |||
642 | 734 | """ | 845 | """ |
643 | 735 | create_tenant("admin") | 846 | create_tenant("admin") |
644 | 736 | create_tenant(config("service-tenant")) | 847 | create_tenant(config("service-tenant")) |
645 | 848 | if get_api_version() > 2: | ||
646 | 849 | domain_id = create_or_show_domain(ADMIN_DOMAIN) | ||
647 | 850 | store_admin_domain_id(domain_id) | ||
648 | 737 | # User is managed by ldap backend when using ldap identity | 851 | # User is managed by ldap backend when using ldap identity |
649 | 738 | if not (config('identity-backend') == | 852 | if not (config('identity-backend') == |
650 | 739 | 'ldap' and config('ldap-readonly')): | 853 | 'ldap' and config('ldap-readonly')): |
651 | 740 | passwd = get_admin_passwd() | 854 | passwd = get_admin_passwd() |
652 | 741 | if passwd: | 855 | if passwd: |
655 | 742 | create_user_credentials(config('admin-user'), 'admin', passwd, | 856 | if get_api_version() > 2: |
656 | 743 | new_roles=[config('admin-role')]) | 857 | create_user_credentials(config('admin-user'), passwd, |
657 | 858 | domain=ADMIN_DOMAIN) | ||
658 | 859 | create_role(config('admin-role'), config('admin-user'), | ||
659 | 860 | domain=ADMIN_DOMAIN) | ||
660 | 861 | grant_role(config('admin-user'), config('admin-role'), | ||
661 | 862 | tenant='admin') | ||
662 | 863 | grant_role(config('admin-user'), config('admin-role'), | ||
663 | 864 | domain=ADMIN_DOMAIN) | ||
664 | 865 | else: | ||
665 | 866 | create_user_credentials(config('admin-user'), passwd, | ||
666 | 867 | tenant='admin', | ||
667 | 868 | new_roles=[config('admin-role')]) | ||
668 | 744 | 869 | ||
669 | 745 | create_service_entry("keystone", "identity", | 870 | create_service_entry("keystone", "identity", |
670 | 746 | "Keystone Identity Service") | 871 | "Keystone Identity Service") |
671 | @@ -751,39 +876,49 @@ | |||
672 | 751 | internal_ip=resolve_address(INTERNAL), | 876 | internal_ip=resolve_address(INTERNAL), |
673 | 752 | admin_ip=resolve_address(ADMIN), | 877 | admin_ip=resolve_address(ADMIN), |
674 | 753 | auth_port=config("admin-port"), | 878 | auth_port=config("admin-port"), |
676 | 754 | region=region) | 879 | region=region, |
677 | 880 | api_version=get_api_version()) | ||
678 | 755 | 881 | ||
679 | 756 | return _ensure_initial_admin(config) | 882 | return _ensure_initial_admin(config) |
680 | 757 | 883 | ||
681 | 758 | 884 | ||
683 | 759 | def endpoint_url(ip, port): | 885 | def endpoint_url(ip, port, suffix=None): |
684 | 760 | proto = 'http' | 886 | proto = 'http' |
685 | 761 | if https(): | 887 | if https(): |
686 | 762 | proto = 'https' | 888 | proto = 'https' |
687 | 763 | if is_ipv6(ip): | 889 | if is_ipv6(ip): |
688 | 764 | ip = "[{}]".format(ip) | 890 | ip = "[{}]".format(ip) |
690 | 765 | return "%s://%s:%s/v2.0" % (proto, ip, port) | 891 | if suffix: |
691 | 892 | ep = "%s://%s:%s/%s" % (proto, ip, port, suffix) | ||
692 | 893 | else: | ||
693 | 894 | ep = "%s://%s:%s" % (proto, ip, port) | ||
694 | 895 | return ep | ||
695 | 766 | 896 | ||
696 | 767 | 897 | ||
697 | 768 | def create_keystone_endpoint(public_ip, service_port, | 898 | def create_keystone_endpoint(public_ip, service_port, |
703 | 769 | internal_ip, admin_ip, auth_port, region): | 899 | internal_ip, admin_ip, auth_port, region, |
704 | 770 | create_endpoint_template(region, "keystone", | 900 | api_version): |
705 | 771 | endpoint_url(public_ip, service_port), | 901 | api_suffix = '' |
706 | 772 | endpoint_url(admin_ip, auth_port), | 902 | if api_version == 2: |
707 | 773 | endpoint_url(internal_ip, service_port)) | 903 | api_suffix = 'v2.0' |
708 | 904 | api_suffix = get_api_suffix() | ||
709 | 905 | create_endpoint_template( | ||
710 | 906 | region, "keystone", | ||
711 | 907 | endpoint_url(public_ip, service_port, suffix=api_suffix), | ||
712 | 908 | endpoint_url(admin_ip, auth_port, suffix=api_suffix), | ||
713 | 909 | endpoint_url(internal_ip, service_port, suffix=api_suffix), | ||
714 | 910 | ) | ||
715 | 774 | 911 | ||
716 | 775 | 912 | ||
717 | 776 | def update_user_password(username, password): | 913 | def update_user_password(username, password): |
721 | 777 | import manager | 914 | manager = get_manager() |
719 | 778 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
720 | 779 | token=get_admin_token()) | ||
722 | 780 | log("Updating password for user '%s'" % username) | 915 | log("Updating password for user '%s'" % username) |
723 | 781 | 916 | ||
724 | 782 | user_id = manager.resolve_user_id(username) | 917 | user_id = manager.resolve_user_id(username) |
725 | 783 | if user_id is None: | 918 | if user_id is None: |
726 | 784 | error_out("Could not resolve user id for '%s'" % username) | 919 | error_out("Could not resolve user id for '%s'" % username) |
727 | 785 | 920 | ||
729 | 786 | manager.api.users.update_password(user=user_id, password=password) | 921 | manager.update_password(user=user_id, password=password) |
730 | 787 | log("Successfully updated password for user '%s'" % | 922 | log("Successfully updated password for user '%s'" % |
731 | 788 | username) | 923 | username) |
732 | 789 | 924 | ||
733 | @@ -1361,22 +1496,23 @@ | |||
734 | 1361 | return result | 1496 | return result |
735 | 1362 | 1497 | ||
736 | 1363 | 1498 | ||
738 | 1364 | def create_user_credentials(user, tenant, passwd, new_roles=None, grants=None): | 1499 | def create_user_credentials(user, passwd, tenant=None, new_roles=None, |
739 | 1500 | grants=None, domain=None): | ||
740 | 1365 | """Create user credentials. | 1501 | """Create user credentials. |
741 | 1366 | 1502 | ||
742 | 1367 | Optionally adds role grants to user and/or creates new roles. | 1503 | Optionally adds role grants to user and/or creates new roles. |
743 | 1368 | """ | 1504 | """ |
744 | 1369 | log("Creating service credentials for '%s'" % user, level=DEBUG) | 1505 | log("Creating service credentials for '%s'" % user, level=DEBUG) |
746 | 1370 | if user_exists(user): | 1506 | if user_exists(user, domain=domain): |
747 | 1371 | log("User '%s' already exists - updating password" % (user), | 1507 | log("User '%s' already exists - updating password" % (user), |
748 | 1372 | level=DEBUG) | 1508 | level=DEBUG) |
749 | 1373 | update_user_password(user, passwd) | 1509 | update_user_password(user, passwd) |
750 | 1374 | else: | 1510 | else: |
752 | 1375 | create_user(user, passwd, tenant) | 1511 | create_user(user, passwd, tenant, domain) |
753 | 1376 | 1512 | ||
754 | 1377 | if grants: | 1513 | if grants: |
755 | 1378 | for role in grants: | 1514 | for role in grants: |
757 | 1379 | grant_role(user, role, tenant) | 1515 | grant_role(user, role, tenant, domain) |
758 | 1380 | else: | 1516 | else: |
759 | 1381 | log("No role grants requested for user '%s'" % (user), level=DEBUG) | 1517 | log("No role grants requested for user '%s'" % (user), level=DEBUG) |
760 | 1382 | 1518 | ||
761 | @@ -1385,7 +1521,7 @@ | |||
762 | 1385 | # Currently used by Swift and Ceilometer. | 1521 | # Currently used by Swift and Ceilometer. |
763 | 1386 | for role in new_roles: | 1522 | for role in new_roles: |
764 | 1387 | log("Creating requested role '%s'" % role, level=DEBUG) | 1523 | log("Creating requested role '%s'" % role, level=DEBUG) |
766 | 1388 | create_role(role, user, tenant) | 1524 | create_role(role, user, tenant, domain) |
767 | 1389 | 1525 | ||
768 | 1390 | return passwd | 1526 | return passwd |
769 | 1391 | 1527 | ||
770 | @@ -1400,15 +1536,20 @@ | |||
771 | 1400 | if not tenant: | 1536 | if not tenant: |
772 | 1401 | raise Exception("No service tenant provided in config") | 1537 | raise Exception("No service tenant provided in config") |
773 | 1402 | 1538 | ||
778 | 1403 | return create_user_credentials(user, tenant, get_service_password(user), | 1539 | if get_api_version() == 2: |
779 | 1404 | new_roles=new_roles, | 1540 | domain = None |
780 | 1405 | grants=[config('admin-role')]) | 1541 | else: |
781 | 1406 | 1542 | domain = DEFAULT_DOMAIN | |
782 | 1543 | return create_user_credentials(user, get_service_password(user), | ||
783 | 1544 | tenant=tenant, new_roles=new_roles, | ||
784 | 1545 | grants=[config('admin-role')], | ||
785 | 1546 | domain=domain) | ||
786 | 1547 | |||
787 | 1548 | |||
788 | 1549 | # @retry_on_exception(5, base_delay=3) | ||
789 | 1407 | 1550 | ||
790 | 1408 | def add_service_to_keystone(relation_id=None, remote_unit=None): | 1551 | def add_service_to_keystone(relation_id=None, remote_unit=None): |
794 | 1409 | import manager | 1552 | manager = get_manager() |
792 | 1410 | manager = manager.KeystoneManager(endpoint=get_local_endpoint(), | ||
793 | 1411 | token=get_admin_token()) | ||
795 | 1412 | settings = relation_get(rid=relation_id, unit=remote_unit) | 1553 | settings = relation_get(rid=relation_id, unit=remote_unit) |
796 | 1413 | # the minimum settings needed per endpoint | 1554 | # the minimum settings needed per endpoint |
797 | 1414 | single = set(['service', 'region', 'public_url', 'admin_url', | 1555 | single = set(['service', 'region', 'public_url', 'admin_url', |
798 | @@ -1546,6 +1687,7 @@ | |||
799 | 1546 | # we return a token, information about our API endpoints, and the generated | 1687 | # we return a token, information about our API endpoints, and the generated |
800 | 1547 | # service credentials | 1688 | # service credentials |
801 | 1548 | service_tenant = config('service-tenant') | 1689 | service_tenant = config('service-tenant') |
802 | 1690 | grant_role(service_username, 'Admin', service_tenant, 'default') | ||
803 | 1549 | 1691 | ||
804 | 1550 | # NOTE(dosaboy): we use __null__ to represent settings that are to be | 1692 | # NOTE(dosaboy): we use __null__ to represent settings that are to be |
805 | 1551 | # routed to relations via the cluster relation and set to None. | 1693 | # routed to relations via the cluster relation and set to None. |
806 | @@ -1565,6 +1707,7 @@ | |||
807 | 1565 | "ca_cert": '__null__', | 1707 | "ca_cert": '__null__', |
808 | 1566 | "auth_protocol": protocol, | 1708 | "auth_protocol": protocol, |
809 | 1567 | "service_protocol": protocol, | 1709 | "service_protocol": protocol, |
810 | 1710 | "api_version": get_api_version(), | ||
811 | 1568 | } | 1711 | } |
812 | 1569 | 1712 | ||
813 | 1570 | # generate or get a new cert/key for service if set to manage certs. | 1713 | # generate or get a new cert/key for service if set to manage certs. |
814 | @@ -1863,7 +2006,6 @@ | |||
815 | 1863 | 2006 | ||
816 | 1864 | @param configs: a templating.OSConfigRenderer() object | 2007 | @param configs: a templating.OSConfigRenderer() object |
817 | 1865 | """ | 2008 | """ |
818 | 1866 | |||
819 | 1867 | if is_paused(): | 2009 | if is_paused(): |
820 | 1868 | status_set("maintenance", | 2010 | status_set("maintenance", |
821 | 1869 | "Paused. Use 'resume' action to resume normal service.") | 2011 | "Paused. Use 'resume' action to resume normal service.") |
822 | @@ -1871,5 +2013,19 @@ | |||
823 | 1871 | 2013 | ||
824 | 1872 | # set the status according to the current state of the contexts | 2014 | # set the status according to the current state of the contexts |
825 | 1873 | set_os_workload_status( | 2015 | set_os_workload_status( |
826 | 2016 | <<<<<<< TREE | ||
827 | 1874 | configs, REQUIRED_INTERFACES, charm_func=check_optional_relations, | 2017 | configs, REQUIRED_INTERFACES, charm_func=check_optional_relations, |
828 | 1875 | services=services(), ports=determine_ports()) | 2018 | services=services(), ports=determine_ports()) |
829 | 2019 | ======= | ||
830 | 2020 | configs, REQUIRED_INTERFACES, charm_func=check_optional_relations) | ||
831 | 2021 | |||
832 | 2022 | |||
833 | 2023 | def get_admin_domain_id(): | ||
834 | 2024 | domain_id = None | ||
835 | 2025 | if os.path.isfile(STORED_ADMIN_DOMAIN_ID): | ||
836 | 2026 | log("Loading stored domain id from %s" % STORED_ADMIN_DOMAIN_ID, | ||
837 | 2027 | level=INFO) | ||
838 | 2028 | with open(STORED_ADMIN_DOMAIN_ID, 'r') as fd: | ||
839 | 2029 | domain_id = fd.readline().strip('\n') | ||
840 | 2030 | return domain_id | ||
841 | 2031 | >>>>>>> MERGE-SOURCE | ||
842 | 1876 | 2032 | ||
843 | === modified file 'hooks/manager.py' | |||
844 | --- hooks/manager.py 2016-01-12 11:09:46 +0000 | |||
845 | +++ hooks/manager.py 2016-03-05 15:43:48 +0000 | |||
846 | @@ -1,12 +1,57 @@ | |||
847 | 1 | #!/usr/bin/python | 1 | #!/usr/bin/python |
848 | 2 | from keystoneclient.v2_0 import client | 2 | from keystoneclient.v2_0 import client |
849 | 3 | from keystoneclient.v3 import client as keystoneclient_v3 | ||
850 | 4 | from keystoneclient.auth import token_endpoint | ||
851 | 5 | from keystoneclient import session | ||
852 | 6 | |||
853 | 7 | |||
854 | 8 | def _get_keystone_manager_class(endpoint, token, api_version): | ||
855 | 9 | """Return KeystoneManager class for the given API version""" | ||
856 | 10 | if api_version == 2: | ||
857 | 11 | return KeystoneManager2(endpoint, token) | ||
858 | 12 | if api_version == 3: | ||
859 | 13 | return KeystoneManager3(endpoint, token) | ||
860 | 14 | raise ValueError('No manager found for api version {}'.format(api_version)) | ||
861 | 15 | |||
862 | 16 | |||
863 | 17 | def get_keystone_manager(endpoint, token, api_version): | ||
864 | 18 | """Return a keystonemanager for the correct API version""" | ||
865 | 19 | if api_version: | ||
866 | 20 | return _get_keystone_manager_class(endpoint, token, api_version) | ||
867 | 21 | else: | ||
868 | 22 | # If api_version has not been set then use the manager we have to query | ||
869 | 23 | # the catalogue and determine which api version should be being used. | ||
870 | 24 | # Then return the correct client based on that | ||
871 | 25 | # XXX I think the keystone client should be able to do version | ||
872 | 26 | # detection automatically so the code below could be greatly | ||
873 | 27 | # simplified | ||
874 | 28 | if 'v2.0' in endpoint.split('/'): | ||
875 | 29 | manager = _get_keystone_manager_class(endpoint, token, 2) | ||
876 | 30 | else: | ||
877 | 31 | manager = _get_keystone_manager_class(endpoint, token, 3) | ||
878 | 32 | if endpoint.endswith('/'): | ||
879 | 33 | base_ep = endpoint.rsplit('/', 2)[0] | ||
880 | 34 | else: | ||
881 | 35 | base_ep = endpoint.rsplit('/', 1)[0] | ||
882 | 36 | for svc in manager.api.services.list(): | ||
883 | 37 | if svc.type == 'identity': | ||
884 | 38 | svc_id = svc.id | ||
885 | 39 | version = None | ||
886 | 40 | for ep in manager.api.endpoints.list(): | ||
887 | 41 | if ep.service_id == svc_id and hasattr(ep, 'adminurl'): | ||
888 | 42 | version = ep.adminurl.split('/')[-1] | ||
889 | 43 | if version and version == 'v2.0': | ||
890 | 44 | new_ep = base_ep + "/" + 'v2.0' | ||
891 | 45 | return _get_keystone_manager_class(new_ep, token, 2) | ||
892 | 46 | elif version and version == 'v3': | ||
893 | 47 | new_ep = base_ep + "/" + 'v3' | ||
894 | 48 | return _get_keystone_manager_class(new_ep, token, 3) | ||
895 | 49 | else: | ||
896 | 50 | return manager | ||
897 | 3 | 51 | ||
898 | 4 | 52 | ||
899 | 5 | class KeystoneManager(object): | 53 | class KeystoneManager(object): |
900 | 6 | 54 | ||
901 | 7 | def __init__(self, endpoint, token): | ||
902 | 8 | self.api = client.Client(endpoint=endpoint, token=token) | ||
903 | 9 | |||
904 | 10 | def resolve_tenant_id(self, name): | 55 | def resolve_tenant_id(self, name): |
905 | 11 | """Find the tenant_id of a given tenant""" | 56 | """Find the tenant_id of a given tenant""" |
906 | 12 | tenants = [t._info for t in self.api.tenants.list()] | 57 | tenants = [t._info for t in self.api.tenants.list()] |
907 | @@ -14,6 +59,9 @@ | |||
908 | 14 | if name == t['name']: | 59 | if name == t['name']: |
909 | 15 | return t['id'] | 60 | return t['id'] |
910 | 16 | 61 | ||
911 | 62 | def resolve_domain_id(self, name): | ||
912 | 63 | pass | ||
913 | 64 | |||
914 | 17 | def resolve_role_id(self, name): | 65 | def resolve_role_id(self, name): |
915 | 18 | """Find the role_id of a given role""" | 66 | """Find the role_id of a given role""" |
916 | 19 | roles = [r._info for r in self.api.roles.list()] | 67 | roles = [r._info for r in self.api.roles.list()] |
917 | @@ -45,3 +93,135 @@ | |||
918 | 45 | for s in services: | 93 | for s in services: |
919 | 46 | if type == s['type']: | 94 | if type == s['type']: |
920 | 47 | return s['id'] | 95 | return s['id'] |
921 | 96 | |||
922 | 97 | |||
923 | 98 | class KeystoneManager2(KeystoneManager): | ||
924 | 99 | |||
925 | 100 | def __init__(self, endpoint, token): | ||
926 | 101 | self.api_version = 2 | ||
927 | 102 | self.api = client.Client(endpoint=endpoint, token=token) | ||
928 | 103 | |||
929 | 104 | def create_endpoints(self, region, service_id, publicurl, adminurl, | ||
930 | 105 | internalurl): | ||
931 | 106 | self.api.endpoints.create(region=region, service_id=service_id, | ||
932 | 107 | publicurl=publicurl, adminurl=adminurl, | ||
933 | 108 | internalurl=internalurl) | ||
934 | 109 | |||
935 | 110 | def tenants_list(self): | ||
936 | 111 | return self.api.tenants.list() | ||
937 | 112 | |||
938 | 113 | def create_tenant(self, tenant_name, description, domain='default'): | ||
939 | 114 | self.api.tenants.create(tenant_name=tenant_name, | ||
940 | 115 | description=description) | ||
941 | 116 | |||
942 | 117 | def delete_tenant(self, tenant_id): | ||
943 | 118 | self.api.tenants.delete(tenant_id) | ||
944 | 119 | |||
945 | 120 | def create_user(self, name, password, email, tenant_id=None, | ||
946 | 121 | domain_id=None): | ||
947 | 122 | self.api.users.create(name=name, | ||
948 | 123 | password=password, | ||
949 | 124 | email=email, | ||
950 | 125 | tenant_id=tenant_id) | ||
951 | 126 | |||
952 | 127 | def update_password(self, user, password): | ||
953 | 128 | self.api.users.update_password(user=user, password=password) | ||
954 | 129 | |||
955 | 130 | def roles_for_user(self, user_id, tenant_id=None, domain_id=None): | ||
956 | 131 | return self.api.roles.roles_for_user(user_id, tenant_id) | ||
957 | 132 | |||
958 | 133 | def add_user_role(self, user, role, tenant, domain): | ||
959 | 134 | self.api.roles.add_user_role(user=user, role=role, tenant=tenant) | ||
960 | 135 | |||
961 | 136 | |||
962 | 137 | class KeystoneManager3(KeystoneManager): | ||
963 | 138 | |||
964 | 139 | def __init__(self, endpoint, token): | ||
965 | 140 | self.api_version = 3 | ||
966 | 141 | keystone_auth_v3 = token_endpoint.Token(endpoint=endpoint, token=token) | ||
967 | 142 | keystone_session_v3 = session.Session(auth=keystone_auth_v3) | ||
968 | 143 | self.api = keystoneclient_v3.Client(session=keystone_session_v3) | ||
969 | 144 | |||
970 | 145 | def resolve_tenant_id(self, name): | ||
971 | 146 | """Find the tenant_id of a given tenant""" | ||
972 | 147 | tenants = [t._info for t in self.api.projects.list()] | ||
973 | 148 | for t in tenants: | ||
974 | 149 | if name == t['name']: | ||
975 | 150 | return t['id'] | ||
976 | 151 | |||
977 | 152 | def resolve_domain_id(self, name): | ||
978 | 153 | """Find the domain_id of a given domain""" | ||
979 | 154 | domains = [d._info for d in self.api.domains.list()] | ||
980 | 155 | for d in domains: | ||
981 | 156 | if name == d['name']: | ||
982 | 157 | return d['id'] | ||
983 | 158 | |||
984 | 159 | def create_endpoints(self, region, service_id, publicurl, adminurl, | ||
985 | 160 | internalurl): | ||
986 | 161 | self.api.endpoints.create(service_id, publicurl, interface='public', | ||
987 | 162 | region=region) | ||
988 | 163 | self.api.endpoints.create(service_id, adminurl, interface='admin', | ||
989 | 164 | region=region) | ||
990 | 165 | self.api.endpoints.create(service_id, internalurl, | ||
991 | 166 | interface='internal', region=region) | ||
992 | 167 | |||
993 | 168 | def tenants_list(self): | ||
994 | 169 | return self.api.projects.list() | ||
995 | 170 | |||
996 | 171 | def create_domain(self, domain_name, description): | ||
997 | 172 | self.api.domains.create(domain_name, description=description) | ||
998 | 173 | |||
999 | 174 | def create_tenant(self, tenant_name, description, domain='default'): | ||
1000 | 175 | self.api.projects.create(tenant_name, domain, description=description) | ||
1001 | 176 | |||
1002 | 177 | def delete_tenant(self, tenant_id): | ||
1003 | 178 | self.api.projects.delete(tenant_id) | ||
1004 | 179 | |||
1005 | 180 | def create_user(self, name, password, email, tenant_id=None, | ||
1006 | 181 | domain_id=None): | ||
1007 | 182 | if not domain_id: | ||
1008 | 183 | domain_id = self.resolve_domain_id('default') | ||
1009 | 184 | if tenant_id: | ||
1010 | 185 | self.api.users.create(name, | ||
1011 | 186 | domain=domain_id, | ||
1012 | 187 | password=password, | ||
1013 | 188 | email=email, | ||
1014 | 189 | project=tenant_id) | ||
1015 | 190 | else: | ||
1016 | 191 | self.api.users.create(name, | ||
1017 | 192 | domain=domain_id, | ||
1018 | 193 | password=password, | ||
1019 | 194 | email=email) | ||
1020 | 195 | |||
1021 | 196 | def update_password(self, user, password): | ||
1022 | 197 | self.api.users.update(user, password=password) | ||
1023 | 198 | |||
1024 | 199 | def roles_for_user(self, user_id, tenant_id=None, domain_id=None): | ||
1025 | 200 | # Specify either a domain or project, not both | ||
1026 | 201 | if domain_id: | ||
1027 | 202 | return self.api.roles.list(user_id, domain=domain_id) | ||
1028 | 203 | else: | ||
1029 | 204 | return self.api.roles.list(user_id, project=tenant_id) | ||
1030 | 205 | |||
1031 | 206 | def add_user_role(self, user, role, tenant, domain): | ||
1032 | 207 | # Specify either a domain or project, not both | ||
1033 | 208 | if domain: | ||
1034 | 209 | self.api.roles.grant(role, user=user, domain=domain) | ||
1035 | 210 | if tenant: | ||
1036 | 211 | self.api.roles.grant(role, user=user, project=tenant) | ||
1037 | 212 | |||
1038 | 213 | def find_endpoint_v3(self, interface, service_id, region): | ||
1039 | 214 | found_eps = [] | ||
1040 | 215 | for ep in self.api.endpoints.list(): | ||
1041 | 216 | if ep.service_id == service_id and ep.region == region and \ | ||
1042 | 217 | ep.interface == interface: | ||
1043 | 218 | found_eps.append(ep) | ||
1044 | 219 | return found_eps | ||
1045 | 220 | |||
1046 | 221 | def delete_old_endpoint_v3(self, interface, service_id, region, url): | ||
1047 | 222 | eps = self.find_endpoint_v3(interface, service_id, region) | ||
1048 | 223 | for ep in eps: | ||
1049 | 224 | if getattr(ep, 'url') != url: | ||
1050 | 225 | self.api.endpoints.delete(ep.id) | ||
1051 | 226 | return True | ||
1052 | 227 | return False | ||
1053 | 48 | 228 | ||
1054 | === added directory 'templates/liberty' | |||
1055 | === added file 'templates/liberty/policy.json' | |||
1056 | --- templates/liberty/policy.json 1970-01-01 00:00:00 +0000 | |||
1057 | +++ templates/liberty/policy.json 2016-03-05 15:43:48 +0000 | |||
1058 | @@ -0,0 +1,382 @@ | |||
1059 | 1 | {% if api_version == 3 -%} | ||
1060 | 2 | { | ||
1061 | 3 | "admin_required": "role:{{ admin_role }}", | ||
1062 | 4 | "cloud_admin": "rule:admin_required and domain_id:{{ admin_domain_id }}", | ||
1063 | 5 | "service_role": "role:service", | ||
1064 | 6 | "service_or_admin": "rule:admin_required or rule:service_role", | ||
1065 | 7 | "owner" : "user_id:%(user_id)s or user_id:%(target.token.user_id)s", | ||
1066 | 8 | "admin_or_owner": "(rule:admin_required and domain_id:%(target.token.user.domain.id)s) or rule:owner", | ||
1067 | 9 | "admin_or_cloud_admin": "rule:admin_required or rule:cloud_admin", | ||
1068 | 10 | "admin_and_matching_domain_id": "rule:admin_required and domain_id:%(domain_id)s", | ||
1069 | 11 | "service_admin_or_owner": "rule:service_or_admin or rule:owner", | ||
1070 | 12 | |||
1071 | 13 | "default": "rule:admin_required", | ||
1072 | 14 | |||
1073 | 15 | "identity:get_region": "", | ||
1074 | 16 | "identity:list_regions": "", | ||
1075 | 17 | "identity:create_region": "rule:cloud_admin", | ||
1076 | 18 | "identity:update_region": "rule:cloud_admin", | ||
1077 | 19 | "identity:delete_region": "rule:cloud_admin", | ||
1078 | 20 | |||
1079 | 21 | "identity:get_service": "rule:admin_or_cloud_admin", | ||
1080 | 22 | "identity:list_services": "rule:admin_or_cloud_admin", | ||
1081 | 23 | "identity:create_service": "rule:cloud_admin", | ||
1082 | 24 | "identity:update_service": "rule:cloud_admin", | ||
1083 | 25 | "identity:delete_service": "rule:cloud_admin", | ||
1084 | 26 | |||
1085 | 27 | "identity:get_endpoint": "rule:admin_or_cloud_admin", | ||
1086 | 28 | "identity:list_endpoints": "rule:admin_or_cloud_admin", | ||
1087 | 29 | "identity:create_endpoint": "rule:cloud_admin", | ||
1088 | 30 | "identity:update_endpoint": "rule:cloud_admin", | ||
1089 | 31 | "identity:delete_endpoint": "rule:cloud_admin", | ||
1090 | 32 | |||
1091 | 33 | "identity:get_domain": "rule:cloud_admin or rule:admin_and_matching_domain_id", | ||
1092 | 34 | "identity:list_domains": "rule:cloud_admin", | ||
1093 | 35 | "identity:create_domain": "rule:cloud_admin", | ||
1094 | 36 | "identity:update_domain": "rule:cloud_admin", | ||
1095 | 37 | "identity:delete_domain": "rule:cloud_admin", | ||
1096 | 38 | |||
1097 | 39 | "admin_and_matching_target_project_domain_id": "rule:admin_required and domain_id:%(target.project.domain_id)s", | ||
1098 | 40 | "admin_and_matching_project_domain_id": "rule:admin_required and domain_id:%(project.domain_id)s", | ||
1099 | 41 | "identity:get_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", | ||
1100 | 42 | "identity:list_projects": "rule:cloud_admin or rule:admin_and_matching_domain_id", | ||
1101 | 43 | "identity:list_user_projects": "rule:owner or rule:admin_and_matching_domain_id", | ||
1102 | 44 | "identity:create_project": "rule:cloud_admin or rule:admin_and_matching_project_domain_id", | ||
1103 | 45 | "identity:update_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", | ||
1104 | 46 | "identity:delete_project": "rule:cloud_admin or rule:admin_and_matching_target_project_domain_id", | ||
1105 | 47 | |||
1106 | 48 | "admin_and_matching_target_user_domain_id": "rule:admin_required and domain_id:%(target.user.domain_id)s", | ||
1107 | 49 | "admin_and_matching_user_domain_id": "rule:admin_required and domain_id:%(user.domain_id)s", | ||
1108 | 50 | "identity:get_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", | ||
1109 | 51 | "identity:list_users": "rule:cloud_admin or rule:admin_and_matching_domain_id", | ||
1110 | 52 | "identity:create_user": "rule:cloud_admin or rule:admin_and_matching_user_domain_id", | ||
1111 | 53 | "identity:update_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", | ||
1112 | 54 | "identity:delete_user": "rule:cloud_admin or rule:admin_and_matching_target_user_domain_id", | ||
1113 | 55 | |||
1114 | 56 | "admin_and_matching_target_group_domain_id": "rule:admin_required and domain_id:%(target.group.domain_id)s", | ||
1115 | 57 | "admin_and_matching_group_domain_id": "rule:admin_required and domain_id:%(group.domain_id)s", | ||
1116 | 58 | "identity:get_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", | ||
1117 | 59 | "identity:list_groups": "rule:cloud_admin or rule:admin_and_matching_domain_id", | ||
1118 | 60 | "identity:list_groups_for_user": "rule:owner or rule:admin_and_matching_domain_id", | ||
1119 | 61 | "identity:create_group": "rule:cloud_admin or rule:admin_and_matching_group_domain_id", | ||
1120 | 62 | "identity:update_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", | ||
1121 | 63 | "identity:delete_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", | ||
1122 | 64 | "identity:list_users_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", | ||
1123 | 65 | "identity:remove_user_from_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", | ||
1124 | 66 | "identity:check_user_in_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", | ||
1125 | 67 | "identity:add_user_to_group": "rule:cloud_admin or rule:admin_and_matching_target_group_domain_id", | ||
1126 | 68 | |||
1127 | 69 | "identity:get_credential": "rule:admin_required", | ||
1128 | 70 | "identity:list_credentials": "rule:admin_required or user_id:%(user_id)s", | ||
1129 | 71 | "identity:create_credential": "rule:admin_required", | ||
1130 | 72 | "identity:update_credential": "rule:admin_required", | ||
1131 | 73 | "identity:delete_credential": "rule:admin_required", | ||
1132 | 74 | |||
1133 | 75 | "identity:ec2_get_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)", | ||
1134 | 76 | "identity:ec2_list_credentials": "rule:admin_or_cloud_admin or rule:owner", | ||
1135 | 77 | "identity:ec2_create_credential": "rule:admin_or_cloud_admin or rule:owner", | ||
1136 | 78 | "identity:ec2_delete_credential": "rule:admin_or_cloud_admin or (rule:owner and user_id:%(target.credential.user_id)s)", | ||
1137 | 79 | |||
1138 | 80 | "identity:get_role": "rule:admin_or_cloud_admin", | ||
1139 | 81 | "identity:list_roles": "rule:admin_or_cloud_admin", | ||
1140 | 82 | "identity:create_role": "rule:cloud_admin", | ||
1141 | 83 | "identity:update_role": "rule:cloud_admin", | ||
1142 | 84 | "identity:delete_role": "rule:cloud_admin", | ||
1143 | 85 | |||
1144 | 86 | "domain_admin_for_grants": "rule:admin_required and (domain_id:%(domain_id)s or domain_id:%(target.project.domain_id)s)", | ||
1145 | 87 | "project_admin_for_grants": "rule:admin_required and project_id:%(project_id)s", | ||
1146 | 88 | "identity:check_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", | ||
1147 | 89 | "identity:list_grants": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", | ||
1148 | 90 | "identity:create_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", | ||
1149 | 91 | "identity:revoke_grant": "rule:cloud_admin or rule:domain_admin_for_grants or rule:project_admin_for_grants", | ||
1150 | 92 | |||
1151 | 93 | "admin_on_domain_filter" : "rule:admin_required and domain_id:%(scope.domain.id)s", | ||
1152 | 94 | "admin_on_project_filter" : "rule:admin_required and project_id:%(scope.project.id)s", | ||
1153 | 95 | "identity:list_role_assignments": "rule:cloud_admin or rule:admin_on_domain_filter or rule:admin_on_project_filter", | ||
1154 | 96 | |||
1155 | 97 | "identity:get_policy": "rule:cloud_admin", | ||
1156 | 98 | "identity:list_policies": "rule:cloud_admin", | ||
1157 | 99 | "identity:create_policy": "rule:cloud_admin", | ||
1158 | 100 | "identity:update_policy": "rule:cloud_admin", | ||
1159 | 101 | "identity:delete_policy": "rule:cloud_admin", | ||
1160 | 102 | |||
1161 | 103 | "identity:change_password": "rule:owner", | ||
1162 | 104 | "identity:check_token": "rule:admin_or_owner", | ||
1163 | 105 | "identity:validate_token": "rule:service_admin_or_owner", | ||
1164 | 106 | "identity:validate_token_head": "rule:service_or_admin", | ||
1165 | 107 | "identity:revocation_list": "rule:service_or_admin", | ||
1166 | 108 | "identity:revoke_token": "rule:admin_or_owner", | ||
1167 | 109 | |||
1168 | 110 | "identity:create_trust": "user_id:%(trust.trustor_user_id)s", | ||
1169 | 111 | "identity:list_trusts": "", | ||
1170 | 112 | "identity:list_roles_for_trust": "", | ||
1171 | 113 | "identity:get_role_for_trust": "", | ||
1172 | 114 | "identity:delete_trust": "", | ||
1173 | 115 | |||
1174 | 116 | "identity:create_consumer": "rule:admin_required", | ||
1175 | 117 | "identity:get_consumer": "rule:admin_required", | ||
1176 | 118 | "identity:list_consumers": "rule:admin_required", | ||
1177 | 119 | "identity:delete_consumer": "rule:admin_required", | ||
1178 | 120 | "identity:update_consumer": "rule:admin_required", | ||
1179 | 121 | |||
1180 | 122 | "identity:authorize_request_token": "rule:admin_required", | ||
1181 | 123 | "identity:list_access_token_roles": "rule:admin_required", | ||
1182 | 124 | "identity:get_access_token_role": "rule:admin_required", | ||
1183 | 125 | "identity:list_access_tokens": "rule:admin_required", | ||
1184 | 126 | "identity:get_access_token": "rule:admin_required", | ||
1185 | 127 | "identity:delete_access_token": "rule:admin_required", | ||
1186 | 128 | |||
1187 | 129 | "identity:list_projects_for_endpoint": "rule:admin_required", | ||
1188 | 130 | "identity:add_endpoint_to_project": "rule:admin_required", | ||
1189 | 131 | "identity:check_endpoint_in_project": "rule:admin_required", | ||
1190 | 132 | "identity:list_endpoints_for_project": "rule:admin_required", | ||
1191 | 133 | "identity:remove_endpoint_from_project": "rule:admin_required", | ||
1192 | 134 | |||
1193 | 135 | "identity:create_endpoint_group": "rule:admin_required", | ||
1194 | 136 | "identity:list_endpoint_groups": "rule:admin_required", | ||
1195 | 137 | "identity:get_endpoint_group": "rule:admin_required", | ||
1196 | 138 | "identity:update_endpoint_group": "rule:admin_required", | ||
1197 | 139 | "identity:delete_endpoint_group": "rule:admin_required", | ||
1198 | 140 | "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", | ||
1199 | 141 | "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", | ||
1200 | 142 | "identity:get_endpoint_group_in_project": "rule:admin_required", | ||
1201 | 143 | "identity:list_endpoint_groups_for_project": "rule:admin_required", | ||
1202 | 144 | "identity:add_endpoint_group_to_project": "rule:admin_required", | ||
1203 | 145 | "identity:remove_endpoint_group_from_project": "rule:admin_required", | ||
1204 | 146 | |||
1205 | 147 | "identity:create_identity_provider": "rule:cloud_admin", | ||
1206 | 148 | "identity:list_identity_providers": "rule:cloud_admin", | ||
1207 | 149 | "identity:get_identity_providers": "rule:cloud_admin", | ||
1208 | 150 | "identity:update_identity_provider": "rule:cloud_admin", | ||
1209 | 151 | "identity:delete_identity_provider": "rule:cloud_admin", | ||
1210 | 152 | |||
1211 | 153 | "identity:create_protocol": "rule:cloud_admin", | ||
1212 | 154 | "identity:update_protocol": "rule:cloud_admin", | ||
1213 | 155 | "identity:get_protocol": "rule:cloud_admin", | ||
1214 | 156 | "identity:list_protocols": "rule:cloud_admin", | ||
1215 | 157 | "identity:delete_protocol": "rule:cloud_admin", | ||
1216 | 158 | |||
1217 | 159 | "identity:create_mapping": "rule:cloud_admin", | ||
1218 | 160 | "identity:get_mapping": "rule:cloud_admin", | ||
1219 | 161 | "identity:list_mappings": "rule:cloud_admin", | ||
1220 | 162 | "identity:delete_mapping": "rule:cloud_admin", | ||
1221 | 163 | "identity:update_mapping": "rule:cloud_admin", | ||
1222 | 164 | |||
1223 | 165 | "identity:create_service_provider": "rule:cloud_admin", | ||
1224 | 166 | "identity:list_service_providers": "rule:cloud_admin", | ||
1225 | 167 | "identity:get_service_provider": "rule:cloud_admin", | ||
1226 | 168 | "identity:update_service_provider": "rule:cloud_admin", | ||
1227 | 169 | "identity:delete_service_provider": "rule:cloud_admin", | ||
1228 | 170 | |||
1229 | 171 | "identity:get_auth_catalog": "", | ||
1230 | 172 | "identity:get_auth_projects": "", | ||
1231 | 173 | "identity:get_auth_domains": "", | ||
1232 | 174 | |||
1233 | 175 | "identity:list_projects_for_groups": "", | ||
1234 | 176 | "identity:list_domains_for_groups": "", | ||
1235 | 177 | |||
1236 | 178 | "identity:list_revoke_events": "", | ||
1237 | 179 | |||
1238 | 180 | "identity:create_policy_association_for_endpoint": "rule:cloud_admin", | ||
1239 | 181 | "identity:check_policy_association_for_endpoint": "rule:cloud_admin", | ||
1240 | 182 | "identity:delete_policy_association_for_endpoint": "rule:cloud_admin", | ||
1241 | 183 | "identity:create_policy_association_for_service": "rule:cloud_admin", | ||
1242 | 184 | "identity:check_policy_association_for_service": "rule:cloud_admin", | ||
1243 | 185 | "identity:delete_policy_association_for_service": "rule:cloud_admin", | ||
1244 | 186 | "identity:create_policy_association_for_region_and_service": "rule:cloud_admin", | ||
1245 | 187 | "identity:check_policy_association_for_region_and_service": "rule:cloud_admin", | ||
1246 | 188 | "identity:delete_policy_association_for_region_and_service": "rule:cloud_admin", | ||
1247 | 189 | "identity:get_policy_for_endpoint": "rule:cloud_admin", | ||
1248 | 190 | "identity:list_endpoints_for_policy": "rule:cloud_admin", | ||
1249 | 191 | |||
1250 | 192 | "identity:create_domain_config": "rule:cloud_admin", | ||
1251 | 193 | "identity:get_domain_config": "rule:cloud_admin", | ||
1252 | 194 | "identity:update_domain_config": "rule:cloud_admin", | ||
1253 | 195 | "identity:delete_domain_config": "rule:cloud_admin" | ||
1254 | 196 | } | ||
1255 | 197 | {% else -%} | ||
1256 | 198 | { | ||
1257 | 199 | "admin_required": "role:admin or is_admin:1", | ||
1258 | 200 | "service_role": "role:service", | ||
1259 | 201 | "service_or_admin": "rule:admin_required or rule:service_role", | ||
1260 | 202 | "owner" : "user_id:%(user_id)s", | ||
1261 | 203 | "admin_or_owner": "rule:admin_required or rule:owner", | ||
1262 | 204 | "token_subject": "user_id:%(target.token.user_id)s", | ||
1263 | 205 | "admin_or_token_subject": "rule:admin_required or rule:token_subject", | ||
1264 | 206 | "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject", | ||
1265 | 207 | |||
1266 | 208 | "default": "rule:admin_required", | ||
1267 | 209 | |||
1268 | 210 | "identity:get_region": "", | ||
1269 | 211 | "identity:list_regions": "", | ||
1270 | 212 | "identity:create_region": "rule:admin_required", | ||
1271 | 213 | "identity:update_region": "rule:admin_required", | ||
1272 | 214 | "identity:delete_region": "rule:admin_required", | ||
1273 | 215 | |||
1274 | 216 | "identity:get_service": "rule:admin_required", | ||
1275 | 217 | "identity:list_services": "rule:admin_required", | ||
1276 | 218 | "identity:create_service": "rule:admin_required", | ||
1277 | 219 | "identity:update_service": "rule:admin_required", | ||
1278 | 220 | "identity:delete_service": "rule:admin_required", | ||
1279 | 221 | |||
1280 | 222 | "identity:get_endpoint": "rule:admin_required", | ||
1281 | 223 | "identity:list_endpoints": "rule:admin_required", | ||
1282 | 224 | "identity:create_endpoint": "rule:admin_required", | ||
1283 | 225 | "identity:update_endpoint": "rule:admin_required", | ||
1284 | 226 | "identity:delete_endpoint": "rule:admin_required", | ||
1285 | 227 | |||
1286 | 228 | "identity:get_domain": "rule:admin_required", | ||
1287 | 229 | "identity:list_domains": "rule:admin_required", | ||
1288 | 230 | "identity:create_domain": "rule:admin_required", | ||
1289 | 231 | "identity:update_domain": "rule:admin_required", | ||
1290 | 232 | "identity:delete_domain": "rule:admin_required", | ||
1291 | 233 | |||
1292 | 234 | "identity:get_project": "rule:admin_required", | ||
1293 | 235 | "identity:list_projects": "rule:admin_required", | ||
1294 | 236 | "identity:list_user_projects": "rule:admin_or_owner", | ||
1295 | 237 | "identity:create_project": "rule:admin_required", | ||
1296 | 238 | "identity:update_project": "rule:admin_required", | ||
1297 | 239 | "identity:delete_project": "rule:admin_required", | ||
1298 | 240 | |||
1299 | 241 | "identity:get_user": "rule:admin_required", | ||
1300 | 242 | "identity:list_users": "rule:admin_required", | ||
1301 | 243 | "identity:create_user": "rule:admin_required", | ||
1302 | 244 | "identity:update_user": "rule:admin_required", | ||
1303 | 245 | "identity:delete_user": "rule:admin_required", | ||
1304 | 246 | "identity:change_password": "rule:admin_or_owner", | ||
1305 | 247 | |||
1306 | 248 | "identity:get_group": "rule:admin_required", | ||
1307 | 249 | "identity:list_groups": "rule:admin_required", | ||
1308 | 250 | "identity:list_groups_for_user": "rule:admin_or_owner", | ||
1309 | 251 | "identity:create_group": "rule:admin_required", | ||
1310 | 252 | "identity:update_group": "rule:admin_required", | ||
1311 | 253 | "identity:delete_group": "rule:admin_required", | ||
1312 | 254 | "identity:list_users_in_group": "rule:admin_required", | ||
1313 | 255 | "identity:remove_user_from_group": "rule:admin_required", | ||
1314 | 256 | "identity:check_user_in_group": "rule:admin_required", | ||
1315 | 257 | "identity:add_user_to_group": "rule:admin_required", | ||
1316 | 258 | |||
1317 | 259 | "identity:get_credential": "rule:admin_required", | ||
1318 | 260 | "identity:list_credentials": "rule:admin_required", | ||
1319 | 261 | "identity:create_credential": "rule:admin_required", | ||
1320 | 262 | "identity:update_credential": "rule:admin_required", | ||
1321 | 263 | "identity:delete_credential": "rule:admin_required", | ||
1322 | 264 | |||
1323 | 265 | "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", | ||
1324 | 266 | "identity:ec2_list_credentials": "rule:admin_or_owner", | ||
1325 | 267 | "identity:ec2_create_credential": "rule:admin_or_owner", | ||
1326 | 268 | "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", | ||
1327 | 269 | |||
1328 | 270 | "identity:get_role": "rule:admin_required", | ||
1329 | 271 | "identity:list_roles": "rule:admin_required", | ||
1330 | 272 | "identity:create_role": "rule:admin_required", | ||
1331 | 273 | "identity:update_role": "rule:admin_required", | ||
1332 | 274 | "identity:delete_role": "rule:admin_required", | ||
1333 | 275 | |||
1334 | 276 | "identity:check_grant": "rule:admin_required", | ||
1335 | 277 | "identity:list_grants": "rule:admin_required", | ||
1336 | 278 | "identity:create_grant": "rule:admin_required", | ||
1337 | 279 | "identity:revoke_grant": "rule:admin_required", | ||
1338 | 280 | |||
1339 | 281 | "identity:list_role_assignments": "rule:admin_required", | ||
1340 | 282 | |||
1341 | 283 | "identity:get_policy": "rule:admin_required", | ||
1342 | 284 | "identity:list_policies": "rule:admin_required", | ||
1343 | 285 | "identity:create_policy": "rule:admin_required", | ||
1344 | 286 | "identity:update_policy": "rule:admin_required", | ||
1345 | 287 | "identity:delete_policy": "rule:admin_required", | ||
1346 | 288 | |||
1347 | 289 | "identity:check_token": "rule:admin_or_token_subject", | ||
1348 | 290 | "identity:validate_token": "rule:service_admin_or_token_subject", | ||
1349 | 291 | "identity:validate_token_head": "rule:service_or_admin", | ||
1350 | 292 | "identity:revocation_list": "rule:service_or_admin", | ||
1351 | 293 | "identity:revoke_token": "rule:admin_or_token_subject", | ||
1352 | 294 | |||
1353 | 295 | "identity:create_trust": "user_id:%(trust.trustor_user_id)s", | ||
1354 | 296 | "identity:list_trusts": "", | ||
1355 | 297 | "identity:list_roles_for_trust": "", | ||
1356 | 298 | "identity:get_role_for_trust": "", | ||
1357 | 299 | "identity:delete_trust": "", | ||
1358 | 300 | |||
1359 | 301 | "identity:create_consumer": "rule:admin_required", | ||
1360 | 302 | "identity:get_consumer": "rule:admin_required", | ||
1361 | 303 | "identity:list_consumers": "rule:admin_required", | ||
1362 | 304 | "identity:delete_consumer": "rule:admin_required", | ||
1363 | 305 | "identity:update_consumer": "rule:admin_required", | ||
1364 | 306 | |||
1365 | 307 | "identity:authorize_request_token": "rule:admin_required", | ||
1366 | 308 | "identity:list_access_token_roles": "rule:admin_required", | ||
1367 | 309 | "identity:get_access_token_role": "rule:admin_required", | ||
1368 | 310 | "identity:list_access_tokens": "rule:admin_required", | ||
1369 | 311 | "identity:get_access_token": "rule:admin_required", | ||
1370 | 312 | "identity:delete_access_token": "rule:admin_required", | ||
1371 | 313 | |||
1372 | 314 | "identity:list_projects_for_endpoint": "rule:admin_required", | ||
1373 | 315 | "identity:add_endpoint_to_project": "rule:admin_required", | ||
1374 | 316 | "identity:check_endpoint_in_project": "rule:admin_required", | ||
1375 | 317 | "identity:list_endpoints_for_project": "rule:admin_required", | ||
1376 | 318 | "identity:remove_endpoint_from_project": "rule:admin_required", | ||
1377 | 319 | |||
1378 | 320 | "identity:create_endpoint_group": "rule:admin_required", | ||
1379 | 321 | "identity:list_endpoint_groups": "rule:admin_required", | ||
1380 | 322 | "identity:get_endpoint_group": "rule:admin_required", | ||
1381 | 323 | "identity:update_endpoint_group": "rule:admin_required", | ||
1382 | 324 | "identity:delete_endpoint_group": "rule:admin_required", | ||
1383 | 325 | "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", | ||
1384 | 326 | "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", | ||
1385 | 327 | "identity:get_endpoint_group_in_project": "rule:admin_required", | ||
1386 | 328 | "identity:list_endpoint_groups_for_project": "rule:admin_required", | ||
1387 | 329 | "identity:add_endpoint_group_to_project": "rule:admin_required", | ||
1388 | 330 | "identity:remove_endpoint_group_from_project": "rule:admin_required", | ||
1389 | 331 | |||
1390 | 332 | "identity:create_identity_provider": "rule:admin_required", | ||
1391 | 333 | "identity:list_identity_providers": "rule:admin_required", | ||
1392 | 334 | "identity:get_identity_providers": "rule:admin_required", | ||
1393 | 335 | "identity:update_identity_provider": "rule:admin_required", | ||
1394 | 336 | "identity:delete_identity_provider": "rule:admin_required", | ||
1395 | 337 | |||
1396 | 338 | "identity:create_protocol": "rule:admin_required", | ||
1397 | 339 | "identity:update_protocol": "rule:admin_required", | ||
1398 | 340 | "identity:get_protocol": "rule:admin_required", | ||
1399 | 341 | "identity:list_protocols": "rule:admin_required", | ||
1400 | 342 | "identity:delete_protocol": "rule:admin_required", | ||
1401 | 343 | |||
1402 | 344 | "identity:create_mapping": "rule:admin_required", | ||
1403 | 345 | "identity:get_mapping": "rule:admin_required", | ||
1404 | 346 | "identity:list_mappings": "rule:admin_required", | ||
1405 | 347 | "identity:delete_mapping": "rule:admin_required", | ||
1406 | 348 | "identity:update_mapping": "rule:admin_required", | ||
1407 | 349 | |||
1408 | 350 | "identity:create_service_provider": "rule:admin_required", | ||
1409 | 351 | "identity:list_service_providers": "rule:admin_required", | ||
1410 | 352 | "identity:get_service_provider": "rule:admin_required", | ||
1411 | 353 | "identity:update_service_provider": "rule:admin_required", | ||
1412 | 354 | "identity:delete_service_provider": "rule:admin_required", | ||
1413 | 355 | |||
1414 | 356 | "identity:get_auth_catalog": "", | ||
1415 | 357 | "identity:get_auth_projects": "", | ||
1416 | 358 | "identity:get_auth_domains": "", | ||
1417 | 359 | |||
1418 | 360 | "identity:list_projects_for_groups": "", | ||
1419 | 361 | "identity:list_domains_for_groups": "", | ||
1420 | 362 | |||
1421 | 363 | "identity:list_revoke_events": "", | ||
1422 | 364 | |||
1423 | 365 | "identity:create_policy_association_for_endpoint": "rule:admin_required", | ||
1424 | 366 | "identity:check_policy_association_for_endpoint": "rule:admin_required", | ||
1425 | 367 | "identity:delete_policy_association_for_endpoint": "rule:admin_required", | ||
1426 | 368 | "identity:create_policy_association_for_service": "rule:admin_required", | ||
1427 | 369 | "identity:check_policy_association_for_service": "rule:admin_required", | ||
1428 | 370 | "identity:delete_policy_association_for_service": "rule:admin_required", | ||
1429 | 371 | "identity:create_policy_association_for_region_and_service": "rule:admin_required", | ||
1430 | 372 | "identity:check_policy_association_for_region_and_service": "rule:admin_required", | ||
1431 | 373 | "identity:delete_policy_association_for_region_and_service": "rule:admin_required", | ||
1432 | 374 | "identity:get_policy_for_endpoint": "rule:admin_required", | ||
1433 | 375 | "identity:list_endpoints_for_policy": "rule:admin_required", | ||
1434 | 376 | |||
1435 | 377 | "identity:create_domain_config": "rule:admin_required", | ||
1436 | 378 | "identity:get_domain_config": "rule:admin_required", | ||
1437 | 379 | "identity:update_domain_config": "rule:admin_required", | ||
1438 | 380 | "identity:delete_domain_config": "rule:admin_required" | ||
1439 | 381 | } | ||
1440 | 382 | {% endif -%} | ||
1441 | 0 | 383 | ||
1442 | === added file 'templates/liberty/policy.json.v2' | |||
1443 | --- templates/liberty/policy.json.v2 1970-01-01 00:00:00 +0000 | |||
1444 | +++ templates/liberty/policy.json.v2 2016-03-05 15:43:48 +0000 | |||
1445 | @@ -0,0 +1,184 @@ | |||
1446 | 1 | { | ||
1447 | 2 | "admin_required": "role:admin or is_admin:1", | ||
1448 | 3 | "service_role": "role:service", | ||
1449 | 4 | "service_or_admin": "rule:admin_required or rule:service_role", | ||
1450 | 5 | "owner" : "user_id:%(user_id)s", | ||
1451 | 6 | "admin_or_owner": "rule:admin_required or rule:owner", | ||
1452 | 7 | "token_subject": "user_id:%(target.token.user_id)s", | ||
1453 | 8 | "admin_or_token_subject": "rule:admin_required or rule:token_subject", | ||
1454 | 9 | "service_admin_or_token_subject": "rule:service_or_admin or rule:token_subject", | ||
1455 | 10 | |||
1456 | 11 | "default": "rule:admin_required", | ||
1457 | 12 | |||
1458 | 13 | "identity:get_region": "", | ||
1459 | 14 | "identity:list_regions": "", | ||
1460 | 15 | "identity:create_region": "rule:admin_required", | ||
1461 | 16 | "identity:update_region": "rule:admin_required", | ||
1462 | 17 | "identity:delete_region": "rule:admin_required", | ||
1463 | 18 | |||
1464 | 19 | "identity:get_service": "rule:admin_required", | ||
1465 | 20 | "identity:list_services": "rule:admin_required", | ||
1466 | 21 | "identity:create_service": "rule:admin_required", | ||
1467 | 22 | "identity:update_service": "rule:admin_required", | ||
1468 | 23 | "identity:delete_service": "rule:admin_required", | ||
1469 | 24 | |||
1470 | 25 | "identity:get_endpoint": "rule:admin_required", | ||
1471 | 26 | "identity:list_endpoints": "rule:admin_required", | ||
1472 | 27 | "identity:create_endpoint": "rule:admin_required", | ||
1473 | 28 | "identity:update_endpoint": "rule:admin_required", | ||
1474 | 29 | "identity:delete_endpoint": "rule:admin_required", | ||
1475 | 30 | |||
1476 | 31 | "identity:get_domain": "rule:admin_required", | ||
1477 | 32 | "identity:list_domains": "rule:admin_required", | ||
1478 | 33 | "identity:create_domain": "rule:admin_required", | ||
1479 | 34 | "identity:update_domain": "rule:admin_required", | ||
1480 | 35 | "identity:delete_domain": "rule:admin_required", | ||
1481 | 36 | |||
1482 | 37 | "identity:get_project": "rule:admin_required", | ||
1483 | 38 | "identity:list_projects": "rule:admin_required", | ||
1484 | 39 | "identity:list_user_projects": "rule:admin_or_owner", | ||
1485 | 40 | "identity:create_project": "rule:admin_required", | ||
1486 | 41 | "identity:update_project": "rule:admin_required", | ||
1487 | 42 | "identity:delete_project": "rule:admin_required", | ||
1488 | 43 | |||
1489 | 44 | "identity:get_user": "rule:admin_required", | ||
1490 | 45 | "identity:list_users": "rule:admin_required", | ||
1491 | 46 | "identity:create_user": "rule:admin_required", | ||
1492 | 47 | "identity:update_user": "rule:admin_required", | ||
1493 | 48 | "identity:delete_user": "rule:admin_required", | ||
1494 | 49 | "identity:change_password": "rule:admin_or_owner", | ||
1495 | 50 | |||
1496 | 51 | "identity:get_group": "rule:admin_required", | ||
1497 | 52 | "identity:list_groups": "rule:admin_required", | ||
1498 | 53 | "identity:list_groups_for_user": "rule:admin_or_owner", | ||
1499 | 54 | "identity:create_group": "rule:admin_required", | ||
1500 | 55 | "identity:update_group": "rule:admin_required", | ||
1501 | 56 | "identity:delete_group": "rule:admin_required", | ||
1502 | 57 | "identity:list_users_in_group": "rule:admin_required", | ||
1503 | 58 | "identity:remove_user_from_group": "rule:admin_required", | ||
1504 | 59 | "identity:check_user_in_group": "rule:admin_required", | ||
1505 | 60 | "identity:add_user_to_group": "rule:admin_required", | ||
1506 | 61 | |||
1507 | 62 | "identity:get_credential": "rule:admin_required", | ||
1508 | 63 | "identity:list_credentials": "rule:admin_required", | ||
1509 | 64 | "identity:create_credential": "rule:admin_required", | ||
1510 | 65 | "identity:update_credential": "rule:admin_required", | ||
1511 | 66 | "identity:delete_credential": "rule:admin_required", | ||
1512 | 67 | |||
1513 | 68 | "identity:ec2_get_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", | ||
1514 | 69 | "identity:ec2_list_credentials": "rule:admin_or_owner", | ||
1515 | 70 | "identity:ec2_create_credential": "rule:admin_or_owner", | ||
1516 | 71 | "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)", | ||
1517 | 72 | |||
1518 | 73 | "identity:get_role": "rule:admin_required", | ||
1519 | 74 | "identity:list_roles": "rule:admin_required", | ||
1520 | 75 | "identity:create_role": "rule:admin_required", | ||
1521 | 76 | "identity:update_role": "rule:admin_required", | ||
1522 | 77 | "identity:delete_role": "rule:admin_required", | ||
1523 | 78 | |||
1524 | 79 | "identity:check_grant": "rule:admin_required", | ||
1525 | 80 | "identity:list_grants": "rule:admin_required", | ||
1526 | 81 | "identity:create_grant": "rule:admin_required", | ||
1527 | 82 | "identity:revoke_grant": "rule:admin_required", | ||
1528 | 83 | |||
1529 | 84 | "identity:list_role_assignments": "rule:admin_required", | ||
1530 | 85 | |||
1531 | 86 | "identity:get_policy": "rule:admin_required", | ||
1532 | 87 | "identity:list_policies": "rule:admin_required", | ||
1533 | 88 | "identity:create_policy": "rule:admin_required", | ||
1534 | 89 | "identity:update_policy": "rule:admin_required", | ||
1535 | 90 | "identity:delete_policy": "rule:admin_required", | ||
1536 | 91 | |||
1537 | 92 | "identity:check_token": "rule:admin_or_token_subject", | ||
1538 | 93 | "identity:validate_token": "rule:service_admin_or_token_subject", | ||
1539 | 94 | "identity:validate_token_head": "rule:service_or_admin", | ||
1540 | 95 | "identity:revocation_list": "rule:service_or_admin", | ||
1541 | 96 | "identity:revoke_token": "rule:admin_or_token_subject", | ||
1542 | 97 | |||
1543 | 98 | "identity:create_trust": "user_id:%(trust.trustor_user_id)s", | ||
1544 | 99 | "identity:list_trusts": "", | ||
1545 | 100 | "identity:list_roles_for_trust": "", | ||
1546 | 101 | "identity:get_role_for_trust": "", | ||
1547 | 102 | "identity:delete_trust": "", | ||
1548 | 103 | |||
1549 | 104 | "identity:create_consumer": "rule:admin_required", | ||
1550 | 105 | "identity:get_consumer": "rule:admin_required", | ||
1551 | 106 | "identity:list_consumers": "rule:admin_required", | ||
1552 | 107 | "identity:delete_consumer": "rule:admin_required", | ||
1553 | 108 | "identity:update_consumer": "rule:admin_required", | ||
1554 | 109 | |||
1555 | 110 | "identity:authorize_request_token": "rule:admin_required", | ||
1556 | 111 | "identity:list_access_token_roles": "rule:admin_required", | ||
1557 | 112 | "identity:get_access_token_role": "rule:admin_required", | ||
1558 | 113 | "identity:list_access_tokens": "rule:admin_required", | ||
1559 | 114 | "identity:get_access_token": "rule:admin_required", | ||
1560 | 115 | "identity:delete_access_token": "rule:admin_required", | ||
1561 | 116 | |||
1562 | 117 | "identity:list_projects_for_endpoint": "rule:admin_required", | ||
1563 | 118 | "identity:add_endpoint_to_project": "rule:admin_required", | ||
1564 | 119 | "identity:check_endpoint_in_project": "rule:admin_required", | ||
1565 | 120 | "identity:list_endpoints_for_project": "rule:admin_required", | ||
1566 | 121 | "identity:remove_endpoint_from_project": "rule:admin_required", | ||
1567 | 122 | |||
1568 | 123 | "identity:create_endpoint_group": "rule:admin_required", | ||
1569 | 124 | "identity:list_endpoint_groups": "rule:admin_required", | ||
1570 | 125 | "identity:get_endpoint_group": "rule:admin_required", | ||
1571 | 126 | "identity:update_endpoint_group": "rule:admin_required", | ||
1572 | 127 | "identity:delete_endpoint_group": "rule:admin_required", | ||
1573 | 128 | "identity:list_projects_associated_with_endpoint_group": "rule:admin_required", | ||
1574 | 129 | "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required", | ||
1575 | 130 | "identity:get_endpoint_group_in_project": "rule:admin_required", | ||
1576 | 131 | "identity:list_endpoint_groups_for_project": "rule:admin_required", | ||
1577 | 132 | "identity:add_endpoint_group_to_project": "rule:admin_required", | ||
1578 | 133 | "identity:remove_endpoint_group_from_project": "rule:admin_required", | ||
1579 | 134 | |||
1580 | 135 | "identity:create_identity_provider": "rule:admin_required", | ||
1581 | 136 | "identity:list_identity_providers": "rule:admin_required", | ||
1582 | 137 | "identity:get_identity_providers": "rule:admin_required", | ||
1583 | 138 | "identity:update_identity_provider": "rule:admin_required", | ||
1584 | 139 | "identity:delete_identity_provider": "rule:admin_required", | ||
1585 | 140 | |||
1586 | 141 | "identity:create_protocol": "rule:admin_required", | ||
1587 | 142 | "identity:update_protocol": "rule:admin_required", | ||
1588 | 143 | "identity:get_protocol": "rule:admin_required", | ||
1589 | 144 | "identity:list_protocols": "rule:admin_required", | ||
1590 | 145 | "identity:delete_protocol": "rule:admin_required", | ||
1591 | 146 | |||
1592 | 147 | "identity:create_mapping": "rule:admin_required", | ||
1593 | 148 | "identity:get_mapping": "rule:admin_required", | ||
1594 | 149 | "identity:list_mappings": "rule:admin_required", | ||
1595 | 150 | "identity:delete_mapping": "rule:admin_required", | ||
1596 | 151 | "identity:update_mapping": "rule:admin_required", | ||
1597 | 152 | |||
1598 | 153 | "identity:create_service_provider": "rule:admin_required", | ||
1599 | 154 | "identity:list_service_providers": "rule:admin_required", | ||
1600 | 155 | "identity:get_service_provider": "rule:admin_required", | ||
1601 | 156 | "identity:update_service_provider": "rule:admin_required", | ||
1602 | 157 | "identity:delete_service_provider": "rule:admin_required", | ||
1603 | 158 | |||
1604 | 159 | "identity:get_auth_catalog": "", | ||
1605 | 160 | "identity:get_auth_projects": "", | ||
1606 | 161 | "identity:get_auth_domains": "", | ||
1607 | 162 | |||
1608 | 163 | "identity:list_projects_for_groups": "", | ||
1609 | 164 | "identity:list_domains_for_groups": "", | ||
1610 | 165 | |||
1611 | 166 | "identity:list_revoke_events": "", | ||
1612 | 167 | |||
1613 | 168 | "identity:create_policy_association_for_endpoint": "rule:admin_required", | ||
1614 | 169 | "identity:check_policy_association_for_endpoint": "rule:admin_required", | ||
1615 | 170 | "identity:delete_policy_association_for_endpoint": "rule:admin_required", | ||
1616 | 171 | "identity:create_policy_association_for_service": "rule:admin_required", | ||
1617 | 172 | "identity:check_policy_association_for_service": "rule:admin_required", | ||
1618 | 173 | "identity:delete_policy_association_for_service": "rule:admin_required", | ||
1619 | 174 | "identity:create_policy_association_for_region_and_service": "rule:admin_required", | ||
1620 | 175 | "identity:check_policy_association_for_region_and_service": "rule:admin_required", | ||
1621 | 176 | "identity:delete_policy_association_for_region_and_service": "rule:admin_required", | ||
1622 | 177 | "identity:get_policy_for_endpoint": "rule:admin_required", | ||
1623 | 178 | "identity:list_endpoints_for_policy": "rule:admin_required", | ||
1624 | 179 | |||
1625 | 180 | "identity:create_domain_config": "rule:admin_required", | ||
1626 | 181 | "identity:get_domain_config": "rule:admin_required", | ||
1627 | 182 | "identity:update_domain_config": "rule:admin_required", | ||
1628 | 183 | "identity:delete_domain_config": "rule:admin_required" | ||
1629 | 184 | } | ||
1630 | 0 | 185 | ||
1631 | === modified file 'tests/basic_deployment.py' | |||
1632 | --- tests/basic_deployment.py 2016-01-13 21:33:59 +0000 | |||
1633 | +++ tests/basic_deployment.py 2016-03-05 15:43:48 +0000 | |||
1634 | @@ -17,6 +17,8 @@ | |||
1635 | 17 | DEBUG, | 17 | DEBUG, |
1636 | 18 | # ERROR | 18 | # ERROR |
1637 | 19 | ) | 19 | ) |
1638 | 20 | import keystoneclient | ||
1639 | 21 | from charmhelpers.core.decorators import retry_on_exception | ||
1640 | 20 | 22 | ||
1641 | 21 | # Use DEBUG to turn on debug logging | 23 | # Use DEBUG to turn on debug logging |
1642 | 22 | u = OpenStackAmuletUtils(DEBUG) | 24 | u = OpenStackAmuletUtils(DEBUG) |
1643 | @@ -30,6 +32,7 @@ | |||
1644 | 30 | """Deploy the entire test environment.""" | 32 | """Deploy the entire test environment.""" |
1645 | 31 | super(KeystoneBasicDeployment, self).__init__(series, openstack, | 33 | super(KeystoneBasicDeployment, self).__init__(series, openstack, |
1646 | 32 | source, stable) | 34 | source, stable) |
1647 | 35 | self.keystone_api_version = 2 | ||
1648 | 33 | self.git = git | 36 | self.git = git |
1649 | 34 | self._add_services() | 37 | self._add_services() |
1650 | 35 | self._add_relations() | 38 | self._add_relations() |
1651 | @@ -37,8 +40,8 @@ | |||
1652 | 37 | self._deploy() | 40 | self._deploy() |
1653 | 38 | 41 | ||
1654 | 39 | u.log.info('Waiting on extended status checks...') | 42 | u.log.info('Waiting on extended status checks...') |
1657 | 40 | exclude_services = ['mysql'] | 43 | self.exclude_services = ['mysql'] |
1658 | 41 | self._auto_wait_for_status(exclude_services=exclude_services) | 44 | self._auto_wait_for_status(exclude_services=self.exclude_services) |
1659 | 42 | 45 | ||
1660 | 43 | self._initialize_tests() | 46 | self._initialize_tests() |
1661 | 44 | 47 | ||
1662 | @@ -72,7 +75,8 @@ | |||
1663 | 72 | def _configure_services(self): | 75 | def _configure_services(self): |
1664 | 73 | """Configure all of the services.""" | 76 | """Configure all of the services.""" |
1665 | 74 | keystone_config = {'admin-password': 'openstack', | 77 | keystone_config = {'admin-password': 'openstack', |
1667 | 75 | 'admin-token': 'ubuntutesting'} | 78 | 'admin-token': 'ubuntutesting', |
1668 | 79 | 'preferred-api-version': self.keystone_api_version} | ||
1669 | 76 | if self.git: | 80 | if self.git: |
1670 | 77 | amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') | 81 | amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') |
1671 | 78 | 82 | ||
1672 | @@ -109,6 +113,103 @@ | |||
1673 | 109 | } | 113 | } |
1674 | 110 | super(KeystoneBasicDeployment, self)._configure_services(configs) | 114 | super(KeystoneBasicDeployment, self)._configure_services(configs) |
1675 | 111 | 115 | ||
1676 | 116 | @retry_on_exception(5, base_delay=10) | ||
1677 | 117 | def set_api_version(self, api_version): | ||
1678 | 118 | set_alternate = {'preferred-api-version': api_version} | ||
1679 | 119 | |||
1680 | 120 | # Make config change, check for service restarts | ||
1681 | 121 | u.log.debug('Setting preferred-api-version={}'.format(api_version)) | ||
1682 | 122 | self.d.configure('keystone', set_alternate) | ||
1683 | 123 | self.keystone_api_version = api_version | ||
1684 | 124 | client = self.get_keystone_client(api_version=api_version) | ||
1685 | 125 | # List an artefact that needs authorisation to check admin user | ||
1686 | 126 | # has been set up. If that is still in progress | ||
1687 | 127 | # keystoneclient.exceptions.Unauthorized will be thrown and caught by | ||
1688 | 128 | # @retry_on_exception | ||
1689 | 129 | if api_version == 2: | ||
1690 | 130 | client.tenants.list() | ||
1691 | 131 | self.keystone_v2 = self.get_keystone_client(api_version=2) | ||
1692 | 132 | else: | ||
1693 | 133 | client.projects.list() | ||
1694 | 134 | self.keystone_v3 = self.get_keystone_client(api_version=3) | ||
1695 | 135 | |||
1696 | 136 | def get_keystone_client(self, api_version=None): | ||
1697 | 137 | if api_version == 2: | ||
1698 | 138 | return u.authenticate_keystone_admin(self.keystone_sentry, | ||
1699 | 139 | user='admin', | ||
1700 | 140 | password='openstack', | ||
1701 | 141 | tenant='admin', | ||
1702 | 142 | api_version=api_version, | ||
1703 | 143 | keystone_ip=self.keystone_ip) | ||
1704 | 144 | else: | ||
1705 | 145 | return u.authenticate_keystone_admin(self.keystone_sentry, | ||
1706 | 146 | user='admin', | ||
1707 | 147 | password='openstack', | ||
1708 | 148 | api_version=api_version, | ||
1709 | 149 | keystone_ip=self.keystone_ip) | ||
1710 | 150 | |||
1711 | 151 | def create_users_v2(self): | ||
1712 | 152 | # Create a demo tenant/role/user | ||
1713 | 153 | self.demo_tenant = 'demoTenant' | ||
1714 | 154 | self.demo_role = 'demoRole' | ||
1715 | 155 | self.demo_user = 'demoUser' | ||
1716 | 156 | if not u.tenant_exists(self.keystone_v2, self.demo_tenant): | ||
1717 | 157 | tenant = self.keystone_v2.tenants.create( | ||
1718 | 158 | tenant_name=self.demo_tenant, | ||
1719 | 159 | description='demo tenant', | ||
1720 | 160 | enabled=True) | ||
1721 | 161 | self.keystone_v2.roles.create(name=self.demo_role) | ||
1722 | 162 | self.keystone_v2.users.create(name=self.demo_user, | ||
1723 | 163 | password='password', | ||
1724 | 164 | tenant_id=tenant.id, | ||
1725 | 165 | email='demo@demo.com') | ||
1726 | 166 | |||
1727 | 167 | # Authenticate keystone demo | ||
1728 | 168 | self.keystone_demo = u.authenticate_keystone_user( | ||
1729 | 169 | self.keystone_v2, user=self.demo_user, | ||
1730 | 170 | password='password', tenant=self.demo_tenant) | ||
1731 | 171 | |||
1732 | 172 | def create_users_v3(self): | ||
1733 | 173 | # Create a demo tenant/role/user | ||
1734 | 174 | self.demo_project = 'demoProject' | ||
1735 | 175 | self.demo_user_v3 = 'demoUserV3' | ||
1736 | 176 | self.demo_domain = 'demoDomain' | ||
1737 | 177 | try: | ||
1738 | 178 | domain = self.keystone_v3.domains.find(name=self.demo_domain) | ||
1739 | 179 | except keystoneclient.exceptions.NotFound: | ||
1740 | 180 | domain = self.keystone_v3.domains.create( | ||
1741 | 181 | self.demo_domain, | ||
1742 | 182 | description='Demo Domain', | ||
1743 | 183 | enabled=True | ||
1744 | 184 | ) | ||
1745 | 185 | |||
1746 | 186 | try: | ||
1747 | 187 | self.keystone_v3.projects.find(name=self.demo_project) | ||
1748 | 188 | except keystoneclient.exceptions.NotFound: | ||
1749 | 189 | self.keystone_v3.projects.create( | ||
1750 | 190 | self.demo_project, | ||
1751 | 191 | domain, | ||
1752 | 192 | description='Demo Project', | ||
1753 | 193 | enabled=True, | ||
1754 | 194 | ) | ||
1755 | 195 | |||
1756 | 196 | try: | ||
1757 | 197 | self.keystone_v3.roles.find(name=self.demo_role) | ||
1758 | 198 | except keystoneclient.exceptions.NotFound: | ||
1759 | 199 | self.keystone_v3.roles.create(name=self.demo_role) | ||
1760 | 200 | |||
1761 | 201 | try: | ||
1762 | 202 | self.keystone_v3.users.find(name=self.demo_user_v3) | ||
1763 | 203 | except keystoneclient.exceptions.NotFound: | ||
1764 | 204 | self.keystone_v3.users.create( | ||
1765 | 205 | self.demo_user_v3, | ||
1766 | 206 | domain=domain.id, | ||
1767 | 207 | project=self.demo_project, | ||
1768 | 208 | password='password', | ||
1769 | 209 | email='demov3@demo.com', | ||
1770 | 210 | description='Demo', | ||
1771 | 211 | enabled=True) | ||
1772 | 212 | |||
1773 | 112 | def _initialize_tests(self): | 213 | def _initialize_tests(self): |
1774 | 113 | """Perform final initialization before tests get run.""" | 214 | """Perform final initialization before tests get run.""" |
1775 | 114 | # Access the sentries for inspecting service units | 215 | # Access the sentries for inspecting service units |
1776 | @@ -119,31 +220,14 @@ | |||
1777 | 119 | self._get_openstack_release())) | 220 | self._get_openstack_release())) |
1778 | 120 | u.log.debug('openstack release str: {}'.format( | 221 | u.log.debug('openstack release str: {}'.format( |
1779 | 121 | self._get_openstack_release_string())) | 222 | self._get_openstack_release_string())) |
1781 | 122 | 223 | self.keystone_ip = self.keystone_sentry.relation( | |
1782 | 224 | 'shared-db', | ||
1783 | 225 | 'mysql:shared-db')['private-address'] | ||
1784 | 226 | self.set_api_version(2) | ||
1785 | 123 | # Authenticate keystone admin | 227 | # Authenticate keystone admin |
1809 | 124 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, | 228 | self.keystone_v2 = self.get_keystone_client(api_version=2) |
1810 | 125 | user='admin', | 229 | self.keystone_v3 = self.get_keystone_client(api_version=3) |
1811 | 126 | password='openstack', | 230 | self.create_users_v2() |
1789 | 127 | tenant='admin') | ||
1790 | 128 | |||
1791 | 129 | # Create a demo tenant/role/user | ||
1792 | 130 | self.demo_tenant = 'demoTenant' | ||
1793 | 131 | self.demo_role = 'demoRole' | ||
1794 | 132 | self.demo_user = 'demoUser' | ||
1795 | 133 | if not u.tenant_exists(self.keystone, self.demo_tenant): | ||
1796 | 134 | tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant, | ||
1797 | 135 | description='demo tenant', | ||
1798 | 136 | enabled=True) | ||
1799 | 137 | self.keystone.roles.create(name=self.demo_role) | ||
1800 | 138 | self.keystone.users.create(name=self.demo_user, | ||
1801 | 139 | password='password', | ||
1802 | 140 | tenant_id=tenant.id, | ||
1803 | 141 | email='demo@demo.com') | ||
1804 | 142 | |||
1805 | 143 | # Authenticate keystone demo | ||
1806 | 144 | self.keystone_demo = u.authenticate_keystone_user( | ||
1807 | 145 | self.keystone, user=self.demo_user, | ||
1808 | 146 | password='password', tenant=self.demo_tenant) | ||
1812 | 147 | 231 | ||
1813 | 148 | def test_100_services(self): | 232 | def test_100_services(self): |
1814 | 149 | """Verify the expected services are running on the corresponding | 233 | """Verify the expected services are running on the corresponding |
1815 | @@ -159,7 +243,7 @@ | |||
1816 | 159 | if ret: | 243 | if ret: |
1817 | 160 | amulet.raise_status(amulet.FAIL, msg=ret) | 244 | amulet.raise_status(amulet.FAIL, msg=ret) |
1818 | 161 | 245 | ||
1820 | 162 | def test_102_keystone_tenants(self): | 246 | def validate_keystone_tenants(self, client): |
1821 | 163 | """Verify all existing tenants.""" | 247 | """Verify all existing tenants.""" |
1822 | 164 | u.log.debug('Checking keystone tenants...') | 248 | u.log.debug('Checking keystone tenants...') |
1823 | 165 | expected = [ | 249 | expected = [ |
1824 | @@ -176,13 +260,20 @@ | |||
1825 | 176 | 'description': 'Created by Juju', | 260 | 'description': 'Created by Juju', |
1826 | 177 | 'id': u.not_null} | 261 | 'id': u.not_null} |
1827 | 178 | ] | 262 | ] |
1829 | 179 | actual = self.keystone.tenants.list() | 263 | if self.keystone_api_version == 2: |
1830 | 264 | actual = client.tenants.list() | ||
1831 | 265 | else: | ||
1832 | 266 | actual = client.projects.list() | ||
1833 | 180 | 267 | ||
1834 | 181 | ret = u.validate_tenant_data(expected, actual) | 268 | ret = u.validate_tenant_data(expected, actual) |
1835 | 182 | if ret: | 269 | if ret: |
1836 | 183 | amulet.raise_status(amulet.FAIL, msg=ret) | 270 | amulet.raise_status(amulet.FAIL, msg=ret) |
1837 | 184 | 271 | ||
1839 | 185 | def test_104_keystone_roles(self): | 272 | def test_102_keystone_tenants(self): |
1840 | 273 | self.set_api_version(2) | ||
1841 | 274 | self.validate_keystone_tenants(self.keystone_v2) | ||
1842 | 275 | |||
1843 | 276 | def validate_keystone_roles(self, client): | ||
1844 | 186 | """Verify all existing roles.""" | 277 | """Verify all existing roles.""" |
1845 | 187 | u.log.debug('Checking keystone roles...') | 278 | u.log.debug('Checking keystone roles...') |
1846 | 188 | expected = [ | 279 | expected = [ |
1847 | @@ -191,40 +282,113 @@ | |||
1848 | 191 | {'name': 'Admin', | 282 | {'name': 'Admin', |
1849 | 192 | 'id': u.not_null} | 283 | 'id': u.not_null} |
1850 | 193 | ] | 284 | ] |
1852 | 194 | actual = self.keystone.roles.list() | 285 | actual = client.roles.list() |
1853 | 195 | 286 | ||
1854 | 196 | ret = u.validate_role_data(expected, actual) | 287 | ret = u.validate_role_data(expected, actual) |
1855 | 197 | if ret: | 288 | if ret: |
1856 | 198 | amulet.raise_status(amulet.FAIL, msg=ret) | 289 | amulet.raise_status(amulet.FAIL, msg=ret) |
1857 | 199 | 290 | ||
1859 | 200 | def test_106_keystone_users(self): | 291 | def test_104_keystone_roles(self): |
1860 | 292 | self.set_api_version(2) | ||
1861 | 293 | self.validate_keystone_roles(self.keystone_v2) | ||
1862 | 294 | |||
1863 | 295 | def validate_keystone_users(self, client): | ||
1864 | 201 | """Verify all existing roles.""" | 296 | """Verify all existing roles.""" |
1865 | 202 | u.log.debug('Checking keystone users...') | 297 | u.log.debug('Checking keystone users...') |
1867 | 203 | expected = [ | 298 | base = [ |
1868 | 204 | {'name': 'demoUser', | 299 | {'name': 'demoUser', |
1869 | 205 | 'enabled': True, | 300 | 'enabled': True, |
1870 | 206 | 'tenantId': u.not_null, | ||
1871 | 207 | 'id': u.not_null, | 301 | 'id': u.not_null, |
1872 | 208 | 'email': 'demo@demo.com'}, | 302 | 'email': 'demo@demo.com'}, |
1873 | 209 | {'name': 'admin', | 303 | {'name': 'admin', |
1874 | 210 | 'enabled': True, | 304 | 'enabled': True, |
1875 | 211 | 'tenantId': u.not_null, | ||
1876 | 212 | 'id': u.not_null, | 305 | 'id': u.not_null, |
1877 | 213 | 'email': 'juju@localhost'}, | 306 | 'email': 'juju@localhost'}, |
1878 | 214 | {'name': 'cinder_cinderv2', | 307 | {'name': 'cinder_cinderv2', |
1879 | 215 | 'enabled': True, | 308 | 'enabled': True, |
1880 | 216 | 'tenantId': u.not_null, | ||
1881 | 217 | 'id': u.not_null, | 309 | 'id': u.not_null, |
1882 | 218 | 'email': u'juju@localhost'} | 310 | 'email': u'juju@localhost'} |
1883 | 219 | ] | 311 | ] |
1886 | 220 | actual = self.keystone.users.list() | 312 | expected = [] |
1887 | 221 | ret = u.validate_user_data(expected, actual) | 313 | for user_info in base: |
1888 | 314 | if self.keystone_api_version == 2: | ||
1889 | 315 | user_info['tenantId'] = u.not_null | ||
1890 | 316 | else: | ||
1891 | 317 | user_info['default_project_id'] = u.not_null | ||
1892 | 318 | expected.append(user_info) | ||
1893 | 319 | actual = client.users.list() | ||
1894 | 320 | ret = u.validate_user_data(expected, actual, | ||
1895 | 321 | api_version=self.keystone_api_version) | ||
1896 | 222 | if ret: | 322 | if ret: |
1897 | 223 | amulet.raise_status(amulet.FAIL, msg=ret) | 323 | amulet.raise_status(amulet.FAIL, msg=ret) |
1898 | 224 | 324 | ||
1900 | 225 | def test_108_service_catalog(self): | 325 | def test_106_keystone_users(self): |
1901 | 326 | self.set_api_version(2) | ||
1902 | 327 | self.validate_keystone_users(self.keystone_v2) | ||
1903 | 328 | |||
1904 | 329 | def is_liberty_or_newer(self): | ||
1905 | 330 | os_release = self._get_openstack_release_string() | ||
1906 | 331 | if os_release >= 'liberty': | ||
1907 | 332 | return True | ||
1908 | 333 | else: | ||
1909 | 334 | u.log.info('Skipping test, {} < liberty'.format(os_release)) | ||
1910 | 335 | return False | ||
1911 | 336 | |||
1912 | 337 | def test_112_keystone_tenants(self): | ||
1913 | 338 | if self.is_liberty_or_newer(): | ||
1914 | 339 | self.set_api_version(3) | ||
1915 | 340 | self.validate_keystone_tenants(self.keystone_v3) | ||
1916 | 341 | |||
1917 | 342 | def test_114_keystone_tenants(self): | ||
1918 | 343 | if self.is_liberty_or_newer(): | ||
1919 | 344 | self.set_api_version(3) | ||
1920 | 345 | self.validate_keystone_roles(self.keystone_v3) | ||
1921 | 346 | |||
1922 | 347 | def test_116_keystone_users(self): | ||
1923 | 348 | if self.is_liberty_or_newer(): | ||
1924 | 349 | self.set_api_version(3) | ||
1925 | 350 | self.validate_keystone_users(self.keystone_v3) | ||
1926 | 351 | |||
1927 | 352 | def test_118_keystone_users(self): | ||
1928 | 353 | if self.is_liberty_or_newer(): | ||
1929 | 354 | self.set_api_version(3) | ||
1930 | 355 | self.create_users_v3() | ||
1931 | 356 | actual_user = self.keystone_v3.users.find(name=self.demo_user_v3) | ||
1932 | 357 | expect = { | ||
1933 | 358 | 'default_project_id': self.demo_project, | ||
1934 | 359 | 'email': 'demov3@demo.com', | ||
1935 | 360 | 'name': self.demo_user_v3, | ||
1936 | 361 | } | ||
1937 | 362 | for key in expect.keys(): | ||
1938 | 363 | u.log.debug('Checking user {} {} is {}'.format( | ||
1939 | 364 | self.demo_user_v3, | ||
1940 | 365 | key, | ||
1941 | 366 | expect[key]) | ||
1942 | 367 | ) | ||
1943 | 368 | assert expect[key] == getattr(actual_user, key) | ||
1944 | 369 | |||
1945 | 370 | def test_120_keystone_domains(self): | ||
1946 | 371 | if self.is_liberty_or_newer(): | ||
1947 | 372 | self.set_api_version(3) | ||
1948 | 373 | self.create_users_v3() | ||
1949 | 374 | actual_domain = self.keystone_v3.domains.find( | ||
1950 | 375 | name=self.demo_domain | ||
1951 | 376 | ) | ||
1952 | 377 | expect = { | ||
1953 | 378 | 'name': self.demo_domain, | ||
1954 | 379 | } | ||
1955 | 380 | for key in expect.keys(): | ||
1956 | 381 | u.log.debug('Checking domain {} {} is {}'.format( | ||
1957 | 382 | self.demo_domain, | ||
1958 | 383 | key, | ||
1959 | 384 | expect[key]) | ||
1960 | 385 | ) | ||
1961 | 386 | assert expect[key] == getattr(actual_domain, key) | ||
1962 | 387 | |||
1963 | 388 | def test_138_service_catalog(self): | ||
1964 | 226 | """Verify that the service catalog endpoint data is valid.""" | 389 | """Verify that the service catalog endpoint data is valid.""" |
1965 | 227 | u.log.debug('Checking keystone service catalog...') | 390 | u.log.debug('Checking keystone service catalog...') |
1966 | 391 | self.set_api_version(2) | ||
1967 | 228 | endpoint_check = { | 392 | endpoint_check = { |
1968 | 229 | 'adminURL': u.valid_url, | 393 | 'adminURL': u.valid_url, |
1969 | 230 | 'id': u.not_null, | 394 | 'id': u.not_null, |
1970 | @@ -236,16 +400,16 @@ | |||
1971 | 236 | 'volume': [endpoint_check], | 400 | 'volume': [endpoint_check], |
1972 | 237 | 'identity': [endpoint_check] | 401 | 'identity': [endpoint_check] |
1973 | 238 | } | 402 | } |
1975 | 239 | actual = self.keystone.service_catalog.get_endpoints() | 403 | actual = self.keystone_v2.service_catalog.get_endpoints() |
1976 | 240 | 404 | ||
1977 | 241 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) | 405 | ret = u.validate_svc_catalog_endpoint_data(expected, actual) |
1978 | 242 | if ret: | 406 | if ret: |
1979 | 243 | amulet.raise_status(amulet.FAIL, msg=ret) | 407 | amulet.raise_status(amulet.FAIL, msg=ret) |
1980 | 244 | 408 | ||
1982 | 245 | def test_110_keystone_endpoint(self): | 409 | def test_140_keystone_endpoint(self): |
1983 | 246 | """Verify the keystone endpoint data.""" | 410 | """Verify the keystone endpoint data.""" |
1984 | 247 | u.log.debug('Checking keystone api endpoint data...') | 411 | u.log.debug('Checking keystone api endpoint data...') |
1986 | 248 | endpoints = self.keystone.endpoints.list() | 412 | endpoints = self.keystone_v2.endpoints.list() |
1987 | 249 | admin_port = '35357' | 413 | admin_port = '35357' |
1988 | 250 | internal_port = public_port = '5000' | 414 | internal_port = public_port = '5000' |
1989 | 251 | expected = { | 415 | expected = { |
1990 | @@ -262,10 +426,10 @@ | |||
1991 | 262 | amulet.raise_status(amulet.FAIL, | 426 | amulet.raise_status(amulet.FAIL, |
1992 | 263 | msg='keystone endpoint: {}'.format(ret)) | 427 | msg='keystone endpoint: {}'.format(ret)) |
1993 | 264 | 428 | ||
1995 | 265 | def test_112_cinder_endpoint(self): | 429 | def test_142_cinder_endpoint(self): |
1996 | 266 | """Verify the cinder endpoint data.""" | 430 | """Verify the cinder endpoint data.""" |
1997 | 267 | u.log.debug('Checking cinder endpoint...') | 431 | u.log.debug('Checking cinder endpoint...') |
1999 | 268 | endpoints = self.keystone.endpoints.list() | 432 | endpoints = self.keystone_v2.endpoints.list() |
2000 | 269 | admin_port = internal_port = public_port = '8776' | 433 | admin_port = internal_port = public_port = '8776' |
2001 | 270 | expected = { | 434 | expected = { |
2002 | 271 | 'id': u.not_null, | 435 | 'id': u.not_null, |
2003 | 272 | 436 | ||
2004 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
2005 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2016-01-04 21:27:51 +0000 | |||
2006 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2016-03-05 15:43:48 +0000 | |||
2007 | @@ -27,6 +27,10 @@ | |||
2008 | 27 | import glanceclient.v1.client as glance_client | 27 | import glanceclient.v1.client as glance_client |
2009 | 28 | import heatclient.v1.client as heat_client | 28 | import heatclient.v1.client as heat_client |
2010 | 29 | import keystoneclient.v2_0 as keystone_client | 29 | import keystoneclient.v2_0 as keystone_client |
2011 | 30 | from keystoneclient.auth.identity import v3 as keystone_id_v3 | ||
2012 | 31 | from keystoneclient import session as keystone_session | ||
2013 | 32 | from keystoneclient.v3 import client as keystone_client_v3 | ||
2014 | 33 | |||
2015 | 30 | import novaclient.v1_1.client as nova_client | 34 | import novaclient.v1_1.client as nova_client |
2016 | 31 | import pika | 35 | import pika |
2017 | 32 | import swiftclient | 36 | import swiftclient |
2018 | @@ -139,7 +143,7 @@ | |||
2019 | 139 | return "role {} does not exist".format(e['name']) | 143 | return "role {} does not exist".format(e['name']) |
2020 | 140 | return ret | 144 | return ret |
2021 | 141 | 145 | ||
2023 | 142 | def validate_user_data(self, expected, actual): | 146 | def validate_user_data(self, expected, actual, api_version=None): |
2024 | 143 | """Validate user data. | 147 | """Validate user data. |
2025 | 144 | 148 | ||
2026 | 145 | Validate a list of actual user data vs a list of expected user | 149 | Validate a list of actual user data vs a list of expected user |
2027 | @@ -150,10 +154,14 @@ | |||
2028 | 150 | for e in expected: | 154 | for e in expected: |
2029 | 151 | found = False | 155 | found = False |
2030 | 152 | for act in actual: | 156 | for act in actual: |
2035 | 153 | a = {'enabled': act.enabled, 'name': act.name, | 157 | if e['name'] == act.name: |
2036 | 154 | 'email': act.email, 'tenantId': act.tenantId, | 158 | a = {'enabled': act.enabled, 'name': act.name, |
2037 | 155 | 'id': act.id} | 159 | 'email': act.email, 'id': act.id} |
2038 | 156 | if e['name'] == a['name']: | 160 | if api_version == 2: |
2039 | 161 | a['tenantId'] = act.tenantId | ||
2040 | 162 | else: | ||
2041 | 163 | a['default_project_id'] = getattr(act, | ||
2042 | 164 | 'default_project_id', 'none') | ||
2043 | 157 | found = True | 165 | found = True |
2044 | 158 | ret = self._validate_dict_data(e, a) | 166 | ret = self._validate_dict_data(e, a) |
2045 | 159 | if ret: | 167 | if ret: |
2046 | @@ -188,15 +196,30 @@ | |||
2047 | 188 | return cinder_client.Client(username, password, tenant, ept) | 196 | return cinder_client.Client(username, password, tenant, ept) |
2048 | 189 | 197 | ||
2049 | 190 | def authenticate_keystone_admin(self, keystone_sentry, user, password, | 198 | def authenticate_keystone_admin(self, keystone_sentry, user, password, |
2051 | 191 | tenant): | 199 | tenant=None, api_version=None, |
2052 | 200 | keystone_ip=None): | ||
2053 | 192 | """Authenticates admin user with the keystone admin endpoint.""" | 201 | """Authenticates admin user with the keystone admin endpoint.""" |
2054 | 193 | self.log.debug('Authenticating keystone admin...') | 202 | self.log.debug('Authenticating keystone admin...') |
2055 | 194 | unit = keystone_sentry | 203 | unit = keystone_sentry |
2061 | 195 | service_ip = unit.relation('shared-db', | 204 | if not keystone_ip: |
2062 | 196 | 'mysql:shared-db')['private-address'] | 205 | keystone_ip = unit.relation('shared-db', |
2063 | 197 | ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) | 206 | 'mysql:shared-db')['private-address'] |
2064 | 198 | return keystone_client.Client(username=user, password=password, | 207 | base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8')) |
2065 | 199 | tenant_name=tenant, auth_url=ep) | 208 | if not api_version or api_version == 2: |
2066 | 209 | ep = base_ep + "/v2.0" | ||
2067 | 210 | return keystone_client.Client(username=user, password=password, | ||
2068 | 211 | tenant_name=tenant, auth_url=ep) | ||
2069 | 212 | else: | ||
2070 | 213 | ep = base_ep + "/v3" | ||
2071 | 214 | auth = keystone_id_v3.Password( | ||
2072 | 215 | user_domain_name='admin_domain', | ||
2073 | 216 | username=user, | ||
2074 | 217 | password=password, | ||
2075 | 218 | domain_name='admin_domain', | ||
2076 | 219 | auth_url=ep, | ||
2077 | 220 | ) | ||
2078 | 221 | sess = keystone_session.Session(auth=auth) | ||
2079 | 222 | return keystone_client_v3.Client(session=sess) | ||
2080 | 200 | 223 | ||
2081 | 201 | def authenticate_keystone_user(self, keystone, user, password, tenant): | 224 | def authenticate_keystone_user(self, keystone, user, password, tenant): |
2082 | 202 | """Authenticates a regular user with the keystone public endpoint.""" | 225 | """Authenticates a regular user with the keystone public endpoint.""" |
2083 | 203 | 226 | ||
2084 | === added directory 'tests/charmhelpers/core' | |||
2085 | === added file 'tests/charmhelpers/core/__init__.py' | |||
2086 | --- tests/charmhelpers/core/__init__.py 1970-01-01 00:00:00 +0000 | |||
2087 | +++ tests/charmhelpers/core/__init__.py 2016-03-05 15:43:48 +0000 | |||
2088 | @@ -0,0 +1,15 @@ | |||
2089 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
2090 | 2 | # | ||
2091 | 3 | # This file is part of charm-helpers. | ||
2092 | 4 | # | ||
2093 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
2094 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
2095 | 7 | # published by the Free Software Foundation. | ||
2096 | 8 | # | ||
2097 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
2098 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
2099 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
2100 | 12 | # GNU Lesser General Public License for more details. | ||
2101 | 13 | # | ||
2102 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
2103 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
2104 | 0 | 16 | ||
2105 | === added file 'tests/charmhelpers/core/decorators.py' | |||
2106 | --- tests/charmhelpers/core/decorators.py 1970-01-01 00:00:00 +0000 | |||
2107 | +++ tests/charmhelpers/core/decorators.py 2016-03-05 15:43:48 +0000 | |||
2108 | @@ -0,0 +1,57 @@ | |||
2109 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
2110 | 2 | # | ||
2111 | 3 | # This file is part of charm-helpers. | ||
2112 | 4 | # | ||
2113 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
2114 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
2115 | 7 | # published by the Free Software Foundation. | ||
2116 | 8 | # | ||
2117 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
2118 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
2119 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
2120 | 12 | # GNU Lesser General Public License for more details. | ||
2121 | 13 | # | ||
2122 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
2123 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
2124 | 16 | |||
2125 | 17 | # | ||
2126 | 18 | # Copyright 2014 Canonical Ltd. | ||
2127 | 19 | # | ||
2128 | 20 | # Authors: | ||
2129 | 21 | # Edward Hope-Morley <opentastic@gmail.com> | ||
2130 | 22 | # | ||
2131 | 23 | |||
2132 | 24 | import time | ||
2133 | 25 | |||
2134 | 26 | from charmhelpers.core.hookenv import ( | ||
2135 | 27 | log, | ||
2136 | 28 | INFO, | ||
2137 | 29 | ) | ||
2138 | 30 | |||
2139 | 31 | |||
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """Decorator factory: retry the wrapped callable when it raises.

    If the decorated function raises ``exc_type`` it is retried up to
    ``num_retries`` times before the exception is allowed to propagate.
    When ``base_delay`` is non-zero, each retry sleeps for a linearly
    growing multiple of it (1x, 2x, 3x, ...).
    """
    def _retry_on_exception_inner_1(f):
        def _retry_on_exception_inner_2(*args, **kwargs):
            remaining = num_retries
            attempt = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    # Out of retries: let the exception propagate.
                    if not remaining:
                        raise

                    # Linear backoff based on how many times we've failed.
                    delay = base_delay * attempt
                    attempt += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, remaining, delay), level=INFO)
                    remaining -= 1
                    if delay:
                        time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
2166 | 0 | 58 | ||
2167 | === added file 'tests/charmhelpers/core/hookenv.py' | |||
2168 | --- tests/charmhelpers/core/hookenv.py 1970-01-01 00:00:00 +0000 | |||
2169 | +++ tests/charmhelpers/core/hookenv.py 2016-03-05 15:43:48 +0000 | |||
2170 | @@ -0,0 +1,978 @@ | |||
2171 | 1 | # Copyright 2014-2015 Canonical Limited. | ||
2172 | 2 | # | ||
2173 | 3 | # This file is part of charm-helpers. | ||
2174 | 4 | # | ||
2175 | 5 | # charm-helpers is free software: you can redistribute it and/or modify | ||
2176 | 6 | # it under the terms of the GNU Lesser General Public License version 3 as | ||
2177 | 7 | # published by the Free Software Foundation. | ||
2178 | 8 | # | ||
2179 | 9 | # charm-helpers is distributed in the hope that it will be useful, | ||
2180 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
2181 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
2182 | 12 | # GNU Lesser General Public License for more details. | ||
2183 | 13 | # | ||
2184 | 14 | # You should have received a copy of the GNU Lesser General Public License | ||
2185 | 15 | # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>. | ||
2186 | 16 | |||
2187 | 17 | "Interactions with the Juju environment" | ||
2188 | 18 | # Copyright 2013 Canonical Ltd. | ||
2189 | 19 | # | ||
2190 | 20 | # Authors: | ||
2191 | 21 | # Charm Helpers Developers <juju@lists.ubuntu.com> | ||
2192 | 22 | |||
2193 | 23 | from __future__ import print_function | ||
2194 | 24 | import copy | ||
2195 | 25 | from distutils.version import LooseVersion | ||
2196 | 26 | from functools import wraps | ||
2197 | 27 | import glob | ||
2198 | 28 | import os | ||
2199 | 29 | import json | ||
2200 | 30 | import yaml | ||
2201 | 31 | import subprocess | ||
2202 | 32 | import sys | ||
2203 | 33 | import errno | ||
2204 | 34 | import tempfile | ||
2205 | 35 | from subprocess import CalledProcessError | ||
2206 | 36 | |||
2207 | 37 | import six | ||
2208 | 38 | if not six.PY3: | ||
2209 | 39 | from UserDict import UserDict | ||
2210 | 40 | else: | ||
2211 | 41 | from collections import UserDict | ||
2212 | 42 | |||
# Log level names accepted by juju-log's `-l` flag (see log() below).
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
# Sentinel used by Serializable.__getattr__ to distinguish a genuinely
# missing attribute from one whose value is None.
MARKER = object()

# Module-level memoisation store shared by the @cached decorator and flush().
cache = {}
2221 | 51 | |||
2222 | 52 | |||
def cached(func):
    """Memoise ``func`` keyed on the string form of (func, args, kwargs).

    Results are stored in the module-level ``cache`` dict, so repeated
    calls with the same arguments within one hook execution return the
    first result without re-running the function. Example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    caches the result of unit_get + 'test' for future calls.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        key = str((func, args, kwargs))
        if key in cache:
            return cache[key]
        result = func(*args, **kwargs)
        cache[key] = result
        return result
    # Expose the undecorated function for callers that must bypass caching.
    wrapper._wrapped = func
    return wrapper
2249 | 79 | |||
2250 | 80 | |||
def flush(key):
    """Evict memoised entries whose composite cache key contains *key*.

    The cache keys are ``str((func, args, kwargs))``, so flushing e.g. a
    unit name drops every cached call that mentioned that unit.
    """
    stale = [entry for entry in cache if key in entry]
    for entry in stale:
        del cache[entry]
2260 | 90 | |||
2261 | 91 | |||
def log(message, level=None):
    """Write a message to the juju log via the juju-log hook tool.

    :param message: text to log; non-string values are logged via repr().
    :param level: optional level name (e.g. DEBUG, INFO, WARNING, ERROR).
    """
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # juju-log binary absent (e.g. running outside juju): fall back
            # to a formatted line on stderr instead of failing.
            if level:
                message = "{}: {}".format(level, message)
            message = "juju-log: {}".format(message)
            print(message, file=sys.stderr)
        else:
            raise
2282 | 112 | |||
2283 | 113 | |||
class Serializable(UserDict):
    """Wrapper, an object that can be serialized to yaml or json"""

    def __init__(self, obj):
        # wrap the object
        UserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        """Fall through to the wrapped object's attributes, then its keys;
        raise AttributeError when neither provides *attr*."""
        # See if this object has attribute.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Check for attribute in wrapped object.
        got = getattr(self.data, attr, MARKER)
        if got is not MARKER:
            return got
        # Proxy to the wrapped object via dict interface.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def __getstate__(self):
        # Pickle as a standard dictionary.
        return self.data

    def __setstate__(self, state):
        # Unpickle into our wrapper.
        self.data = state

    def json(self):
        """Serialize the object to json"""
        return json.dumps(self.data)

    def yaml(self):
        """Serialize the object to yaml"""
        return yaml.dump(self.data)
2321 | 151 | |||
2322 | 152 | |||
def execution_environment():
    """A convenient bundling of the current execution context.

    Returns a dict with the charm config ('conf'), relation details when
    executing inside a relation hook ('reltype'/'relid'/'rel'), the local
    unit name ('unit'), all relation data ('rels') and the process
    environment ('env').
    """
    context = {}
    context['conf'] = config()
    if relation_id():
        context['reltype'] = relation_type()
        context['relid'] = relation_id()
        context['rel'] = relation_get()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context
2335 | 165 | |||
2336 | 166 | |||
def in_relation_hook():
    """Return True when executing inside a relation hook (juju sets
    JUJU_RELATION in the hook environment)."""
    return os.environ.get('JUJU_RELATION') is not None
2340 | 170 | |||
2341 | 171 | |||
def relation_type():
    """The relation type (scope) for the currently executing relation
    hook, or None when not in a relation hook."""
    return os.environ.get('JUJU_RELATION')
2345 | 175 | |||
2346 | 176 | |||
@cached
def relation_id(relation_name=None, service_or_unit=None):
    """The relation ID for the current or a specified relation.

    With no arguments, returns JUJU_RELATION_ID from the environment
    (None outside a relation hook).  With both relation_name and
    service_or_unit, scans that relation's ids for the one whose remote
    service matches service_or_unit's service part; implicitly returns
    None when no match is found.  Supplying exactly one of the two
    arguments raises ValueError.
    """
    if not relation_name and not service_or_unit:
        return os.environ.get('JUJU_RELATION_ID', None)
    elif relation_name and service_or_unit:
        # 'service/0' and 'service' both reduce to 'service'.
        service_name = service_or_unit.split('/')[0]
        for relid in relation_ids(relation_name):
            remote_service = remote_service_name(relid)
            if remote_service == service_name:
                return relid
    else:
        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
2360 | 190 | |||
2361 | 191 | |||
def local_unit():
    """This unit's name, e.g. 'keystone/0', from JUJU_UNIT_NAME."""
    unit = os.environ['JUJU_UNIT_NAME']
    return unit
2365 | 195 | |||
2366 | 196 | |||
def remote_unit():
    """The remote unit driving the current relation hook, or None when
    not in a relation hook."""
    return os.environ.get('JUJU_REMOTE_UNIT')
2370 | 200 | |||
2371 | 201 | |||
def service_name():
    """The service this unit belongs to: the unit name before the '/'."""
    unit = local_unit()
    return unit.split('/')[0]
2375 | 205 | |||
2376 | 206 | |||
@cached
def remote_service_name(relid=None):
    """The remote service name for a given relation-id (or the current relation).

    Returns None when the relation has no related units.
    """
    if relid is None:
        # Inside a relation hook: derive from the remote unit directly.
        unit = remote_unit()
    else:
        # Otherwise use the first related unit on the given relation id.
        units = related_units(relid)
        unit = units[0] if units else None
    return unit.split('/')[0] if unit else None
2386 | 216 | |||
2387 | 217 | |||
def hook_name():
    """Name of the hook currently executing: JUJU_HOOK_NAME when set,
    otherwise the basename of the running script."""
    fallback = os.path.basename(sys.argv[0])
    return os.environ.get('JUJU_HOOK_NAME', fallback)
2391 | 221 | |||
2392 | 222 | |||
class Config(dict):
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'


        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'

    """
    # Persisted copy lives in the charm directory so values survive across
    # hook invocations.
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()
        # Register the implicit save to run at hook exit (hookenv's own
        # atexit, not the stdlib module).
        atexit(self._implicit_save)

    def load_previous(self, path=None):
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.

        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)
        # Seed current config with previously-stored keys that juju's
        # config-get did not supply (e.g. user-stored values).
        for k, v in copy.deepcopy(self._prev_dict).items():
            if k not in self:
                self[k] = v

    def changed(self, key):
        """Return True if the current value for this key is different from
        the previous value.

        """
        # No previous snapshot at all: treat every key as changed.
        if self._prev_dict is None:
            return True
        return self.previous(key) != self.get(key)

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no previous value.

        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None

    def save(self):
        """Save this config to disk.

        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        with open(self.path, 'w') as f:
            json.dump(self, f)

    def _implicit_save(self):
        # Exit-time hook: persist only when implicit saving is enabled.
        if self.implicit_save:
            self.save()
2499 | 329 | |||
2500 | 330 | |||
@cached
def config(scope=None):
    """Juju charm configuration, read via the config-get hook tool.

    :param scope: a single option name to fetch; when None the whole
        configuration is returned wrapped in a Config instance.
    :returns: the option value, a Config, or None when config-get's
        output is not valid JSON.
    """
    config_cmd_line = ['config-get']
    if scope is not None:
        config_cmd_line.append(scope)
    config_cmd_line.append('--format=json')
    try:
        config_data = json.loads(
            subprocess.check_output(config_cmd_line).decode('UTF-8'))
        if scope is not None:
            return config_data
        return Config(config_data)
    except ValueError:
        return None
2516 | 346 | |||
2517 | 347 | |||
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information via the relation-get hook tool.

    :param attribute: a single key to read; '-' (all settings) when None.
    :param unit: remote unit to read from (defaults to the current one).
    :param rid: relation id (defaults to the current relation).
    :returns: parsed JSON value, or None when the output is not JSON or
        relation-get exits with status 2 (e.g. the unit has departed).
    """
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')
        _args.append(rid)
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except CalledProcessError as e:
        if e.returncode == 2:
            return None
        raise
2536 | 366 | |||
2537 | 367 | |||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit.

    :param relation_id: relation id to write to (defaults to the current
        relation).
    :param relation_settings: dict of key/value settings; merged with any
        **kwargs (kwargs win on conflict).
    """
    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    # Probe relation-set's help text to see whether this juju supports
    # passing settings via a file.
    accepts_file = "--file" in subprocess.check_output(
        relation_cmd_line + ["--help"], universal_newlines=True)
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
    settings = relation_settings.copy()
    settings.update(kwargs)
    for key, value in settings.items():
        # Force value to be a string: it always should, but some call
        # sites pass in things like dicts or numbers.
        if value is not None:
            settings[key] = "{}".format(value)
    if accepts_file:
        # --file was introduced in Juju 1.23.2. Use it by default if
        # available, since otherwise we'll break if the relation data is
        # too big. Ideally we should tell relation-set to read the data from
        # stdin, but that feature is broken in 1.23.2: Bug #1454678.
        with tempfile.NamedTemporaryFile(delete=False) as settings_file:
            settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
        subprocess.check_call(
            relation_cmd_line + ["--file", settings_file.name])
        os.remove(settings_file.name)
    else:
        # Older juju: pass each setting as a key=value argument; None
        # clears the key ('key=').
        for key, value in settings.items():
            if value is None:
                relation_cmd_line.append('{}='.format(key))
            else:
                relation_cmd_line.append('{}={}'.format(key, value))
        subprocess.check_call(relation_cmd_line)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
2572 | 402 | |||
2573 | 403 | |||
def relation_clear(r_id=None):
    ''' Clears any relation data already set on relation r_id.

    Every key except the address keys is re-set to None (which juju
    interprets as deletion).
    '''
    settings = relation_get(rid=r_id,
                            unit=local_unit())
    for setting in settings:
        if setting not in ['public-address', 'private-address']:
            settings[setting] = None
    relation_set(relation_id=r_id,
                 **settings)
2583 | 413 | |||
2584 | 414 | |||
@cached
def relation_ids(reltype=None):
    """A list of relation IDs for *reltype*.

    :param reltype: relation type name (defaults to the type of the
        relation for the currently executing hook).
    :returns: list of relation-id strings; empty list when there are none.
    """
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
    # Fix: the original had an unreachable `return []` after this return;
    # the `or []` already covers the null-output case.
    return json.loads(
        subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
2595 | 425 | |||
2596 | 426 | |||
@cached
def related_units(relid=None):
    """A list of related unit names on *relid* (defaults to the current
    relation), via the relation-list hook tool."""
    relid = relid or relation_id()
    units_cmd_line = ['relation-list', '--format=json']
    if relid is not None:
        units_cmd_line.extend(('-r', relid))
    return json.loads(
        subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
2606 | 436 | |||
2607 | 437 | |||
@cached
def relation_for_unit(unit=None, rid=None):
    """Get the json represenation of a unit's relation.

    Keys ending in '-list' are split into Python lists, and the source
    unit is recorded under '__unit__'.
    """
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    for key in relation:
        if key.endswith('-list'):
            relation[key] = relation[key].split()
    relation['__unit__'] = unit
    return relation
2618 | 448 | |||
2619 | 449 | |||
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID"""
    relation_data = []
    # NOTE(review): when relid is None this falls back to relation_ids(),
    # which returns a *list*, so related_units() would receive a list
    # rather than a single id — verify callers always pass an explicit
    # relid here.
    relid = relid or relation_ids()
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        relation_data.append(unit_data)
    return relation_data
2630 | 460 | |||
2631 | 461 | |||
@cached
def relations_of_type(reltype=None):
    """Get relations of a specific type (defaults to the current hook's
    relation type); each entry is tagged with its '__relid__'."""
    relation_data = []
    reltype = reltype or relation_type()
    for relid in relation_ids(reltype):
        for relation in relations_for_id(relid):
            relation['__relid__'] = relid
            relation_data.append(relation)
    return relation_data
2642 | 472 | |||
2643 | 473 | |||
@cached
def metadata():
    """Get the current charm metadata.yaml contents as a python object"""
    with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
        return yaml.safe_load(md)
2649 | 479 | |||
2650 | 480 | |||
@cached
def relation_types():
    """Get a list of relation types supported by this charm, gathered
    from the provides/requires/peers sections of metadata.yaml."""
    rel_types = []
    md = metadata()
    for key in ('provides', 'requires', 'peers'):
        section = md.get(key)
        if section:
            rel_types.extend(section.keys())
    return rel_types
2661 | 491 | |||
2662 | 492 | |||
@cached
def peer_relation_id():
    '''Get the peers relation id if a peers relation has been joined, else None.

    Returns the first relation id of the first peers relation found in
    metadata.yaml.
    '''
    md = metadata()
    section = md.get('peers')
    if section:
        for key in section:
            relids = relation_ids(key)
            if relids:
                return relids[0]
    return None
2674 | 504 | |||
2675 | 505 | |||
@cached
def relation_to_interface(relation_name):
    """
    Given the name of a relation, return the interface that relation uses.

    :returns: The interface name, or ``None``.
    """
    return relation_to_role_and_interface(relation_name)[1]
2684 | 514 | |||
2685 | 515 | |||
@cached
def relation_to_role_and_interface(relation_name):
    """
    Given the name of a relation, return the role and the name of the interface
    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).

    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
    """
    _metadata = metadata()
    for role in ('provides', 'requires', 'peers'):
        interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
        if interface:
            return role, interface
    return None, None
2700 | 530 | |||
2701 | 531 | |||
@cached
def role_and_interface_to_relations(role, interface_name):
    """
    Given a role and interface name, return a list of relation names for the
    current charm that use that interface under that role (where role is one
    of ``provides``, ``requires``, or ``peers``).

    :returns: A list of relation names.
    """
    _metadata = metadata()
    results = []
    for relation_name, relation in _metadata.get(role, {}).items():
        if relation['interface'] == interface_name:
            results.append(relation_name)
    return results
2717 | 547 | |||
2718 | 548 | |||
@cached
def interface_to_relations(interface_name):
    """
    Given an interface, return a list of relation names for the current
    charm that use that interface.

    :returns: A list of relation names.
    """
    results = []
    for role in ('provides', 'requires', 'peers'):
        results.extend(role_and_interface_to_relations(role, interface_name))
    return results
2731 | 561 | |||
2732 | 562 | |||
@cached
def charm_name():
    """Get the name of the current charm as is specified on metadata.yaml"""
    return metadata().get('name')
2737 | 567 | |||
2738 | 568 | |||
@cached
def relations():
    """Get a nested dictionary of relation data for all related units.

    Shape: {reltype: {relid: {unit_name: settings_dict}}}; the local
    unit's own settings are included alongside the remote units'.
    """
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                reldata = relation_get(unit=unit, rid=relid)
                units[unit] = reldata
            relids[relid] = units
        rels[reltype] = relids
    return rels
2753 | 583 | |||
2754 | 584 | |||
@cached
def is_relation_made(relation, keys='private-address'):
    '''
    Determine whether a relation is established by checking for
    presence of key(s). If a list of keys is provided, they
    must all be present for the relation to be identified as made
    '''
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            context = {}
            for k in keys:
                context[k] = relation_get(k, rid=r_id,
                                          unit=unit)
            # Relation counts as made only when every requested key has
            # a non-None value on at least one related unit.
            if None not in context.values():
                return True
    return False
2773 | 603 | |||
2774 | 604 | |||
def open_port(port, protocol="TCP"):
    """Open a service network port via the open-port hook tool."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
2780 | 610 | |||
2781 | 611 | |||
def close_port(port, protocol="TCP"):
    """Close a service network port via the close-port hook tool."""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])
2787 | 617 | |||
2788 | 618 | |||
@cached
def unit_get(attribute):
    """Read a unit attribute (e.g. 'public-address') via the unit-get
    hook tool; returns None when the output is not valid JSON."""
    _args = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
2797 | 627 | |||
2798 | 628 | |||
def unit_public_ip():
    """Get this unit's public IP address"""
    return unit_get('public-address')
2802 | 632 | |||
2803 | 633 | |||
def unit_private_ip():
    """Get this unit's private IP address"""
    return unit_get('private-address')
2807 | 637 | |||
2808 | 638 | |||
@cached
def storage_get(attribute=None, storage_id=None):
    """Get storage attributes via the storage-get hook tool.

    :param attribute: single attribute to read (all when None).
    :param storage_id: storage instance id (defaults to the current one).
    :returns: parsed JSON value, or None when output is not valid JSON.
    """
    _args = ['storage-get', '--format=json']
    if storage_id:
        _args.extend(('-s', storage_id))
    if attribute:
        _args.append(attribute)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
2821 | 651 | |||
2822 | 652 | |||
@cached
def storage_list(storage_name=None):
    """List the storage IDs for the unit via the storage-list hook tool.

    :param storage_name: optional storage name to filter on.
    :returns: list of storage ids; None when the output is not valid
        JSON; empty list when the storage-list tool does not exist
        (older juju versions).
    """
    _args = ['storage-list', '--format=json']
    if storage_name:
        _args.append(storage_name)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        return None
    except OSError as e:
        # Fix: use the module-level errno import instead of a redundant
        # function-local `import errno`.
        if e.errno == errno.ENOENT:
            # storage-list does not exist
            return []
        raise
2839 | 669 | |||
2840 | 670 | |||
class UnregisteredHookError(Exception):
    """Raised when an undefined hook is called"""
2844 | 674 | |||
2845 | 675 | |||
class Hooks(object):
    """A convenient handler for hook functions.

    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass  # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass  # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, config_save=None):
        super(Hooks, self).__init__()
        # Maps hook name -> callable.
        self._hooks = {}

        # For unknown reasons, we allow the Hooks constructor to override
        # config().implicit_save.
        if config_save is not None:
            config().implicit_save = config_save

    def register(self, name, function):
        """Register a hook"""
        self._hooks[name] = function

    def execute(self, args):
        """Execute a registered hook based on args[0].

        Runs the atstart callbacks first, then the hook, then the atexit
        callbacks (also on a clean SystemExit). Raises
        UnregisteredHookError when no hook matches.
        """
        _run_atstart()
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            try:
                self._hooks[hook_name]()
            except SystemExit as x:
                # A zero/None exit code is still a "successful" run: fire
                # the exit callbacks before re-raising.
                if x.code is None or x.code == 0:
                    _run_atexit()
                raise
            _run_atexit()
        else:
            raise UnregisteredHookError(hook_name)

    def hook(self, *hook_names):
        """Decorator, registering them as hooks"""
        def wrapper(decorated):
            for hook_name in hook_names:
                self.register(hook_name, decorated)
            else:
                # NOTE(review): this for/else always runs after the loop
                # (there is no break), so the function is additionally
                # registered under its own name, plus a dashed alias when
                # the name contains '_'. Looks intentional — confirm.
                self.register(decorated.__name__, decorated)
                if '_' in decorated.__name__:
                    self.register(
                        decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
2908 | 738 | |||
2909 | 739 | |||
def charm_dir():
    """Return the root directory of the current charm.

    Reads the CHARM_DIR environment variable; returns None when unset.
    """
    env = os.environ
    return env.get('CHARM_DIR')
2913 | 743 | |||
2914 | 744 | |||
@cached
def action_get(key=None):
    """Return one action parameter, or all key/value pairs when *key* is None."""
    cmd = ['action-get']
    if key is not None:
        cmd.append(key)
    cmd.append('--format=json')
    raw = subprocess.check_output(cmd)
    return json.loads(raw.decode('UTF-8'))
2924 | 754 | |||
2925 | 755 | |||
def action_set(values):
    """Set the key/value results to be returned after the action finishes."""
    cmd = ['action-set']
    cmd.extend('{}={}'.format(k, v) for k, v in values.items())
    subprocess.check_call(cmd)
2932 | 762 | |||
2933 | 763 | |||
def action_fail(message):
    """Mark the action as failed with an error message.

    Results previously set with action_set are preserved.
    """
    subprocess.check_call(['action-fail', message])
2939 | 769 | |||
2940 | 770 | |||
def action_name():
    """Return the name of the currently executing action (or None)."""
    env = os.environ
    return env.get('JUJU_ACTION_NAME')
2944 | 774 | |||
2945 | 775 | |||
def action_uuid():
    """Return the UUID of the currently executing action (or None)."""
    env = os.environ
    return env.get('JUJU_ACTION_UUID')
2949 | 779 | |||
2950 | 780 | |||
def action_tag():
    """Return the tag of the currently executing action (or None)."""
    env = os.environ
    return env.get('JUJU_ACTION_TAG')
2954 | 784 | |||
2955 | 785 | |||
def status_set(workload_state, message):
    """Set the workload state with a message visible via ``juju status``.

    If the status-set command is not found then assume this is juju < 1.23
    and juju-log the message instead.

    workload_state -- valid juju workload state.
    message -- status update message

    Raises ValueError for an invalid workload_state.
    """
    if workload_state not in ('maintenance', 'blocked', 'waiting', 'active'):
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state)
        )
    try:
        # A zero exit status means juju accepted the update.
        if subprocess.call(['status-set', workload_state, message]) == 0:
            return
    except OSError as e:
        # Only a missing status-set binary falls through to logging.
        if e.errno != errno.ENOENT:
            raise
    log('status-set failed: {} {}'.format(workload_state, message),
        level='INFO')
2982 | 812 | |||
2983 | 813 | |||
def status_get():
    """Return the previously set juju (workload state, message) pair.

    If the status-get command is not found then assume this is juju < 1.23
    and return ('unknown', "").
    """
    cmd = ['status-get', "--format=json", "--include-data"]
    try:
        raw_status = subprocess.check_output(cmd)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        # Tool absent: old juju, no stored status.
        return ('unknown', "")
    status = json.loads(raw_status.decode("UTF-8"))
    return (status["status"], status["message"])
3002 | 832 | |||
3003 | 833 | |||
def translate_exc(from_exc, to_exc):
    """Decorator factory: re-raise *from_exc* from the wrapped call as *to_exc*."""
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except from_exc:
                raise to_exc
        return wrapped
    return decorator
3016 | 846 | |||
3017 | 847 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
    """Return True when this unit holds the juju leadership of its peers.

    Raises NotImplementedError when the is-leader tool is unavailable.
    """
    out = subprocess.check_output(['is-leader', '--format=json'])
    return json.loads(out.decode('UTF-8'))
3026 | 856 | |||
3027 | 857 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_get(attribute=None):
    """Return juju leader setting(s); all of them when *attribute* is None."""
    cmd = ['leader-get', '--format=json', attribute or '-']
    raw = subprocess.check_output(cmd)
    return json.loads(raw.decode('UTF-8'))
3033 | 863 | |||
3034 | 864 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def leader_set(settings=None, **kwargs):
    """Write juju leader setting(s).

    A value of None unsets the key. Settings may be passed as a dict
    and/or as keyword arguments; kwargs take precedence on key clash.
    """
    # Don't log secrets.
    # log("Juju leader-set '%s'" % (settings), level=DEBUG)
    # Copy before merging: the original mutated the caller-supplied dict
    # via settings.update(kwargs), a surprising side effect.
    combined = dict(settings or {})
    combined.update(kwargs)
    cmd = ['leader-set']
    for k, v in combined.items():
        if v is None:
            cmd.append('{}='.format(k))
        else:
            cmd.append('{}={}'.format(k, v))
    subprocess.check_call(cmd)
3049 | 879 | |||
3050 | 880 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_register(ptype, klass, pid):
    """Tell Juju, while a hook is running, that a payload has been started."""
    subprocess.check_call(['payload-register', ptype, klass, pid])
3059 | 889 | |||
3060 | 890 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_unregister(klass, pid):
    """Tell Juju, while a hook is running, that a payload was manually stopped.

    klass and pid must match a payload previously registered with juju
    using payload-register.
    """
    subprocess.check_call(['payload-unregister', klass, pid])
3071 | 901 | |||
3072 | 902 | |||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def payload_status_set(klass, pid, status):
    """Update the current status of a registered payload.

    klass and pid must match a payload previously registered with juju
    using payload-register. status must be one of: starting, started,
    stopping, stopped.
    """
    subprocess.check_call(['payload-status-set', klass, pid, status])
3083 | 913 | |||
3084 | 914 | |||
@cached
def juju_version():
    """Return the full juju version string (eg. '1.23.3.1-trusty-amd64')."""
    # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
    candidates = glob.glob('/var/lib/juju/tools/machine-*/jujud')
    output = subprocess.check_output([candidates[0], 'version'],
                                     universal_newlines=True)
    return output.strip()
3092 | 922 | |||
3093 | 923 | |||
@cached
def has_juju_version(minimum_version):
    """Return True if the running Juju is at least *minimum_version*."""
    current = LooseVersion(juju_version())
    return current >= LooseVersion(minimum_version)
3098 | 928 | |||
3099 | 929 | |||
# Callbacks scheduled via atexit(); run in reverse order on hook success.
_atexit = []
# Callbacks scheduled via atstart(); run in insertion order before the hook.
_atstart = []
3102 | 932 | |||
3103 | 933 | |||
def atstart(callback, *args, **kwargs):
    '''Schedule *callback* to run before the main hook.

    Callbacks are run in the order they were added.

    This is useful for modules and classes to perform initialization
    and inject behavior. In particular:

    - Run common code before all of your hooks, such as logging
      the hook name or interesting relation data.
    - Defer object or module initialization that requires a hook
      context until we know there actually is a hook context,
      making testing easier.
    - Rather than requiring charm authors to include boilerplate to
      invoke your helper's behavior, have it run automatically if
      your object is instantiated or module imported.

    This is not at all useful after your hook framework has been launched.
    '''
    # list.append does not rebind the module global, so no 'global' needed.
    _atstart.append((callback, args, kwargs))
3125 | 955 | |||
3126 | 956 | |||
def atexit(callback, *args, **kwargs):
    '''Schedule *callback* to run on successful hook completion.

    Callbacks are run in the reverse order that they were added.
    '''
    _atexit.append((callback, args, kwargs))
3132 | 962 | |||
3133 | 963 | |||
def _run_atstart():
    '''Hook frameworks must invoke this before running the main hook body.

    Runs each scheduled (callback, args, kwargs) in insertion order, then
    empties the schedule in place.
    '''
    for callback, args, kwargs in _atstart:
        callback(*args, **kwargs)
    del _atstart[:]
3140 | 970 | |||
3141 | 971 | |||
def _run_atexit():
    '''Hook frameworks must invoke this after the main hook body has
    successfully completed. Do not invoke it if the hook fails.

    Runs each scheduled (callback, args, kwargs) in reverse insertion
    order, then empties the schedule in place.
    '''
    for callback, args, kwargs in reversed(_atexit):
        callback(*args, **kwargs)
    del _atexit[:]
3149 | 0 | 979 | ||
3150 | === modified file 'unit_tests/test_actions.py' | |||
3151 | --- unit_tests/test_actions.py 2016-01-13 15:13:10 +0000 | |||
3152 | +++ unit_tests/test_actions.py 2016-03-05 15:43:48 +0000 | |||
3153 | @@ -5,7 +5,8 @@ | |||
3154 | 5 | 5 | ||
3155 | 6 | with patch('actions.hooks.keystone_utils.is_paused') as is_paused: | 6 | with patch('actions.hooks.keystone_utils.is_paused') as is_paused: |
3156 | 7 | with patch('actions.hooks.keystone_utils.register_configs') as configs: | 7 | with patch('actions.hooks.keystone_utils.register_configs') as configs: |
3158 | 8 | import actions.actions | 8 | with patch('actions.hooks.keystone_utils.os_release') as os_release: |
3159 | 9 | import actions.actions | ||
3160 | 9 | 10 | ||
3161 | 10 | 11 | ||
3162 | 11 | class PauseTestCase(CharmTestCase): | 12 | class PauseTestCase(CharmTestCase): |
3163 | @@ -15,7 +16,8 @@ | |||
3164 | 15 | actions.actions, ["service_pause", "HookData", "kv", | 16 | actions.actions, ["service_pause", "HookData", "kv", |
3165 | 16 | "assess_status"]) | 17 | "assess_status"]) |
3166 | 17 | 18 | ||
3168 | 18 | def test_pauses_services(self): | 19 | @patch('actions.hooks.keystone_utils.os_release') |
3169 | 20 | def test_pauses_services(self, os_release): | ||
3170 | 19 | """Pause action pauses all Keystone services.""" | 21 | """Pause action pauses all Keystone services.""" |
3171 | 20 | pause_calls = [] | 22 | pause_calls = [] |
3172 | 21 | 23 | ||
3173 | @@ -29,7 +31,8 @@ | |||
3174 | 29 | self.assertItemsEqual( | 31 | self.assertItemsEqual( |
3175 | 30 | pause_calls, ['haproxy', 'keystone', 'apache2']) | 32 | pause_calls, ['haproxy', 'keystone', 'apache2']) |
3176 | 31 | 33 | ||
3178 | 32 | def test_bails_out_early_on_error(self): | 34 | @patch('actions.hooks.keystone_utils.os_release') |
3179 | 35 | def test_bails_out_early_on_error(self, os_release): | ||
3180 | 33 | """Pause action fails early if there are errors stopping a service.""" | 36 | """Pause action fails early if there are errors stopping a service.""" |
3181 | 34 | pause_calls = [] | 37 | pause_calls = [] |
3182 | 35 | 38 | ||
3183 | @@ -46,7 +49,8 @@ | |||
3184 | 46 | actions.actions.pause, []) | 49 | actions.actions.pause, []) |
3185 | 47 | self.assertEqual(pause_calls, ['haproxy']) | 50 | self.assertEqual(pause_calls, ['haproxy']) |
3186 | 48 | 51 | ||
3188 | 49 | def test_pause_sets_value(self): | 52 | @patch('actions.hooks.keystone_utils.os_release') |
3189 | 53 | def test_pause_sets_value(self, os_release): | ||
3190 | 50 | """Pause action sets the unit-paused value to True.""" | 54 | """Pause action sets the unit-paused value to True.""" |
3191 | 51 | self.HookData()().return_value = True | 55 | self.HookData()().return_value = True |
3192 | 52 | 56 | ||
3193 | @@ -61,7 +65,8 @@ | |||
3194 | 61 | actions.actions, ["service_resume", "HookData", "kv", | 65 | actions.actions, ["service_resume", "HookData", "kv", |
3195 | 62 | "assess_status"]) | 66 | "assess_status"]) |
3196 | 63 | 67 | ||
3198 | 64 | def test_resumes_services(self): | 68 | @patch('actions.hooks.keystone_utils.os_release') |
3199 | 69 | def test_resumes_services(self, os_release): | ||
3200 | 65 | """Resume action resumes all Keystone services.""" | 70 | """Resume action resumes all Keystone services.""" |
3201 | 66 | resume_calls = [] | 71 | resume_calls = [] |
3202 | 67 | 72 | ||
3203 | @@ -73,7 +78,8 @@ | |||
3204 | 73 | actions.actions.resume([]) | 78 | actions.actions.resume([]) |
3205 | 74 | self.assertEqual(resume_calls, ['haproxy', 'keystone', 'apache2']) | 79 | self.assertEqual(resume_calls, ['haproxy', 'keystone', 'apache2']) |
3206 | 75 | 80 | ||
3208 | 76 | def test_bails_out_early_on_error(self): | 81 | @patch('actions.hooks.keystone_utils.os_release') |
3209 | 82 | def test_bails_out_early_on_error(self, os_release): | ||
3210 | 77 | """Resume action fails early if there are errors starting a service.""" | 83 | """Resume action fails early if there are errors starting a service.""" |
3211 | 78 | resume_calls = [] | 84 | resume_calls = [] |
3212 | 79 | 85 | ||
3213 | @@ -90,7 +96,8 @@ | |||
3214 | 90 | actions.actions.resume, []) | 96 | actions.actions.resume, []) |
3215 | 91 | self.assertEqual(resume_calls, ['haproxy']) | 97 | self.assertEqual(resume_calls, ['haproxy']) |
3216 | 92 | 98 | ||
3218 | 93 | def test_resume_sets_value(self): | 99 | @patch('actions.hooks.keystone_utils.os_release') |
3219 | 100 | def test_resume_sets_value(self, os_release): | ||
3220 | 94 | """Resume action sets the unit-paused value to False.""" | 101 | """Resume action sets the unit-paused value to False.""" |
3221 | 95 | self.HookData()().return_value = True | 102 | self.HookData()().return_value = True |
3222 | 96 | 103 | ||
3223 | 97 | 104 | ||
3224 | === modified file 'unit_tests/test_actions_git_reinstall.py' | |||
3225 | --- unit_tests/test_actions_git_reinstall.py 2015-10-30 23:30:09 +0000 | |||
3226 | +++ unit_tests/test_actions_git_reinstall.py 2016-03-05 15:43:48 +0000 | |||
3227 | @@ -1,7 +1,8 @@ | |||
3228 | 1 | from mock import patch | 1 | from mock import patch |
3229 | 2 | 2 | ||
3230 | 3 | with patch('hooks.keystone_utils.register_configs') as register_configs: | 3 | with patch('hooks.keystone_utils.register_configs') as register_configs: |
3232 | 4 | import git_reinstall | 4 | with patch('hooks.keystone_utils.os_release') as os_release: |
3233 | 5 | import git_reinstall | ||
3234 | 5 | 6 | ||
3235 | 6 | from test_utils import ( | 7 | from test_utils import ( |
3236 | 7 | CharmTestCase | 8 | CharmTestCase |
3237 | 8 | 9 | ||
3238 | === modified file 'unit_tests/test_actions_openstack_upgrade.py' | |||
3239 | --- unit_tests/test_actions_openstack_upgrade.py 2015-10-19 13:33:33 +0000 | |||
3240 | +++ unit_tests/test_actions_openstack_upgrade.py 2016-03-05 15:43:48 +0000 | |||
3241 | @@ -3,9 +3,12 @@ | |||
3242 | 3 | 3 | ||
3243 | 4 | os.environ['JUJU_UNIT_NAME'] = 'keystone' | 4 | os.environ['JUJU_UNIT_NAME'] = 'keystone' |
3244 | 5 | 5 | ||
3245 | 6 | # with patch('charmhelpers.contrib.openstack.utils.os_release') as os_release: | ||
3246 | 7 | # with patch('keystone_hooks.os_release') as os_release: | ||
3247 | 6 | with patch('keystone_utils.register_configs') as register_configs: | 8 | with patch('keystone_utils.register_configs') as register_configs: |
3250 | 7 | import openstack_upgrade | 9 | with patch('keystone_utils.os_release') as os_release: |
3251 | 8 | import keystone_hooks as hooks | 10 | import openstack_upgrade |
3252 | 11 | import keystone_hooks as hooks | ||
3253 | 9 | 12 | ||
3254 | 10 | from test_utils import ( | 13 | from test_utils import ( |
3255 | 11 | CharmTestCase | 14 | CharmTestCase |
3256 | @@ -23,13 +26,14 @@ | |||
3257 | 23 | super(TestKeystoneUpgradeActions, self).setUp(openstack_upgrade, | 26 | super(TestKeystoneUpgradeActions, self).setUp(openstack_upgrade, |
3258 | 24 | TO_PATCH) | 27 | TO_PATCH) |
3259 | 25 | 28 | ||
3260 | 29 | @patch.object(hooks, 'os_release') | ||
3261 | 26 | @patch.object(hooks, 'register_configs') | 30 | @patch.object(hooks, 'register_configs') |
3262 | 27 | @patch('charmhelpers.contrib.openstack.utils.config') | 31 | @patch('charmhelpers.contrib.openstack.utils.config') |
3263 | 28 | @patch('charmhelpers.contrib.openstack.utils.action_set') | 32 | @patch('charmhelpers.contrib.openstack.utils.action_set') |
3264 | 29 | @patch('charmhelpers.contrib.openstack.utils.git_install_requested') | 33 | @patch('charmhelpers.contrib.openstack.utils.git_install_requested') |
3265 | 30 | @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') | 34 | @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') |
3266 | 31 | def test_openstack_upgrade_true(self, upgrade_avail, git_requested, | 35 | def test_openstack_upgrade_true(self, upgrade_avail, git_requested, |
3268 | 32 | action_set, config, reg_configs): | 36 | action_set, config, reg_configs, os_rel): |
3269 | 33 | git_requested.return_value = False | 37 | git_requested.return_value = False |
3270 | 34 | upgrade_avail.return_value = True | 38 | upgrade_avail.return_value = True |
3271 | 35 | config.return_value = True | 39 | config.return_value = True |
3272 | @@ -40,13 +44,14 @@ | |||
3273 | 40 | self.os.execl.assert_called_with('./hooks/config-changed-postupgrade', | 44 | self.os.execl.assert_called_with('./hooks/config-changed-postupgrade', |
3274 | 41 | '') | 45 | '') |
3275 | 42 | 46 | ||
3276 | 47 | @patch.object(hooks, 'os_release') | ||
3277 | 43 | @patch.object(hooks, 'register_configs') | 48 | @patch.object(hooks, 'register_configs') |
3278 | 44 | @patch('charmhelpers.contrib.openstack.utils.config') | 49 | @patch('charmhelpers.contrib.openstack.utils.config') |
3279 | 45 | @patch('charmhelpers.contrib.openstack.utils.action_set') | 50 | @patch('charmhelpers.contrib.openstack.utils.action_set') |
3280 | 46 | @patch('charmhelpers.contrib.openstack.utils.git_install_requested') | 51 | @patch('charmhelpers.contrib.openstack.utils.git_install_requested') |
3281 | 47 | @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') | 52 | @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') |
3282 | 48 | def test_openstack_upgrade_false(self, upgrade_avail, git_requested, | 53 | def test_openstack_upgrade_false(self, upgrade_avail, git_requested, |
3284 | 49 | action_set, config, reg_configs): | 54 | action_set, config, reg_configs, os_rel): |
3285 | 50 | git_requested.return_value = False | 55 | git_requested.return_value = False |
3286 | 51 | upgrade_avail.return_value = True | 56 | upgrade_avail.return_value = True |
3287 | 52 | config.return_value = False | 57 | config.return_value = False |
3288 | 53 | 58 | ||
3289 | === modified file 'unit_tests/test_keystone_hooks.py' | |||
3290 | --- unit_tests/test_keystone_hooks.py 2016-01-12 11:09:46 +0000 | |||
3291 | +++ unit_tests/test_keystone_hooks.py 2016-03-05 15:43:48 +0000 | |||
3292 | @@ -73,6 +73,7 @@ | |||
3293 | 73 | 'git_install', | 73 | 'git_install', |
3294 | 74 | 'is_service_present', | 74 | 'is_service_present', |
3295 | 75 | 'delete_service_entry', | 75 | 'delete_service_entry', |
3296 | 76 | 'os_release', | ||
3297 | 76 | ] | 77 | ] |
3298 | 77 | 78 | ||
3299 | 78 | 79 | ||
3300 | @@ -83,9 +84,10 @@ | |||
3301 | 83 | self.config.side_effect = self.test_config.get | 84 | self.config.side_effect = self.test_config.get |
3302 | 84 | self.ssh_user = 'juju_keystone' | 85 | self.ssh_user = 'juju_keystone' |
3303 | 85 | 86 | ||
3304 | 87 | @patch.object(utils, 'os_release') | ||
3305 | 86 | @patch.object(utils, 'git_install_requested') | 88 | @patch.object(utils, 'git_install_requested') |
3306 | 87 | @patch.object(unison, 'ensure_user') | 89 | @patch.object(unison, 'ensure_user') |
3308 | 88 | def test_install_hook(self, ensure_user, git_requested): | 90 | def test_install_hook(self, ensure_user, git_requested, os_release): |
3309 | 89 | git_requested.return_value = False | 91 | git_requested.return_value = False |
3310 | 90 | repo = 'cloud:precise-grizzly' | 92 | repo = 'cloud:precise-grizzly' |
3311 | 91 | self.test_config.set('openstack-origin', repo) | 93 | self.test_config.set('openstack-origin', repo) |
3312 | @@ -100,9 +102,10 @@ | |||
3313 | 100 | 'python-six', 'unison', 'uuid'], fatal=True) | 102 | 'python-six', 'unison', 'uuid'], fatal=True) |
3314 | 101 | self.git_install.assert_called_with(None) | 103 | self.git_install.assert_called_with(None) |
3315 | 102 | 104 | ||
3316 | 105 | @patch.object(utils, 'os_release') | ||
3317 | 103 | @patch.object(utils, 'git_install_requested') | 106 | @patch.object(utils, 'git_install_requested') |
3318 | 104 | @patch.object(unison, 'ensure_user') | 107 | @patch.object(unison, 'ensure_user') |
3320 | 105 | def test_install_hook_git(self, ensure_user, git_requested): | 108 | def test_install_hook_git(self, ensure_user, git_requested, os_release): |
3321 | 106 | git_requested.return_value = True | 109 | git_requested.return_value = True |
3322 | 107 | repo = 'cloud:trusty-juno' | 110 | repo = 'cloud:trusty-juno' |
3323 | 108 | openstack_origin_git = { | 111 | openstack_origin_git = { |
3324 | @@ -135,6 +138,7 @@ | |||
3325 | 135 | 138 | ||
3326 | 136 | mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils' | 139 | mod_ch_openstack_utils = 'charmhelpers.contrib.openstack.utils' |
3327 | 137 | 140 | ||
3328 | 141 | @patch.object(utils, 'os_release') | ||
3329 | 138 | @patch.object(hooks, 'config') | 142 | @patch.object(hooks, 'config') |
3330 | 139 | @patch('%s.config' % (mod_ch_openstack_utils)) | 143 | @patch('%s.config' % (mod_ch_openstack_utils)) |
3331 | 140 | @patch('%s.relation_set' % (mod_ch_openstack_utils)) | 144 | @patch('%s.relation_set' % (mod_ch_openstack_utils)) |
3332 | @@ -143,7 +147,7 @@ | |||
3333 | 143 | @patch('%s.sync_db_with_multi_ipv6_addresses' % (mod_ch_openstack_utils)) | 147 | @patch('%s.sync_db_with_multi_ipv6_addresses' % (mod_ch_openstack_utils)) |
3334 | 144 | def test_db_joined(self, mock_sync_db_with_multi, mock_get_ipv6_addr, | 148 | def test_db_joined(self, mock_sync_db_with_multi, mock_get_ipv6_addr, |
3335 | 145 | mock_relation_ids, mock_relation_set, mock_config, | 149 | mock_relation_ids, mock_relation_set, mock_config, |
3337 | 146 | mock_hooks_config): | 150 | mock_hooks_config, os_release): |
3338 | 147 | 151 | ||
3339 | 148 | cfg_dict = {'prefer-ipv6': False, | 152 | cfg_dict = {'prefer-ipv6': False, |
3340 | 149 | 'database': 'keystone', | 153 | 'database': 'keystone', |
3341 | @@ -317,6 +321,7 @@ | |||
3342 | 317 | mock_ensure_ssl_cert_master, mock_log, | 321 | mock_ensure_ssl_cert_master, mock_log, |
3343 | 318 | mock_peer_store, mock_peer_retrieve, | 322 | mock_peer_store, mock_peer_retrieve, |
3344 | 319 | mock_relation_ids): | 323 | mock_relation_ids): |
3345 | 324 | self.os_release.return_value = 'kilo' | ||
3346 | 320 | mock_relation_ids.return_value = ['peer/0'] | 325 | mock_relation_ids.return_value = ['peer/0'] |
3347 | 321 | 326 | ||
3348 | 322 | peer_settings = {} | 327 | peer_settings = {} |
3349 | @@ -907,6 +912,7 @@ | |||
3350 | 907 | cmd = ['a2dissite', 'openstack_https_frontend'] | 912 | cmd = ['a2dissite', 'openstack_https_frontend'] |
3351 | 908 | self.check_call.assert_called_with(cmd) | 913 | self.check_call.assert_called_with(cmd) |
3352 | 909 | 914 | ||
3353 | 915 | @patch.object(utils, 'os_release') | ||
3354 | 910 | @patch.object(utils, 'git_install_requested') | 916 | @patch.object(utils, 'git_install_requested') |
3355 | 911 | @patch.object(hooks, 'is_db_ready') | 917 | @patch.object(hooks, 'is_db_ready') |
3356 | 912 | @patch.object(hooks, 'is_db_initialised') | 918 | @patch.object(hooks, 'is_db_initialised') |
3357 | @@ -926,7 +932,8 @@ | |||
3358 | 926 | mock_log, | 932 | mock_log, |
3359 | 927 | mock_is_db_initialised, | 933 | mock_is_db_initialised, |
3360 | 928 | mock_is_db_ready, | 934 | mock_is_db_ready, |
3362 | 929 | git_requested): | 935 | git_requested, |
3363 | 936 | os_release): | ||
3364 | 930 | mock_is_db_initialised.return_value = True | 937 | mock_is_db_initialised.return_value = True |
3365 | 931 | mock_is_db_ready.return_value = True | 938 | mock_is_db_ready.return_value = True |
3366 | 932 | mock_is_elected_leader.return_value = False | 939 | mock_is_elected_leader.return_value = False |
3367 | @@ -949,6 +956,7 @@ | |||
3368 | 949 | 'Firing identity_changed hook for all related services.') | 956 | 'Firing identity_changed hook for all related services.') |
3369 | 950 | self.assertTrue(self.ensure_initial_admin.called) | 957 | self.assertTrue(self.ensure_initial_admin.called) |
3370 | 951 | 958 | ||
3371 | 959 | @patch.object(utils, 'os_release') | ||
3372 | 952 | @patch.object(utils, 'git_install_requested') | 960 | @patch.object(utils, 'git_install_requested') |
3373 | 953 | @patch('keystone_utils.log') | 961 | @patch('keystone_utils.log') |
3374 | 954 | @patch('keystone_utils.relation_ids') | 962 | @patch('keystone_utils.relation_ids') |
3375 | @@ -959,7 +967,8 @@ | |||
3376 | 959 | mock_update_hash_from_path, | 967 | mock_update_hash_from_path, |
3377 | 960 | mock_ensure_ssl_cert_master, | 968 | mock_ensure_ssl_cert_master, |
3378 | 961 | mock_relation_ids, | 969 | mock_relation_ids, |
3380 | 962 | mock_log, git_requested): | 970 | mock_log, git_requested, |
3381 | 971 | os_release): | ||
3382 | 963 | mock_relation_ids.return_value = [] | 972 | mock_relation_ids.return_value = [] |
3383 | 964 | mock_ensure_ssl_cert_master.return_value = False | 973 | mock_ensure_ssl_cert_master.return_value = False |
3384 | 965 | # Ensure always returns diff | 974 | # Ensure always returns diff |
3385 | 966 | 975 | ||
3386 | === modified file 'unit_tests/test_keystone_utils.py' | |||
3387 | --- unit_tests/test_keystone_utils.py 2016-02-19 14:49:59 +0000 | |||
3388 | +++ unit_tests/test_keystone_utils.py 2016-03-05 15:43:48 +0000 | |||
3389 | @@ -1,7 +1,6 @@ | |||
3390 | 1 | from mock import patch, call, MagicMock, Mock | 1 | from mock import patch, call, MagicMock, Mock |
3391 | 2 | from test_utils import CharmTestCase | 2 | from test_utils import CharmTestCase |
3392 | 3 | import os | 3 | import os |
3393 | 4 | import manager | ||
3394 | 5 | 4 | ||
3395 | 6 | os.environ['JUJU_UNIT_NAME'] = 'keystone' | 5 | os.environ['JUJU_UNIT_NAME'] = 'keystone' |
3396 | 7 | with patch('charmhelpers.core.hookenv.config') as config: | 6 | with patch('charmhelpers.core.hookenv.config') as config: |
3397 | @@ -172,10 +171,11 @@ | |||
3398 | 172 | self.subprocess.check_output.assert_called_with(cmd) | 171 | self.subprocess.check_output.assert_called_with(cmd) |
3399 | 173 | self.service_start.assert_called_with('keystone') | 172 | self.service_start.assert_called_with('keystone') |
3400 | 174 | 173 | ||
3401 | 174 | @patch.object(utils, 'get_manager') | ||
3402 | 175 | @patch.object(utils, 'resolve_address') | 175 | @patch.object(utils, 'resolve_address') |
3403 | 176 | @patch.object(utils, 'b64encode') | 176 | @patch.object(utils, 'b64encode') |
3404 | 177 | def test_add_service_to_keystone_clustered_https_none_values( | 177 | def test_add_service_to_keystone_clustered_https_none_values( |
3406 | 178 | self, b64encode, _resolve_address): | 178 | self, b64encode, _resolve_address, _get_manager): |
3407 | 179 | relation_id = 'identity-service:0' | 179 | relation_id = 'identity-service:0' |
3408 | 180 | remote_unit = 'unit/0' | 180 | remote_unit = 'unit/0' |
3409 | 181 | _resolve_address.return_value = '10.10.10.10' | 181 | _resolve_address.return_value = '10.10.10.10' |
3410 | @@ -214,7 +214,7 @@ | |||
3411 | 214 | @patch.object(utils, 'resolve_address') | 214 | @patch.object(utils, 'resolve_address') |
3412 | 215 | @patch.object(utils, 'ensure_valid_service') | 215 | @patch.object(utils, 'ensure_valid_service') |
3413 | 216 | @patch.object(utils, 'add_endpoint') | 216 | @patch.object(utils, 'add_endpoint') |
3415 | 217 | @patch.object(manager, 'KeystoneManager') | 217 | @patch.object(utils, 'get_manager') |
3416 | 218 | def test_add_service_to_keystone_no_clustered_no_https_complete_values( | 218 | def test_add_service_to_keystone_no_clustered_no_https_complete_values( |
3417 | 219 | self, KeystoneManager, add_endpoint, ensure_valid_service, | 219 | self, KeystoneManager, add_endpoint, ensure_valid_service, |
3418 | 220 | _resolve_address): | 220 | _resolve_address): |
3419 | @@ -253,9 +253,12 @@ | |||
3420 | 253 | internalurl='192.168.1.2') | 253 | internalurl='192.168.1.2') |
3421 | 254 | self.assertTrue(self.get_admin_token.called) | 254 | self.assertTrue(self.get_admin_token.called) |
3422 | 255 | self.get_service_password.assert_called_with('keystone') | 255 | self.get_service_password.assert_called_with('keystone') |
3426 | 256 | self.create_user.assert_called_with('keystone', 'password', 'tenant') | 256 | self.create_user.assert_called_with('keystone', 'password', 'tenant', |
3427 | 257 | self.grant_role.assert_called_with('keystone', 'admin', 'tenant') | 257 | None) |
3428 | 258 | self.create_role.assert_called_with('role1', 'keystone', 'tenant') | 258 | self.grant_role.assert_called_with('keystone', 'Admin', 'tenant', |
3429 | 259 | 'default') | ||
3430 | 260 | self.create_role.assert_called_with('role1', 'keystone', 'tenant', | ||
3431 | 261 | None) | ||
3432 | 259 | 262 | ||
3433 | 260 | relation_data = {'auth_host': '10.0.0.3', 'service_host': '10.0.0.3', | 263 | relation_data = {'auth_host': '10.0.0.3', 'service_host': '10.0.0.3', |
3434 | 261 | 'admin_token': 'token', 'service_port': 81, | 264 | 'admin_token': 'token', 'service_port': 81, |
3435 | @@ -266,7 +269,7 @@ | |||
3436 | 266 | 'ssl_cert': '__null__', 'ssl_key': '__null__', | 269 | 'ssl_cert': '__null__', 'ssl_key': '__null__', |
3437 | 267 | 'ca_cert': '__null__', | 270 | 'ca_cert': '__null__', |
3438 | 268 | 'auth_protocol': 'http', 'service_protocol': 'http', | 271 | 'auth_protocol': 'http', 'service_protocol': 'http', |
3440 | 269 | 'service_tenant_id': 'tenant_id'} | 272 | 'service_tenant_id': 'tenant_id', 'api_version': 2} |
3441 | 270 | 273 | ||
3442 | 271 | filtered = {} | 274 | filtered = {} |
3443 | 272 | for k, v in relation_data.iteritems(): | 275 | for k, v in relation_data.iteritems(): |
3444 | @@ -284,7 +287,7 @@ | |||
3445 | 284 | @patch('charmhelpers.contrib.openstack.ip.config') | 287 | @patch('charmhelpers.contrib.openstack.ip.config') |
3446 | 285 | @patch.object(utils, 'ensure_valid_service') | 288 | @patch.object(utils, 'ensure_valid_service') |
3447 | 286 | @patch.object(utils, 'add_endpoint') | 289 | @patch.object(utils, 'add_endpoint') |
3449 | 287 | @patch.object(manager, 'KeystoneManager') | 290 | @patch.object(utils, 'get_manager') |
3450 | 288 | def test_add_service_to_keystone_nosubset( | 291 | def test_add_service_to_keystone_nosubset( |
3451 | 289 | self, KeystoneManager, add_endpoint, ensure_valid_service, | 292 | self, KeystoneManager, add_endpoint, ensure_valid_service, |
3452 | 290 | ip_config): | 293 | ip_config): |
3453 | @@ -317,8 +320,9 @@ | |||
3454 | 317 | mock_grant_role, | 320 | mock_grant_role, |
3455 | 318 | mock_user_exists): | 321 | mock_user_exists): |
3456 | 319 | mock_user_exists.return_value = False | 322 | mock_user_exists.return_value = False |
3459 | 320 | utils.create_user_credentials('userA', 'tenantA', 'passA') | 323 | utils.create_user_credentials('userA', 'passA', tenant='tenantA') |
3460 | 321 | mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA')]) | 324 | mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA', |
3461 | 325 | None)]) | ||
3462 | 322 | mock_create_role.assert_has_calls([]) | 326 | mock_create_role.assert_has_calls([]) |
3463 | 323 | mock_grant_role.assert_has_calls([]) | 327 | mock_grant_role.assert_has_calls([]) |
3464 | 324 | 328 | ||
3465 | @@ -329,11 +333,14 @@ | |||
3466 | 329 | def test_create_user_credentials(self, mock_create_user, mock_create_role, | 333 | def test_create_user_credentials(self, mock_create_user, mock_create_role, |
3467 | 330 | mock_grant_role, mock_user_exists): | 334 | mock_grant_role, mock_user_exists): |
3468 | 331 | mock_user_exists.return_value = False | 335 | mock_user_exists.return_value = False |
3470 | 332 | utils.create_user_credentials('userA', 'tenantA', 'passA', | 336 | utils.create_user_credentials('userA', 'passA', tenant='tenantA', |
3471 | 333 | grants=['roleA'], new_roles=['roleB']) | 337 | grants=['roleA'], new_roles=['roleB']) |
3475 | 334 | mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA')]) | 338 | mock_create_user.assert_has_calls([call('userA', 'passA', 'tenantA', |
3476 | 335 | mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA')]) | 339 | None)]) |
3477 | 336 | mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA')]) | 340 | mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA', |
3478 | 341 | None)]) | ||
3479 | 342 | mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA', | ||
3480 | 343 | None)]) | ||
3481 | 337 | 344 | ||
3482 | 338 | @patch.object(utils, 'update_user_password') | 345 | @patch.object(utils, 'update_user_password') |
3483 | 339 | @patch.object(utils, 'user_exists') | 346 | @patch.object(utils, 'user_exists') |
3484 | @@ -346,11 +353,13 @@ | |||
3485 | 346 | mock_user_exists, | 353 | mock_user_exists, |
3486 | 347 | mock_update_user_password): | 354 | mock_update_user_password): |
3487 | 348 | mock_user_exists.return_value = True | 355 | mock_user_exists.return_value = True |
3489 | 349 | utils.create_user_credentials('userA', 'tenantA', 'passA', | 356 | utils.create_user_credentials('userA', 'passA', tenant='tenantA', |
3490 | 350 | grants=['roleA'], new_roles=['roleB']) | 357 | grants=['roleA'], new_roles=['roleB']) |
3491 | 351 | mock_create_user.assert_has_calls([]) | 358 | mock_create_user.assert_has_calls([]) |
3494 | 352 | mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA')]) | 359 | mock_create_role.assert_has_calls([call('roleB', 'userA', 'tenantA', |
3495 | 353 | mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA')]) | 360 | None)]) |
3496 | 361 | mock_grant_role.assert_has_calls([call('userA', 'roleA', 'tenantA', | ||
3497 | 362 | None)]) | ||
3498 | 354 | mock_update_user_password.assert_has_calls([call('userA', 'passA')]) | 363 | mock_update_user_password.assert_has_calls([call('userA', 'passA')]) |
3499 | 355 | 364 | ||
3500 | 356 | @patch.object(utils, 'get_service_password') | 365 | @patch.object(utils, 'get_service_password') |
3501 | @@ -358,10 +367,12 @@ | |||
3502 | 358 | def test_create_service_credentials(self, mock_create_user_credentials, | 367 | def test_create_service_credentials(self, mock_create_user_credentials, |
3503 | 359 | mock_get_service_password): | 368 | mock_get_service_password): |
3504 | 360 | mock_get_service_password.return_value = 'passA' | 369 | mock_get_service_password.return_value = 'passA' |
3506 | 361 | cfg = {'service-tenant': 'tenantA', 'admin-role': 'Admin'} | 370 | cfg = {'service-tenant': 'tenantA', 'admin-role': 'Admin', |
3507 | 371 | 'preferred-api-version': 2} | ||
3508 | 362 | self.config.side_effect = lambda key: cfg.get(key, None) | 372 | self.config.side_effect = lambda key: cfg.get(key, None) |
3511 | 363 | calls = [call('serviceA', 'tenantA', 'passA', grants=['Admin'], | 373 | calls = [call('serviceA', 'passA', domain=None, grants=['Admin'], |
3512 | 364 | new_roles=None)] | 374 | new_roles=None, tenant='tenantA')] |
3513 | 375 | |||
3514 | 365 | utils.create_service_credentials('serviceA') | 376 | utils.create_service_credentials('serviceA') |
3515 | 366 | mock_create_user_credentials.assert_has_calls(calls) | 377 | mock_create_user_credentials.assert_has_calls(calls) |
3516 | 367 | 378 | ||
3517 | @@ -594,7 +605,8 @@ | |||
3518 | 594 | internal_ip='10.0.0.1', | 605 | internal_ip='10.0.0.1', |
3519 | 595 | admin_ip='10.0.0.1', | 606 | admin_ip='10.0.0.1', |
3520 | 596 | auth_port=35357, | 607 | auth_port=35357, |
3522 | 597 | region='RegionOne' | 608 | region='RegionOne', |
3523 | 609 | api_version=2, | ||
3524 | 598 | ) | 610 | ) |
3525 | 599 | 611 | ||
3526 | 600 | @patch.object(utils, 'peer_units') | 612 | @patch.object(utils, 'peer_units') |
3527 | @@ -704,21 +716,21 @@ | |||
3528 | 704 | self.assertEquals(render.call_args_list, expected) | 716 | self.assertEquals(render.call_args_list, expected) |
3529 | 705 | service_restart.assert_called_with('keystone') | 717 | service_restart.assert_called_with('keystone') |
3530 | 706 | 718 | ||
3532 | 707 | @patch.object(manager, 'KeystoneManager') | 719 | @patch.object(utils, 'get_manager') |
3533 | 708 | def test_is_service_present(self, KeystoneManager): | 720 | def test_is_service_present(self, KeystoneManager): |
3534 | 709 | mock_keystone = MagicMock() | 721 | mock_keystone = MagicMock() |
3535 | 710 | mock_keystone.resolve_service_id.return_value = 'sid1' | 722 | mock_keystone.resolve_service_id.return_value = 'sid1' |
3536 | 711 | KeystoneManager.return_value = mock_keystone | 723 | KeystoneManager.return_value = mock_keystone |
3537 | 712 | self.assertTrue(utils.is_service_present('bob', 'bill')) | 724 | self.assertTrue(utils.is_service_present('bob', 'bill')) |
3538 | 713 | 725 | ||
3540 | 714 | @patch.object(manager, 'KeystoneManager') | 726 | @patch.object(utils, 'get_manager') |
3541 | 715 | def test_is_service_present_false(self, KeystoneManager): | 727 | def test_is_service_present_false(self, KeystoneManager): |
3542 | 716 | mock_keystone = MagicMock() | 728 | mock_keystone = MagicMock() |
3543 | 717 | mock_keystone.resolve_service_id.return_value = None | 729 | mock_keystone.resolve_service_id.return_value = None |
3544 | 718 | KeystoneManager.return_value = mock_keystone | 730 | KeystoneManager.return_value = mock_keystone |
3545 | 719 | self.assertFalse(utils.is_service_present('bob', 'bill')) | 731 | self.assertFalse(utils.is_service_present('bob', 'bill')) |
3546 | 720 | 732 | ||
3548 | 721 | @patch.object(manager, 'KeystoneManager') | 733 | @patch.object(utils, 'get_manager') |
3549 | 722 | def test_delete_service_entry(self, KeystoneManager): | 734 | def test_delete_service_entry(self, KeystoneManager): |
3550 | 723 | mock_keystone = MagicMock() | 735 | mock_keystone = MagicMock() |
3551 | 724 | mock_keystone.resolve_service_id.return_value = 'sid1' | 736 | mock_keystone.resolve_service_id.return_value = 'sid1' |