Merge lp:~james-page/charms/precise/keystone/aggressive-refactor into lp:~openstack-charmers/charms/precise/keystone/ha-support
- Precise Pangolin (12.04)
- aggressive-refactor
- Merge into ha-support
Proposed by
James Page
Status: Merged
Merged at revision: 58
Proposed branch: lp:~james-page/charms/precise/keystone/aggressive-refactor
Merge into: lp:~openstack-charmers/charms/precise/keystone/ha-support
Diff against target: 2364 lines (+1121/-423), 11 files modified:
  hooks/keystone_hooks.py (+200/-160)
  hooks/keystone_ssl.py (+18/-15)
  hooks/keystone_utils.py (+154/-220)
  hooks/lib/apache_utils.py (+196/-0)
  hooks/lib/cluster_utils.py (+130/-0)
  hooks/lib/haproxy_utils.py (+55/-0)
  hooks/lib/openstack_common.py (+8/-3)
  hooks/lib/unison.py (+24/-19)
  hooks/lib/utils.py (+329/-0)
  hooks/manager.py (+1/-0)
  templates/haproxy.cfg (+6/-6)
To merge this branch: bzr merge lp:~james-page/charms/precise/keystone/aggressive-refactor
Related bugs: none listed
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| OpenStack Charmers | | | Pending |
Review via email: mp+154093@code.launchpad.net |
Commit message
Description of the change
Refactoring to use shared charm helpers where possible.
Service/Admin port now fixed with haproxy + api port pipelining configuration
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified symlink 'hooks/cluster-relation-changed' |
2 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
3 | === modified symlink 'hooks/cluster-relation-departed' |
4 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
5 | === modified symlink 'hooks/cluster-relation-joined' |
6 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
7 | === modified symlink 'hooks/config-changed' |
8 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
9 | === modified symlink 'hooks/ha-relation-changed' |
10 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
11 | === modified symlink 'hooks/ha-relation-joined' |
12 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
13 | === modified symlink 'hooks/identity-service-relation-changed' |
14 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
15 | === modified symlink 'hooks/identity-service-relation-joined' |
16 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
17 | === modified symlink 'hooks/install' |
18 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
19 | === renamed file 'hooks/keystone-hooks' => 'hooks/keystone_hooks.py' |
20 | --- hooks/keystone-hooks 2013-03-11 09:10:47 +0000 |
21 | +++ hooks/keystone_hooks.py 2013-03-19 14:13:27 +0000 |
22 | @@ -1,19 +1,51 @@ |
23 | #!/usr/bin/python |
24 | |
25 | -import sys |
26 | import time |
27 | import urlparse |
28 | |
29 | from base64 import b64encode |
30 | |
31 | -from utils import * |
32 | +from keystone_utils import ( |
33 | + config_get, |
34 | + execute, |
35 | + update_config_block, |
36 | + set_admin_token, |
37 | + ensure_initial_admin, |
38 | + create_service_entry, |
39 | + create_endpoint_template, |
40 | + create_role, |
41 | + get_admin_token, |
42 | + get_service_password, |
43 | + create_user, |
44 | + grant_role, |
45 | + get_ca, |
46 | + synchronize_service_credentials, |
47 | + do_openstack_upgrade, |
48 | + configure_pki_tokens, |
49 | + SSH_USER, |
50 | + SSL_DIR, |
51 | + CLUSTER_RES, |
52 | + https |
53 | + ) |
54 | |
55 | -from lib.openstack_common import * |
56 | +from lib.openstack_common import ( |
57 | + get_os_codename_install_source, |
58 | + get_os_codename_package, |
59 | + get_os_version_codename, |
60 | + get_os_version_package, |
61 | + save_script_rc |
62 | + ) |
63 | import lib.unison as unison |
64 | +import lib.utils as utils |
65 | +import lib.cluster_utils as cluster |
66 | +import lib.haproxy_utils as haproxy |
67 | |
68 | config = config_get() |
69 | |
70 | -packages = "keystone python-mysqldb pwgen haproxy python-jinja2 openssl unison" |
71 | +packages = [ |
72 | + "keystone", "python-mysqldb", "pwgen", |
73 | + "haproxy", "python-jinja2", "openssl", "unison" |
74 | + ] |
75 | service = "keystone" |
76 | |
77 | # used to verify joined services are valid openstack components. |
78 | @@ -62,13 +94,14 @@ |
79 | } |
80 | } |
81 | |
82 | + |
83 | def install_hook(): |
84 | - if config["openstack-origin"] != "distro": |
85 | - configure_installation_source(config["openstack-origin"]) |
86 | - execute("apt-get update", die=True) |
87 | - execute("apt-get -y install %s" % packages, die=True, echo=True) |
88 | - update_config_block('DEFAULT', public_port=config["service-port"]) |
89 | - update_config_block('DEFAULT', admin_port=config["admin-port"]) |
90 | + utils.configure_source() |
91 | + utils.install(*packages) |
92 | + update_config_block('DEFAULT', |
93 | + public_port=cluster.determine_api_port(config["service-port"])) |
94 | + update_config_block('DEFAULT', |
95 | + admin_port=cluster.determine_api_port(config["admin-port"])) |
96 | set_admin_token(config['admin-token']) |
97 | |
98 | # set all backends to use sql+sqlite, if they are not already by default |
99 | @@ -83,9 +116,9 @@ |
100 | update_config_block('ec2', |
101 | driver='keystone.contrib.ec2.backends.sql.Ec2') |
102 | |
103 | - execute("service keystone stop", echo=True) |
104 | + utils.stop('keystone') |
105 | execute("keystone-manage db_sync") |
106 | - execute("service keystone start", echo=True) |
107 | + utils.start('keystone') |
108 | |
109 | # ensure /var/lib/keystone is g+wrx for peer relations that |
110 | # may be syncing data there via SSH_USER. |
111 | @@ -96,17 +129,21 @@ |
112 | |
113 | |
114 | def db_joined(): |
115 | - relation_data = { "database": config["database"], |
116 | - "username": config["database-user"], |
117 | - "hostname": config["hostname"] } |
118 | - relation_set(relation_data) |
119 | + relation_data = { |
120 | + "database": config["database"], |
121 | + "username": config["database-user"], |
122 | + "hostname": config["hostname"] |
123 | + } |
124 | + utils.relation_set(**relation_data) |
125 | + |
126 | |
127 | def db_changed(): |
128 | - relation_data = relation_get_dict() |
129 | + relation_data = utils.relation_get_dict() |
130 | if ('password' not in relation_data or |
131 | 'db_host' not in relation_data): |
132 | - juju_log("db_host or password not set. Peer not ready, exit 0") |
133 | - exit(0) |
134 | + utils.juju_log('INFO', |
135 | + "db_host or password not set. Peer not ready, exit 0") |
136 | + return |
137 | |
138 | update_config_block('sql', connection="mysql://%s:%s@%s/%s" % |
139 | (config["database-user"], |
140 | @@ -114,80 +151,71 @@ |
141 | relation_data["db_host"], |
142 | config["database"])) |
143 | |
144 | - execute("service keystone stop", echo=True) |
145 | - |
146 | - if not eligible_leader(): |
147 | - juju_log('Deferring DB initialization to service leader.') |
148 | - execute("service keystone start") |
149 | - return |
150 | - |
151 | - execute("keystone-manage db_sync", echo=True) |
152 | - execute("service keystone start") |
153 | + utils.stop('keystone') |
154 | + if cluster.eligible_leader(CLUSTER_RES): |
155 | + utils.juju_log('INFO', |
156 | + 'Cluster leader, performing db-sync') |
157 | + execute("keystone-manage db_sync", echo=True) |
158 | + utils.start('keystone') |
159 | time.sleep(5) |
160 | - ensure_initial_admin(config) |
161 | - |
162 | - # If the backend database has been switched to something new and there |
163 | - # are existing identity-service relations,, service entries need to be |
164 | - # recreated in the new database. Re-executing identity-service-changed |
165 | - # will do this. |
166 | - for id in relation_ids(relation_name='identity-service'): |
167 | - for unit in relation_list(relation_id=id): |
168 | - juju_log("Re-exec'ing identity-service-changed for: %s - %s" % |
169 | - (id, unit)) |
170 | - identity_changed(relation_id=id, remote_unit=unit) |
171 | + |
172 | + if cluster.eligible_leader(CLUSTER_RES): |
173 | + ensure_initial_admin(config) |
174 | + # If the backend database has been switched to something new and there |
175 | + # are existing identity-service relations,, service entries need to be |
176 | + # recreated in the new database. Re-executing identity-service-changed |
177 | + # will do this. |
178 | + for rid in utils.relation_ids('identity-service'): |
179 | + for unit in utils.relation_list(rid=rid): |
180 | + utils.juju_log('INFO', |
181 | + "Re-exec'ing identity-service-changed" |
182 | + " for: %s - %s" % (rid, unit)) |
183 | + identity_changed(relation_id=rid, remote_unit=unit) |
184 | + |
185 | |
186 | def ensure_valid_service(service): |
187 | if service not in valid_services.keys(): |
188 | - juju_log("WARN: Invalid service requested: '%s'" % service) |
189 | - realtion_set({ "admin_token": -1 }) |
190 | + utils.juju_log('WARNING', |
191 | + "Invalid service requested: '%s'" % service) |
192 | + utils.relation_set(admin_token=-1) |
193 | return |
194 | |
195 | -def add_endpoint(region, service, public_url, admin_url, internal_url): |
196 | + |
197 | +def add_endpoint(region, service, publicurl, adminurl, internalurl): |
198 | desc = valid_services[service]["desc"] |
199 | service_type = valid_services[service]["type"] |
200 | create_service_entry(service, service_type, desc) |
201 | create_endpoint_template(region=region, service=service, |
202 | - public_url=public_url, |
203 | - admin_url=admin_url, |
204 | - internal_url=internal_url) |
205 | + publicurl=publicurl, |
206 | + adminurl=adminurl, |
207 | + internalurl=internalurl) |
208 | + |
209 | |
210 | def identity_joined(): |
211 | """ Do nothing until we get information about requested service """ |
212 | pass |
213 | |
214 | + |
215 | def identity_changed(relation_id=None, remote_unit=None): |
216 | """ A service has advertised its API endpoints, create an entry in the |
217 | service catalog. |
218 | Optionally allow this hook to be re-fired for an existing |
219 | relation+unit, for context see see db_changed(). |
220 | """ |
221 | - def ensure_valid_service(service): |
222 | - if service not in valid_services.keys(): |
223 | - juju_log("WARN: Invalid service requested: '%s'" % service) |
224 | - realtion_set({ "admin_token": -1 }) |
225 | - return |
226 | - |
227 | - def add_endpoint(region, service, publicurl, adminurl, internalurl): |
228 | - desc = valid_services[service]["desc"] |
229 | - service_type = valid_services[service]["type"] |
230 | - create_service_entry(service, service_type, desc) |
231 | - create_endpoint_template(region=region, service=service, |
232 | - publicurl=publicurl, |
233 | - adminurl=adminurl, |
234 | - internalurl=internalurl) |
235 | - |
236 | - if not eligible_leader(): |
237 | - juju_log('Deferring identity_changed() to service leader.') |
238 | + if not cluster.eligible_leader(CLUSTER_RES): |
239 | + utils.juju_log('INFO', |
240 | + 'Deferring identity_changed() to service leader.') |
241 | return |
242 | |
243 | - settings = relation_get_dict(relation_id=relation_id, |
244 | - remote_unit=remote_unit) |
245 | + settings = utils.relation_get_dict(relation_id=relation_id, |
246 | + remote_unit=remote_unit) |
247 | |
248 | # Allow the remote service to request creation of any additional roles. |
249 | # Currently used by Swift. |
250 | if 'requested_roles' in settings and settings['requested_roles'] != 'None': |
251 | roles = settings['requested_roles'].split(',') |
252 | - juju_log("Creating requested roles: %s" % roles) |
253 | + utils.juju_log('INFO', |
254 | + "Creating requested roles: %s" % roles) |
255 | for role in roles: |
256 | create_role(role, user=config['admin-user'], tenant='admin') |
257 | |
258 | @@ -196,25 +224,29 @@ |
259 | 'internal_url']) |
260 | if single.issubset(settings): |
261 | # other end of relation advertised only one endpoint |
262 | - if 'None' in [v for k,v in settings.iteritems()]: |
263 | + if 'None' in [v for k, v in settings.iteritems()]: |
264 | # Some backend services advertise no endpoint but require a |
265 | # hook execution to update auth strategy. |
266 | relation_data = {} |
267 | # Check if clustered and use vip + haproxy ports if so |
268 | - if is_clustered(): |
269 | + if cluster.is_clustered(): |
270 | relation_data["auth_host"] = config['vip'] |
271 | - relation_data["auth_port"] = SERVICE_PORTS['keystone_admin'] |
272 | relation_data["service_host"] = config['vip'] |
273 | - relation_data["service_port"] = SERVICE_PORTS['keystone_service'] |
274 | else: |
275 | relation_data["auth_host"] = config['hostname'] |
276 | - relation_data["auth_port"] = config['admin-port'] |
277 | relation_data["service_host"] = config['hostname'] |
278 | - relation_data["service_port"] = config['service-port'] |
279 | - relation_set(relation_data) |
280 | + relation_data["auth_port"] = config['admin-port'] |
281 | + relation_data["service_port"] = config['service-port'] |
282 | + if config['https-service-endpoints'] in ['True', 'true']: |
283 | + # Pass CA cert as client will need it to |
284 | + # verify https connections |
285 | + ca = get_ca(user=SSH_USER) |
286 | + ca_bundle = ca.get_ca_bundle() |
287 | + relation_data['https_keystone'] = 'True' |
288 | + relation_data['ca_cert'] = b64encode(ca_bundle) |
289 | + utils.relation_set(**relation_data) |
290 | return |
291 | |
292 | - |
293 | ensure_valid_service(settings['service']) |
294 | |
295 | add_endpoint(region=settings['region'], service=settings['service'], |
296 | @@ -242,7 +274,7 @@ |
297 | # } |
298 | # } |
299 | endpoints = {} |
300 | - for k,v in settings.iteritems(): |
301 | + for k, v in settings.iteritems(): |
302 | ep = k.split('_')[0] |
303 | x = '_'.join(k.split('_')[1:]) |
304 | if ep not in endpoints: |
305 | @@ -267,18 +299,20 @@ |
306 | https_cn = https_cn.hostname |
307 | service_username = '_'.join(services) |
308 | |
309 | - if 'None' in [v for k,v in settings.iteritems()]: |
310 | + if 'None' in [v for k, v in settings.iteritems()]: |
311 | return |
312 | |
313 | if not service_username: |
314 | return |
315 | |
316 | token = get_admin_token() |
317 | - juju_log("Creating service credentials for '%s'" % service_username) |
318 | + utils.juju_log('INFO', |
319 | + "Creating service credentials for '%s'" % service_username) |
320 | |
321 | service_password = get_service_password(service_username) |
322 | create_user(service_username, service_password, config['service-tenant']) |
323 | - grant_role(service_username, config['admin-role'], config['service-tenant']) |
324 | + grant_role(service_username, config['admin-role'], |
325 | + config['service-tenant']) |
326 | |
327 | # As of https://review.openstack.org/#change,4675, all nodes hosting |
328 | # an endpoint(s) needs a service username and password assigned to |
329 | @@ -305,27 +339,25 @@ |
330 | relation_data['rid'] = relation_id |
331 | |
332 | # Check if clustered and use vip + haproxy ports if so |
333 | - if is_clustered(): |
334 | + if cluster.is_clustered(): |
335 | relation_data["auth_host"] = config['vip'] |
336 | - relation_data["auth_port"] = SERVICE_PORTS['keystone_admin'] |
337 | relation_data["service_host"] = config['vip'] |
338 | - relation_data["service_port"] = SERVICE_PORTS['keystone_service'] |
339 | |
340 | # generate or get a new cert/key for service if set to manage certs. |
341 | if config['https-service-endpoints'] in ['True', 'true']: |
342 | ca = get_ca(user=SSH_USER) |
343 | - service = os.getenv('JUJU_REMOTE_UNIT').split('/')[0] |
344 | cert, key = ca.get_cert_and_key(common_name=https_cn) |
345 | - ca_bundle= ca.get_ca_bundle() |
346 | + ca_bundle = ca.get_ca_bundle() |
347 | relation_data['ssl_cert'] = b64encode(cert) |
348 | relation_data['ssl_key'] = b64encode(key) |
349 | relation_data['ca_cert'] = b64encode(ca_bundle) |
350 | relation_data['https_keystone'] = 'True' |
351 | unison.sync_to_peers(peer_interface='cluster', |
352 | paths=[SSL_DIR], user=SSH_USER, verbose=True) |
353 | - relation_set_2(**relation_data) |
354 | + utils.relation_set(**relation_data) |
355 | synchronize_service_credentials() |
356 | |
357 | + |
358 | def config_changed(): |
359 | |
360 | # Determine whether or not we should do an upgrade, based on the |
361 | @@ -334,8 +366,10 @@ |
362 | installed = get_os_codename_package('keystone') |
363 | |
364 | if (available and |
365 | - get_os_version_codename(available) > get_os_version_codename(installed)): |
366 | - do_openstack_upgrade(config['openstack-origin'], packages) |
367 | + get_os_version_codename(available) > \ |
368 | + get_os_version_codename(installed)): |
369 | + # TODO: fixup this call to work like utils.install() |
370 | + do_openstack_upgrade(config['openstack-origin'], ' '.join(packages)) |
371 | |
372 | env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone', |
373 | 'OPENSTACK_PORT_ADMIN': config['admin-port'], |
374 | @@ -344,8 +378,10 @@ |
375 | |
376 | set_admin_token(config['admin-token']) |
377 | |
378 | - if eligible_leader(): |
379 | - juju_log('Cluster leader - ensuring endpoint configuration is up to date') |
380 | + if cluster.eligible_leader(CLUSTER_RES): |
381 | + utils.juju_log('INFO', |
382 | + 'Cluster leader - ensuring endpoint configuration' |
383 | + ' is up to date') |
384 | ensure_initial_admin(config) |
385 | |
386 | update_config_block('logger_root', level=config['log-level'], |
387 | @@ -354,69 +390,86 @@ |
388 | # PKI introduced in Grizzly |
389 | configure_pki_tokens(config) |
390 | |
391 | - execute("service keystone restart", echo=True) |
392 | - cluster_changed() |
393 | + utils.restart('keystone') |
394 | + |
395 | + if cluster.eligible_leader(CLUSTER_RES): |
396 | + utils.juju_log('INFO', |
397 | + 'Firing identity_changed hook' |
398 | + ' for all related services.') |
399 | + # HTTPS may have been set - so fire all identity relations |
400 | + # again |
401 | + for r_id in utils.relation_ids('identity-service'): |
402 | + for unit in utils.relation_list(r_id): |
403 | + identity_changed(relation_id=r_id, |
404 | + remote_unit=unit) |
405 | |
406 | |
407 | def upgrade_charm(): |
408 | cluster_changed() |
409 | - if eligible_leader(): |
410 | - juju_log('Cluster leader - ensuring endpoint configuration is up to date') |
411 | + if cluster.eligible_leader(CLUSTER_RES): |
412 | + utils.juju_log('INFO', |
413 | + 'Cluster leader - ensuring endpoint configuration' |
414 | + ' is up to date') |
415 | ensure_initial_admin(config) |
416 | |
417 | |
418 | -SERVICE_PORTS = { |
419 | - "keystone_admin": int(config['admin-port']) + 1, |
420 | - "keystone_service": int(config['service-port']) + 1 |
421 | - } |
422 | - |
423 | def cluster_joined(): |
424 | unison.ssh_authorized_peers(user=SSH_USER, |
425 | group='keystone', |
426 | peer_interface='cluster', |
427 | ensure_user=True) |
428 | + update_config_block('DEFAULT', |
429 | + public_port=cluster.determine_api_port(config["service-port"])) |
430 | + update_config_block('DEFAULT', |
431 | + admin_port=cluster.determine_api_port(config["admin-port"])) |
432 | + utils.restart('keystone') |
433 | + service_ports = { |
434 | + "keystone_admin": [ |
435 | + cluster.determine_haproxy_port(config['admin-port']), |
436 | + cluster.determine_api_port(config["admin-port"]) |
437 | + ], |
438 | + "keystone_service": [ |
439 | + cluster.determine_haproxy_port(config['service-port']), |
440 | + cluster.determine_api_port(config["service-port"]) |
441 | + ] |
442 | + } |
443 | + haproxy.configure_haproxy(service_ports) |
444 | + |
445 | |
446 | def cluster_changed(): |
447 | unison.ssh_authorized_peers(user=SSH_USER, |
448 | - group='keystone', |
449 | - peer_interface='cluster', |
450 | - ensure_user=True) |
451 | - cluster_hosts = {} |
452 | - cluster_hosts['self'] = config['hostname'] |
453 | - for r_id in relation_ids('cluster'): |
454 | - for unit in relation_list(r_id): |
455 | - # trigger identity-changed to reconfigure HTTPS |
456 | - # as necessary. |
457 | - identity_changed(relation_id=r_id, remote_unit=unit) |
458 | - cluster_hosts[unit.replace('/','-')] = \ |
459 | - relation_get_dict(relation_id=r_id, |
460 | - remote_unit=unit)['private-address'] |
461 | - configure_haproxy(cluster_hosts, |
462 | - SERVICE_PORTS) |
463 | - |
464 | + group='keystone', |
465 | + peer_interface='cluster', |
466 | + ensure_user=True) |
467 | synchronize_service_credentials() |
468 | + service_ports = { |
469 | + "keystone_admin": [ |
470 | + cluster.determine_haproxy_port(config['admin-port']), |
471 | + cluster.determine_api_port(config["admin-port"]) |
472 | + ], |
473 | + "keystone_service": [ |
474 | + cluster.determine_haproxy_port(config['service-port']), |
475 | + cluster.determine_api_port(config["service-port"]) |
476 | + ] |
477 | + } |
478 | + haproxy.configure_haproxy(service_ports) |
479 | |
480 | - for r_id in relation_ids('identity-service'): |
481 | - for unit in relation_list(r_id): |
482 | - # trigger identity-changed to reconfigure HTTPS as necessary |
483 | - identity_changed(relation_id=r_id, remote_unit=unit) |
484 | |
485 | def ha_relation_changed(): |
486 | - relation_data = relation_get_dict() |
487 | + relation_data = utils.relation_get_dict() |
488 | if ('clustered' in relation_data and |
489 | - is_leader()): |
490 | - juju_log('Cluster configured, notifying other services and updating' |
491 | - 'keystone endpoint configuration') |
492 | + cluster.is_leader(CLUSTER_RES)): |
493 | + utils.juju_log('INFO', |
494 | + 'Cluster configured, notifying other services' |
495 | + ' and updating keystone endpoint configuration') |
496 | # Update keystone endpoint to point at VIP |
497 | ensure_initial_admin(config) |
498 | # Tell all related services to start using |
499 | # the VIP and haproxy ports instead |
500 | - for r_id in relation_ids('identity-service'): |
501 | - relation_set_2(rid=r_id, |
502 | - auth_host=config['vip'], |
503 | - service_host=config['vip'], |
504 | - service_port=SERVICE_PORTS['keystone_service'], |
505 | - auth_port=SERVICE_PORTS['keystone_admin']) |
506 | + for r_id in utils.relation_ids('identity-service'): |
507 | + utils.relation_set(rid=r_id, |
508 | + auth_host=config['vip'], |
509 | + service_host=config['vip']) |
510 | |
511 | |
512 | def ha_relation_joined(): |
513 | @@ -424,41 +477,33 @@ |
514 | # include multicast port and interface to bind to. |
515 | corosync_bindiface = config['ha-bindiface'] |
516 | corosync_mcastport = config['ha-mcastport'] |
517 | + vip = config['vip'] |
518 | + vip_cidr = config['vip_cidr'] |
519 | + vip_iface = config['vip_iface'] |
520 | |
521 | # Obtain resources |
522 | resources = { |
523 | - 'res_ks_vip':'ocf:heartbeat:IPaddr2', |
524 | - 'res_ks_haproxy':'lsb:haproxy' |
525 | + 'res_ks_vip': 'ocf:heartbeat:IPaddr2', |
526 | + 'res_ks_haproxy': 'lsb:haproxy' |
527 | } |
528 | - # TODO: Obtain netmask and nic where to place VIP. |
529 | resource_params = { |
530 | - 'res_ks_vip':'params ip="%s" cidr_netmask="%s" nic="%s"' % (config['vip'], |
531 | - config['vip_cidr'], config['vip_iface']), |
532 | - 'res_ks_haproxy':'op monitor interval="5s"' |
533 | + 'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \ |
534 | + (vip, vip_cidr, vip_iface), |
535 | + 'res_ks_haproxy': 'op monitor interval="5s"' |
536 | } |
537 | init_services = { |
538 | - 'res_ks_haproxy':'haproxy' |
539 | - } |
540 | - groups = { |
541 | - 'grp_ks_haproxy':'res_ks_vip res_ks_haproxy' |
542 | - } |
543 | - #clones = { |
544 | - # 'cln_ks_haproxy':'res_ks_haproxy meta globally-unique="false" interleave="true"' |
545 | - # } |
546 | - |
547 | - #orders = { |
548 | - # 'ord_vip_before_haproxy':'inf: res_ks_vip res_ks_haproxy' |
549 | - # } |
550 | - #colocations = { |
551 | - # 'col_vip_on_haproxy':'inf: res_ks_haproxy res_ks_vip' |
552 | - # } |
553 | - |
554 | - relation_set_2(init_services=init_services, |
555 | - corosync_bindiface=corosync_bindiface, |
556 | - corosync_mcastport=corosync_mcastport, |
557 | - resources=resources, |
558 | - resource_params=resource_params, |
559 | - groups=groups) |
560 | + 'res_ks_haproxy': 'haproxy' |
561 | + } |
562 | + clones = { |
563 | + 'cl_ks_haproxy': 'res_ks_haproxy' |
564 | + } |
565 | + |
566 | + utils.relation_set(init_services=init_services, |
567 | + corosync_bindiface=corosync_bindiface, |
568 | + corosync_mcastport=corosync_mcastport, |
569 | + resources=resources, |
570 | + resource_params=resource_params, |
571 | + clones=clones) |
572 | |
573 | |
574 | hooks = { |
575 | @@ -476,9 +521,4 @@ |
576 | "upgrade-charm": upgrade_charm |
577 | } |
578 | |
579 | -# keystone-hooks gets called by symlink corresponding to the requested relation |
580 | -# hook. |
581 | -hook = os.path.basename(sys.argv[0]) |
582 | -if hook not in hooks.keys(): |
583 | - error_out("Unsupported hook: %s" % hook) |
584 | -hooks[hook]() |
585 | +utils.do_hooks(hooks) |
586 | |
587 | === modified file 'hooks/keystone_ssl.py' |
588 | --- hooks/keystone_ssl.py 2013-02-20 23:30:25 +0000 |
589 | +++ hooks/keystone_ssl.py 2013-03-19 14:13:27 +0000 |
590 | @@ -1,17 +1,15 @@ |
591 | #!/usr/bin/python |
592 | |
593 | -import base64 |
594 | import os |
595 | import shutil |
596 | import subprocess |
597 | import tarfile |
598 | import tempfile |
599 | -from utils import * |
600 | |
601 | CA_EXPIRY = '365' |
602 | ORG_NAME = 'Ubuntu' |
603 | ORG_UNIT = 'Ubuntu Cloud' |
604 | -CA_BUNDLE='/usr/local/share/ca-certificates/juju_ca_cert.crt' |
605 | +CA_BUNDLE = '/usr/local/share/ca-certificates/juju_ca_cert.crt' |
606 | |
607 | CA_CONFIG = """ |
608 | [ ca ] |
609 | @@ -58,7 +56,7 @@ |
610 | keyUsage = cRLSign, keyCertSign |
611 | """ |
612 | |
613 | -SIGNING_CONFIG=""" |
614 | +SIGNING_CONFIG = """ |
615 | [ ca ] |
616 | default_ca = CA_default |
617 | |
618 | @@ -141,9 +139,9 @@ |
619 | else: |
620 | print 'Found %s.' % f |
621 | if init: |
622 | - cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), '-x509', |
623 | - '-nodes', '-newkey', 'rsa', '-days', '21360', '-keyout', key, |
624 | - '-out', crt, '-outform', 'PEM'] |
625 | + cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), |
626 | + '-x509', '-nodes', '-newkey', 'rsa', '-days', '21360', |
627 | + '-keyout', key, '-out', crt, '-outform', 'PEM'] |
628 | subprocess.check_call(cmd) |
629 | return crt, key |
630 | |
631 | @@ -152,8 +150,9 @@ |
632 | print 'Creating new intermediate CSR.' |
633 | key = os.path.join(ca_dir, 'private', 'cacert.key') |
634 | csr = os.path.join(ca_dir, 'cacert.csr') |
635 | - cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), '-sha1', |
636 | - '-newkey', 'rsa', '-nodes', '-keyout', key, '-out', csr, '-outform', |
637 | + cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'), |
638 | + '-sha1', '-newkey', 'rsa', '-nodes', '-keyout', key, '-out', |
639 | + csr, '-outform', |
640 | 'PEM'] |
641 | subprocess.check_call(cmd) |
642 | return csr, key |
643 | @@ -164,7 +163,8 @@ |
644 | crt = os.path.join(ca_dir, 'certs', |
645 | '%s.crt' % os.path.basename(csr).split('.')[0]) |
646 | subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name) |
647 | - cmd = ['openssl', 'ca', '-batch', '-config', os.path.join(ca_dir, 'ca.cnf'), |
648 | + cmd = ['openssl', 'ca', '-batch', '-config', |
649 | + os.path.join(ca_dir, 'ca.cnf'), |
650 | '-extensions', 'ca_extensions', '-days', CA_EXPIRY, '-notext', |
651 | '-in', csr, '-out', crt, '-subj', subj, '-batch'] |
652 | print ' '.join(cmd) |
653 | @@ -201,10 +201,11 @@ |
654 | cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes', '-keyout', |
655 | key, '-out', csr, '-subj', subj] |
656 | subprocess.check_call(cmd) |
657 | - crt = sign_csr(ca_dir, csr, common_name) |
658 | + crt = sign_int_csr(ca_dir, csr, common_name) |
659 | print 'Signed new CSR, crt @ %s' % crt |
660 | return |
661 | |
662 | + |
663 | def update_bundle(bundle_file, new_bundle): |
664 | return |
665 | if os.path.isfile(bundle_file): |
666 | @@ -219,10 +220,11 @@ |
667 | out.write(new_bundle) |
668 | subprocess.check_call(['update-ca-certificates']) |
669 | |
670 | + |
671 | def tar_directory(path): |
672 | cwd = os.getcwd() |
673 | - parent=os.path.dirname(path) |
674 | - directory=os.path.basename(path) |
675 | + parent = os.path.dirname(path) |
676 | + directory = os.path.basename(path) |
677 | tmp = tempfile.TemporaryFile() |
678 | os.chdir(parent) |
679 | tarball = tarfile.TarFile(fileobj=tmp, mode='w') |
680 | @@ -234,6 +236,7 @@ |
681 | os.chdir(cwd) |
682 | return out |
683 | |
684 | + |
685 | class JujuCA(object): |
686 | def __init__(self, name, ca_dir, root_ca_dir, user, group): |
687 | root_crt, root_key = init_root_ca(root_ca_dir, |
688 | @@ -265,8 +268,8 @@ |
689 | subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name) |
690 | csr = os.path.join(self.ca_dir, 'certs', '%s.csr' % service) |
691 | key = os.path.join(self.ca_dir, 'certs', '%s.key' % service) |
692 | - cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes', '-keyout', |
693 | - key, '-out', csr, '-subj', subj] |
694 | + cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes', |
695 | + '-keyout', key, '-out', csr, '-subj', subj] |
696 | subprocess.check_call(cmd) |
697 | crt = self._sign_csr(csr, service, common_name) |
698 | cmd = ['chown', '-R', '%s.%s' % (self.user, self.group), self.ca_dir] |
699 | |
700 | === renamed file 'hooks/utils.py' => 'hooks/keystone_utils.py' |
701 | --- hooks/utils.py 2013-03-08 21:33:22 +0000 |
702 | +++ hooks/keystone_utils.py 2013-03-19 14:13:27 +0000 |
703 | @@ -1,17 +1,23 @@ |
704 | #!/usr/bin/python |
705 | import ConfigParser |
706 | -import subprocess |
707 | import sys |
708 | import json |
709 | +import time |
710 | +import subprocess |
711 | import os |
712 | -import tarfile |
713 | -import tempfile |
714 | -import time |
715 | |
716 | -from lib.openstack_common import * |
717 | +from lib.openstack_common import( |
718 | + get_os_codename_install_source, |
719 | + get_os_codename_package, |
720 | + error_out, |
721 | + configure_installation_source |
722 | + ) |
723 | |
724 | import keystone_ssl as ssl |
725 | import lib.unison as unison |
726 | +import lib.utils as utils |
727 | +import lib.cluster_utils as cluster |
728 | + |
729 | |
730 | keystone_conf = "/etc/keystone/keystone.conf" |
731 | stored_passwd = "/var/lib/keystone/keystone.passwd" |
732 | @@ -20,8 +26,9 @@ |
733 | |
734 | SSL_DIR = '/var/lib/keystone/juju_ssl/' |
735 | SSL_CA_NAME = 'Ubuntu Cloud' |
736 | +CLUSTER_RES = 'res_ks_vip' |
737 | +SSH_USER = 'juju_keystone' |
738 | |
739 | -SSH_USER='juju_keystone' |
740 | |
741 | def execute(cmd, die=False, echo=False): |
742 | """ Executes a command |
743 | @@ -35,8 +42,8 @@ |
744 | stdout=subprocess.PIPE, |
745 | stdin=subprocess.PIPE, |
746 | stderr=subprocess.PIPE) |
747 | - stdout="" |
748 | - stderr="" |
749 | + stdout = "" |
750 | + stderr = "" |
751 | |
752 | def print_line(l): |
753 | if echo: |
754 | @@ -59,7 +66,7 @@ |
755 | |
756 | |
757 | def config_get(): |
758 | - """ Obtain the units config via 'config-get' |
759 | + """ Obtain the units config via 'config-get' |
760 | Returns a dict representing current config. |
761 | private-address and IP of the unit is also tacked on for |
762 | convienence |
763 | @@ -68,102 +75,38 @@ |
764 | config = json.loads(output) |
765 | # make sure no config element is blank after config-get |
766 | for c in config.keys(): |
767 | - if not config[c]: |
768 | + if not config[c]: |
769 | error_out("ERROR: Config option has no paramter: %s" % c) |
770 | # tack on our private address and ip |
771 | - hostname = execute("unit-get private-address")[0].strip() |
772 | - config["hostname"] = execute("unit-get private-address")[0].strip() |
773 | + config["hostname"] = utils.unit_get('private-address') |
774 | return config |
775 | |
776 | -def relation_ids(relation_name=None): |
777 | - j = execute('relation-ids --format=json %s' % relation_name)[0] |
778 | - return json.loads(j) |
779 | - |
780 | -def relation_list(relation_id=None): |
781 | - cmd = 'relation-list --format=json' |
782 | - if relation_id: |
783 | - cmd += ' -r %s' % relation_id |
784 | - j = execute(cmd)[0] |
785 | - return json.loads(j) |
786 | - |
787 | -def relation_set(relation_data): |
788 | - """ calls relation-set for all key=values in dict """ |
789 | - for k in relation_data: |
790 | - execute("relation-set %s=%s" % (k, relation_data[k]), die=True) |
791 | - |
792 | -def relation_set_2(**kwargs): |
793 | - cmd = [ |
794 | - 'relation-set' |
795 | - ] |
796 | - args = [] |
797 | - for k, v in kwargs.items(): |
798 | - if k == 'rid': |
799 | - cmd.append('-r') |
800 | - cmd.append(v) |
801 | - else: |
802 | - args.append('{}={}'.format(k, v)) |
803 | - cmd += args |
804 | - subprocess.check_call(cmd) |
805 | - |
806 | - |
807 | -def unit_get(attribute): |
808 | - cmd = [ |
809 | - 'unit-get', |
810 | - attribute |
811 | - ] |
812 | - value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
813 | - if value == "": |
814 | - return None |
815 | - else: |
816 | - return value |
817 | - |
818 | - |
819 | -def relation_get(relation_data): |
820 | - """ Obtain all current relation data |
821 | - relation_data is a list of options to query from the relation |
822 | - Returns a k,v dict of the results. |
823 | - Leave empty responses out of the results as they haven't yet been |
824 | - set on the other end. |
825 | - Caller can then "len(results.keys()) == len(relation_data)" to find out if |
826 | - all relation values have been set on the other side |
827 | - """ |
828 | - results = {} |
829 | - for r in relation_data: |
830 | - result = execute("relation-get %s" % r, die=True)[0].strip('\n') |
831 | - if result != "": |
832 | - results[r] = result |
833 | - return results |
834 | - |
835 | -def relation_get_dict(relation_id=None, remote_unit=None): |
836 | - """Obtain all relation data as dict by way of JSON""" |
837 | - cmd = 'relation-get --format=json' |
838 | - if relation_id: |
839 | - cmd += ' -r %s' % relation_id |
840 | - if remote_unit: |
841 | - remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None) |
842 | - os.environ['JUJU_REMOTE_UNIT'] = remote_unit |
843 | - j = execute(cmd, die=True)[0] |
844 | - if remote_unit and remote_unit_orig: |
845 | - os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig |
846 | - d = json.loads(j) |
847 | - settings = {} |
848 | - # convert unicode to strings |
849 | - for k, v in d.iteritems(): |
850 | - settings[str(k)] = str(v) |
851 | - return settings |
852 | + |
853 | +@utils.cached |
854 | +def get_local_endpoint(): |
855 | + """ Returns the URL for the local end-point bypassing haproxy/ssl """ |
856 | + local_endpoint = 'http://localhost:{}/v2.0/'.format( |
857 | + cluster.determine_api_port(utils.config_get('admin-port')) |
858 | + ) |
859 | + return local_endpoint |
860 | + |
861 | |
862 | def set_admin_token(admin_token): |
863 | """Set admin token according to deployment config or use a randomly |
864 | generated token if none is specified (default). |
865 | """ |
866 | if admin_token != 'None': |
867 | - juju_log('Configuring Keystone to use a pre-configured admin token.') |
868 | + utils.juju_log('INFO', |
869 | + 'Configuring Keystone to use' |
870 | + ' a pre-configured admin token.') |
871 | token = admin_token |
872 | else: |
873 | - juju_log('Configuring Keystone to use a random admin token.') |
874 | + utils.juju_log('INFO', |
875 | + 'Configuring Keystone to use a random admin token.') |
876 | if os.path.isfile(stored_token): |
877 | - msg = 'Loading a previously generated admin token from %s' % stored_token |
878 | - juju_log(msg) |
879 | + msg = 'Loading a previously generated' \ |
880 | + ' admin token from %s' % stored_token |
881 | + utils.juju_log('INFO', msg) |
882 | f = open(stored_token, 'r') |
883 | token = f.read().strip() |
884 | f.close() |
885 | @@ -174,20 +117,22 @@ |
886 | out.close() |
887 | update_config_block('DEFAULT', admin_token=token) |
888 | |
889 | + |
890 | def get_admin_token(): |
891 | """Temporary utility to grab the admin token as configured in |
892 | keystone.conf |
893 | """ |
894 | - f = open(keystone_conf, 'r+') |
895 | - for l in open(keystone_conf, 'r+').readlines(): |
896 | - if l.split(' ')[0] == 'admin_token': |
897 | - try: |
898 | - return l.split('=')[1].strip() |
899 | - except: |
900 | - error_out('Could not parse admin_token line from %s' % |
901 | - keystone_conf) |
902 | + with open(keystone_conf, 'r') as f: |
903 | + for l in f.readlines(): |
904 | + if l.split(' ')[0] == 'admin_token': |
905 | + try: |
906 | + return l.split('=')[1].strip() |
907 | + except: |
908 | + error_out('Could not parse admin_token line from %s' % |
909 | + keystone_conf) |
910 | error_out('Could not find admin_token line in %s' % keystone_conf) |
911 | |
912 | + |
913 | def update_config_block(section, **kwargs): |
914 | """ Updates keystone.conf blocks given kwargs. |
915 | Update a config setting in a specific setting of a config |
916 | @@ -209,32 +154,37 @@ |
917 | with open(conf_file, 'wb') as out: |
918 | config.write(out) |
919 | |
920 | + |
921 | def create_service_entry(service_name, service_type, service_desc, owner=None): |
922 | """ Add a new service entry to keystone if one does not already exist """ |
923 | import manager |
924 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
925 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
926 | token=get_admin_token()) |
927 | for service in [s._info for s in manager.api.services.list()]: |
928 | if service['name'] == service_name: |
929 | - juju_log("Service entry for '%s' already exists." % service_name) |
930 | + utils.juju_log('INFO', |
931 | + "Service entry for '%s' already exists." % \ |
932 | + service_name) |
933 | return |
934 | manager.api.services.create(name=service_name, |
935 | service_type=service_type, |
936 | description=service_desc) |
937 | - juju_log("Created new service entry '%s'" % service_name) |
938 | + utils.juju_log('INFO', "Created new service entry '%s'" % service_name) |
939 | + |
940 | |
941 | def create_endpoint_template(region, service, publicurl, adminurl, |
942 | internalurl): |
943 | """ Create a new endpoint template for service if one does not already |
944 | exist matching name *and* region """ |
945 | import manager |
946 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
947 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
948 | token=get_admin_token()) |
949 | service_id = manager.resolve_service_id(service) |
950 | for ep in [e._info for e in manager.api.endpoints.list()]: |
951 | if ep['service_id'] == service_id and ep['region'] == region: |
952 | - juju_log("Endpoint template already exists for '%s' in '%s'" |
953 | - % (service, region)) |
954 | + utils.juju_log('INFO', |
955 | + "Endpoint template already exists for '%s' in '%s'" |
956 | + % (service, region)) |
957 | |
958 | up_to_date = True |
959 | for k in ['publicurl', 'adminurl', 'internalurl']: |
960 | @@ -245,7 +195,9 @@ |
961 | return |
962 | else: |
963 | # delete endpoint and recreate if endpoint urls need updating. |
964 | - juju_log("Updating endpoint template with new endpoint urls.") |
965 | + utils.juju_log('INFO', |
966 | + "Updating endpoint template with" |
967 | + " new endpoint urls.") |
968 | manager.api.endpoints.delete(ep['id']) |
969 | |
970 | manager.api.endpoints.create(region=region, |
971 | @@ -253,26 +205,28 @@ |
972 | publicurl=publicurl, |
973 | adminurl=adminurl, |
974 | internalurl=internalurl) |
975 | - juju_log("Created new endpoint template for '%s' in '%s'" % |
976 | - (region, service)) |
977 | + utils.juju_log('INFO', "Created new endpoint template for '%s' in '%s'" % |
978 | + (region, service)) |
979 | + |
980 | |
981 | def create_tenant(name): |
982 | """ creates a tenant if it does not already exist """ |
983 | import manager |
984 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
985 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
986 | token=get_admin_token()) |
987 | tenants = [t._info for t in manager.api.tenants.list()] |
988 | if not tenants or name not in [t['name'] for t in tenants]: |
989 | manager.api.tenants.create(tenant_name=name, |
990 | description='Created by Juju') |
991 | - juju_log("Created new tenant: %s" % name) |
992 | + utils.juju_log('INFO', "Created new tenant: %s" % name) |
993 | return |
994 | - juju_log("Tenant '%s' already exists." % name) |
995 | + utils.juju_log('INFO', "Tenant '%s' already exists." % name) |
996 | + |
997 | |
998 | def create_user(name, password, tenant): |
999 | """ creates a user if it doesn't already exist, as a member of tenant """ |
1000 | import manager |
1001 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
1002 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
1003 | token=get_admin_token()) |
1004 | users = [u._info for u in manager.api.users.list()] |
1005 | if not users or name not in [u['name'] for u in users]: |
1006 | @@ -283,21 +237,23 @@ |
1007 | password=password, |
1008 | email='juju@localhost', |
1009 | tenant_id=tenant_id) |
1010 | - juju_log("Created new user '%s' tenant: %s" % (name, tenant_id)) |
1011 | + utils.juju_log('INFO', "Created new user '%s' tenant: %s" % \ |
1012 | + (name, tenant_id)) |
1013 | return |
1014 | - juju_log("A user named '%s' already exists" % name) |
1015 | + utils.juju_log('INFO', "A user named '%s' already exists" % name) |
1016 | + |
1017 | |
1018 | def create_role(name, user=None, tenant=None): |
1019 | """ creates a role if it doesn't already exist. grants role to user """ |
1020 | import manager |
1021 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
1022 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
1023 | token=get_admin_token()) |
1024 | roles = [r._info for r in manager.api.roles.list()] |
1025 | if not roles or name not in [r['name'] for r in roles]: |
1026 | manager.api.roles.create(name=name) |
1027 | - juju_log("Created new role '%s'" % name) |
1028 | + utils.juju_log('INFO', "Created new role '%s'" % name) |
1029 | else: |
1030 | - juju_log("A role named '%s' already exists" % name) |
1031 | + utils.juju_log('INFO', "A role named '%s' already exists" % name) |
1032 | |
1033 | if not user and not tenant: |
1034 | return |
1035 | @@ -308,49 +264,55 @@ |
1036 | tenant_id = manager.resolve_tenant_id(tenant) |
1037 | |
1038 | if None in [user_id, role_id, tenant_id]: |
1039 | - error_out("Could not resolve [user_id, role_id, tenant_id]" % |
1040 | - [user_id, role_id, tenant_id]) |
1041 | + error_out("Could not resolve [%s, %s, %s]" % |
1042 | + (user_id, role_id, tenant_id)) |
1043 | |
1044 | grant_role(user, name, tenant) |
1045 | |
1046 | + |
1047 | def grant_role(user, role, tenant): |
1048 | """grant user+tenant a specific role""" |
1049 | import manager |
1050 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
1051 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
1052 | token=get_admin_token()) |
1053 | - juju_log("Granting user '%s' role '%s' on tenant '%s'" %\ |
1054 | - (user, role, tenant)) |
1055 | + utils.juju_log('INFO', "Granting user '%s' role '%s' on tenant '%s'" % \ |
1056 | + (user, role, tenant)) |
1057 | user_id = manager.resolve_user_id(user) |
1058 | role_id = manager.resolve_role_id(role) |
1059 | tenant_id = manager.resolve_tenant_id(tenant) |
1060 | |
1061 | - cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id) |
1062 | + cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id) |
1063 | if not cur_roles or role_id not in [r.id for r in cur_roles]: |
1064 | manager.api.roles.add_user_role(user=user_id, |
1065 | role=role_id, |
1066 | tenant=tenant_id) |
1067 | - juju_log("Granted user '%s' role '%s' on tenant '%s'" %\ |
1068 | - (user, role, tenant)) |
1069 | + utils.juju_log('INFO', "Granted user '%s' role '%s' on tenant '%s'" % \ |
1070 | + (user, role, tenant)) |
1071 | else: |
1072 | - juju_log("User '%s' already has role '%s' on tenant '%s'" %\ |
1073 | - (user, role, tenant)) |
1074 | + utils.juju_log('INFO', |
1075 | + "User '%s' already has role '%s' on tenant '%s'" % \ |
1076 | + (user, role, tenant)) |
1077 | + |
1078 | |
1079 | def generate_admin_token(config): |
1080 | """ generate and add an admin token """ |
1081 | import manager |
1082 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
1083 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
1084 | token='ADMIN') |
1085 | if config["admin-token"] == "None": |
1086 | import random |
1087 | token = random.randrange(1000000000000, 9999999999999) |
1088 | else: |
1089 | return config["admin-token"] |
1090 | - manager.api.add_token(token, config["admin-user"], "admin", config["token-expiry"]) |
1091 | - juju_log("Generated and added new random admin token.") |
1092 | + manager.api.add_token(token, config["admin-user"], |
1093 | + "admin", config["token-expiry"]) |
1094 | + utils.juju_log('INFO', "Generated and added new random admin token.") |
1095 | return token |
1096 | |
1097 | + |
1098 | def ensure_initial_admin(config): |
1099 | - """ Ensures the minimum admin stuff exists in whatever database we're using. |
1100 | + """ Ensures the minimum admin stuff exists in whatever database we're |
1101 | + using. |
1102 | This and the helper functions it calls are meant to be idempotent and |
1103 | run during install as well as during db-changed. This will maintain |
1104 | the admin tenant, user, role, service entry and endpoint across every |
1105 | @@ -365,10 +327,11 @@ |
1106 | if config["admin-password"] != "None": |
1107 | passwd = config["admin-password"] |
1108 | elif os.path.isfile(stored_passwd): |
1109 | - juju_log("Loading stored passwd from %s" % stored_passwd) |
1110 | + utils.juju_log('INFO', "Loading stored passwd from %s" % stored_passwd) |
1111 | passwd = open(stored_passwd, 'r').readline().strip('\n') |
1112 | if passwd == "": |
1113 | - juju_log("Generating new passwd for user: %s" % config["admin-user"]) |
1114 | + utils.juju_log('INFO', "Generating new passwd for user: %s" % \ |
1115 | + config["admin-user"]) |
1116 | passwd = execute("pwgen -c 16 1", die=True)[0] |
1117 | open(stored_passwd, 'w+').writelines("%s\n" % passwd) |
1118 | |
1119 | @@ -380,22 +343,19 @@ |
1120 | create_role("KeystoneServiceAdmin", config["admin-user"], 'admin') |
1121 | create_service_entry("keystone", "identity", "Keystone Identity Service") |
1122 | |
1123 | - if is_clustered(): |
1124 | - juju_log("Creating endpoint for clustered configuration") |
1125 | - for region in config['region'].split(): |
1126 | - create_keystone_endpoint(service_host=config["vip"], |
1127 | - service_port=int(config["service-port"]) + 1, |
1128 | - auth_host=config["vip"], |
1129 | - auth_port=int(config["admin-port"]) + 1, |
1130 | - region=region) |
1131 | + if cluster.is_clustered(): |
1132 | + utils.juju_log('INFO', "Creating endpoint for clustered configuration") |
1133 | + service_host = auth_host = config["vip"] |
1134 | else: |
1135 | - juju_log("Creating standard endpoint") |
1136 | - for region in config['region'].split(): |
1137 | - create_keystone_endpoint(service_host=config["hostname"], |
1138 | - service_port=config["service-port"], |
1139 | - auth_host=config["hostname"], |
1140 | - auth_port=config["admin-port"], |
1141 | - region=region) |
1142 | + utils.juju_log('INFO', "Creating standard endpoint") |
1143 | + service_host = auth_host = config["hostname"] |
1144 | + |
1145 | + for region in config['region'].split(): |
1146 | + create_keystone_endpoint(service_host=service_host, |
1147 | + service_port=config["service-port"], |
1148 | + auth_host=auth_host, |
1149 | + auth_port=config["admin-port"], |
1150 | + region=region) |
1151 | |
1152 | |
1153 | def create_keystone_endpoint(service_host, service_port, |
1154 | @@ -409,16 +369,18 @@ |
1155 | |
1156 | def update_user_password(username, password): |
1157 | import manager |
1158 | - manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/', |
1159 | + manager = manager.KeystoneManager(endpoint=get_local_endpoint(), |
1160 | token=get_admin_token()) |
1161 | - juju_log("Updating password for user '%s'" % username) |
1162 | + utils.juju_log('INFO', "Updating password for user '%s'" % username) |
1163 | |
1164 | user_id = manager.resolve_user_id(username) |
1165 | if user_id is None: |
1166 | error_out("Could not resolve user id for '%s'" % username) |
1167 | |
1168 | manager.api.users.update_password(user=user_id, password=password) |
1169 | - juju_log("Successfully updated password for user '%s'" % username) |
1170 | + utils.juju_log('INFO', "Successfully updated password for user '%s'" % \ |
1171 | + username) |
1172 | + |
1173 | |
1174 | def load_stored_passwords(path=SERVICE_PASSWD_PATH): |
1175 | creds = {} |
1176 | @@ -431,10 +393,12 @@ |
1177 | creds[user] = passwd |
1178 | return creds |
1179 | |
1180 | + |
1181 | def save_stored_passwords(path=SERVICE_PASSWD_PATH, **creds): |
1182 | with open(path, 'wb') as stored_passwd: |
1183 | [stored_passwd.write('%s:%s\n' % (u, p)) for u, p in creds.iteritems()] |
1184 | |
1185 | + |
1186 | def get_service_password(service_username): |
1187 | creds = load_stored_passwords() |
1188 | if service_username in creds: |
1189 | @@ -446,12 +410,13 @@ |
1190 | |
1191 | return passwd |
1192 | |
1193 | + |
1194 | def configure_pki_tokens(config): |
1195 | '''Configure PKI token signing, if enabled.''' |
1196 | if config['enable-pki'] not in ['True', 'true']: |
1197 | update_config_block('signing', token_format='UUID') |
1198 | else: |
1199 | - juju_log('TODO: PKI Support, setting to UUID for now.') |
1200 | + utils.juju_log('INFO', 'TODO: PKI Support, setting to UUID for now.') |
1201 | update_config_block('signing', token_format='UUID') |
1202 | |
1203 | |
1204 | @@ -462,10 +427,12 @@ |
1205 | old_vers = get_os_codename_package('keystone') |
1206 | new_vers = get_os_codename_install_source(install_src) |
1207 | |
1208 | - juju_log("Beginning Keystone upgrade: %s -> %s" % (old_vers, new_vers)) |
1209 | + utils.juju_log('INFO', |
1210 | + "Beginning Keystone upgrade: %s -> %s" % \ |
1211 | + (old_vers, new_vers)) |
1212 | |
1213 | # Backup previous config. |
1214 | - juju_log("Backing up contents of /etc/keystone.") |
1215 | + utils.juju_log('INFO', "Backing up contents of /etc/keystone.") |
1216 | stamp = time.strftime('%Y%m%d%H%M') |
1217 | cmd = 'tar -pcf /var/lib/juju/keystone-backup-%s.tar /etc/keystone' % stamp |
1218 | execute(cmd, die=True, echo=True) |
1219 | @@ -482,15 +449,17 @@ |
1220 | set_admin_token(config['admin-token']) |
1221 | |
1222 | # set the sql connection string if a shared-db relation is found. |
1223 | - ids = relation_ids(relation_name='shared-db') |
1224 | + ids = utils.relation_ids('shared-db') |
1225 | |
1226 | if ids: |
1227 | - for id in ids: |
1228 | - for unit in relation_list(id): |
1229 | - juju_log('Configuring new keystone.conf for datbase access '\ |
1230 | - 'on existing database relation to %s' % unit) |
1231 | - relation_data = relation_get_dict(relation_id=id, |
1232 | - remote_unit=unit) |
1233 | + for rid in ids: |
1234 | + for unit in utils.relation_list(rid): |
1235 | + utils.juju_log('INFO', |
1236 | + 'Configuring new keystone.conf for ' |
1237 | + 'database access on existing database' |
1238 | + ' relation to %s' % unit) |
1239 | + relation_data = utils.relation_get_dict(relation_id=rid, |
1240 | + remote_unit=unit) |
1241 | |
1242 | update_config_block('sql', connection="mysql://%s:%s@%s/%s" % |
1243 | (config["database-user"], |
1244 | @@ -498,66 +467,21 @@ |
1245 | relation_data["private-address"], |
1246 | config["database"])) |
1247 | |
1248 | - execute('service keystone stop', echo=True) |
1249 | - if ((is_clustered() and is_leader()) or |
1250 | - not is_clustered()): |
1251 | - juju_log('Running database migrations for %s' % new_vers) |
1252 | + utils.stop('keystone') |
1253 | + if (cluster.eligible_leader(CLUSTER_RES)): |
1254 | + utils.juju_log('INFO', |
1255 | + 'Running database migrations for %s' % new_vers) |
1256 | execute('keystone-manage db_sync', echo=True, die=True) |
1257 | else: |
1258 | - juju_log('Not cluster leader; snoozing whilst leader upgrades DB') |
1259 | + utils.juju_log('INFO', |
1260 | + 'Not cluster leader; snoozing whilst' |
1261 | + ' leader upgrades DB') |
1262 | time.sleep(10) |
1263 | - execute('service keystone start', echo=True) |
1264 | + utils.start('keystone') |
1265 | time.sleep(5) |
1266 | - juju_log('Completed Keystone upgrade: %s -> %s' % (old_vers, new_vers)) |
1267 | - |
1268 | - |
1269 | -def is_clustered(): |
1270 | - for r_id in (relation_ids('ha') or []): |
1271 | - for unit in (relation_list(r_id) or []): |
1272 | - relation_data = \ |
1273 | - relation_get_dict(relation_id=r_id, |
1274 | - remote_unit=unit) |
1275 | - if 'clustered' in relation_data: |
1276 | - return True |
1277 | - return False |
1278 | - |
1279 | - |
1280 | -def is_leader(): |
1281 | - status = execute('crm resource show res_ks_vip', echo=True)[0].strip() |
1282 | - hostname = execute('hostname', echo=True)[0].strip() |
1283 | - if hostname in status: |
1284 | - return True |
1285 | - else: |
1286 | - return False |
1287 | - |
1288 | - |
1289 | -def peer_units(): |
1290 | - peers = [] |
1291 | - for r_id in (relation_ids('cluster') or []): |
1292 | - for unit in (relation_list(r_id) or []): |
1293 | - peers.append(unit) |
1294 | - return peers |
1295 | - |
1296 | -def oldest_peer(peers): |
1297 | - local_unit_no = os.getenv('JUJU_UNIT_NAME').split('/')[1] |
1298 | - for peer in peers: |
1299 | - remote_unit_no = peer.split('/')[1] |
1300 | - if remote_unit_no < local_unit_no: |
1301 | - return False |
1302 | - return True |
1303 | - |
1304 | - |
1305 | -def eligible_leader(): |
1306 | - if is_clustered(): |
1307 | - if not is_leader(): |
1308 | - juju_log('Deferring action to CRM leader.') |
1309 | - return False |
1310 | - else: |
1311 | - peers = peer_units() |
1312 | - if peers and not oldest_peer(peers): |
1313 | - juju_log('Deferring action to oldest service unit.') |
1314 | - return False |
1315 | - return True |
1316 | + utils.juju_log('INFO', |
1317 | + 'Completed Keystone upgrade: ' |
1318 | + '%s -> %s' % (old_vers, new_vers)) |
1319 | |
1320 | |
1321 | def synchronize_service_credentials(): |
1322 | @@ -565,15 +489,17 @@ |
1323 | Broadcast service credentials to peers or consume those that have been |
1324 | broadcasted by peer, depending on hook context. |
1325 | ''' |
1326 | - if (not eligible_leader() or |
1327 | + if (not cluster.eligible_leader(CLUSTER_RES) or |
1328 | not os.path.isfile(SERVICE_PASSWD_PATH)): |
1329 | return |
1330 | - juju_log('Synchronizing service passwords to all peers.') |
1331 | + utils.juju_log('INFO', 'Synchronizing service passwords to all peers.') |
1332 | unison.sync_to_peers(peer_interface='cluster', |
1333 | paths=[SERVICE_PASSWD_PATH], user=SSH_USER, |
1334 | verbose=True) |
1335 | |
1336 | CA = [] |
1337 | + |
1338 | + |
1339 | def get_ca(user='keystone', group='keystone'): |
1340 | """ |
1341 | Initialize a new CA object if one hasn't already been loaded. |
1342 | @@ -594,3 +520,11 @@ |
1343 | execute('chmod -R g+rwx %s' % SSL_DIR) |
1344 | CA.append(ca) |
1345 | return CA[0] |
1346 | + |
1347 | + |
1348 | +def https(): |
1349 | + if (utils.config_get('https-service-endpoints') in ["yes", "true", "True"] |
1350 | + or cluster.https()): |
1351 | + return True |
1352 | + else: |
1353 | + return False |
1354 | |
1355 | === added file 'hooks/lib/apache_utils.py' |
1356 | --- hooks/lib/apache_utils.py 1970-01-01 00:00:00 +0000 |
1357 | +++ hooks/lib/apache_utils.py 2013-03-19 14:13:27 +0000 |
1358 | @@ -0,0 +1,196 @@ |
1359 | +# |
1360 | +# Copyright 2012 Canonical Ltd. |
1361 | +# |
1362 | +# This file is sourced from lp:openstack-charm-helpers |
1363 | +# |
1364 | +# Authors: |
1365 | +# James Page <james.page@ubuntu.com> |
1366 | +# Adam Gandelman <adamg@ubuntu.com> |
1367 | +# |
1368 | + |
1369 | +from lib.utils import ( |
1370 | + relation_ids, |
1371 | + relation_list, |
1372 | + relation_get, |
1373 | + render_template, |
1374 | + juju_log, |
1375 | + config_get, |
1376 | + install, |
1377 | + get_host_ip, |
1378 | + restart |
1379 | + ) |
1380 | +from lib.cluster_utils import https |
1381 | + |
1382 | +import os |
1383 | +import subprocess |
1384 | +from base64 import b64decode |
1385 | + |
1386 | +APACHE_SITE_DIR = "/etc/apache2/sites-available" |
1387 | +SITE_TEMPLATE = "apache2_site.tmpl" |
1388 | +RELOAD_CHECK = "To activate the new configuration" |
1389 | + |
1390 | + |
1391 | +def get_cert(): |
1392 | + cert = config_get('ssl_cert') |
1393 | + key = config_get('ssl_key') |
1394 | + if not (cert and key): |
1395 | + juju_log('INFO', |
1396 | + "Inspecting identity-service relations for SSL certificate.") |
1397 | + cert = key = None |
1398 | + for r_id in relation_ids('identity-service'): |
1399 | + for unit in relation_list(r_id): |
1400 | + if not cert: |
1401 | + cert = relation_get('ssl_cert', |
1402 | + rid=r_id, unit=unit) |
1403 | + if not key: |
1404 | + key = relation_get('ssl_key', |
1405 | + rid=r_id, unit=unit) |
1406 | + return (cert, key) |
1407 | + |
1408 | + |
1409 | +def get_ca_cert(): |
1410 | + ca_cert = None |
1411 | + juju_log('INFO', |
1412 | + "Inspecting identity-service relations for CA SSL certificate.") |
1413 | + for r_id in relation_ids('identity-service'): |
1414 | + for unit in relation_list(r_id): |
1415 | + if not ca_cert: |
1416 | + ca_cert = relation_get('ca_cert', |
1417 | + rid=r_id, unit=unit) |
1418 | + return ca_cert |
1419 | + |
1420 | + |
1421 | +def install_ca_cert(ca_cert): |
1422 | + if ca_cert: |
1423 | + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', |
1424 | + 'w') as crt: |
1425 | + crt.write(ca_cert) |
1426 | + subprocess.check_call(['update-ca-certificates', '--fresh']) |
1427 | + |
1428 | + |
1429 | +def enable_https(port_maps, namespace, cert, key, ca_cert=None): |
1430 | + ''' |
1431 | + For a given number of port mappings, configures apache2 |
1432 | + HTTPs local reverse proxying using certficates and keys provided in |
1433 | + either configuration data (preferred) or relation data. Assumes ports |
1434 | + are not in use (calling charm should ensure that). |
1435 | + |
1436 | + port_maps: dict: external to internal port mappings |
1437 | + namespace: str: name of charm |
1438 | + ''' |
1439 | + def _write_if_changed(path, new_content): |
1440 | + content = None |
1441 | + if os.path.exists(path): |
1442 | + with open(path, 'r') as f: |
1443 | + content = f.read().strip() |
1444 | + if content != new_content: |
1445 | + with open(path, 'w') as f: |
1446 | + f.write(new_content) |
1447 | + return True |
1448 | + else: |
1449 | + return False |
1450 | + |
1451 | + juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps)) |
1452 | + http_restart = False |
1453 | + |
1454 | + if cert: |
1455 | + cert = b64decode(cert) |
1456 | + if key: |
1457 | + key = b64decode(key) |
1458 | + if ca_cert: |
1459 | + ca_cert = b64decode(ca_cert) |
1460 | + |
1461 | + if not cert and not key: |
1462 | + juju_log('ERROR', |
1463 | + "Expected but could not find SSL certificate data, not " |
1464 | + "configuring HTTPS!") |
1465 | + return False |
1466 | + |
1467 | + install('apache2') |
1468 | + if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl', |
1469 | + 'proxy', 'proxy_http']): |
1470 | + http_restart = True |
1471 | + |
1472 | + ssl_dir = os.path.join('/etc/apache2/ssl', namespace) |
1473 | + if not os.path.exists(ssl_dir): |
1474 | + os.makedirs(ssl_dir) |
1475 | + |
1476 | + if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)): |
1477 | + http_restart = True |
1478 | + if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)): |
1479 | + http_restart = True |
1480 | + os.chmod(os.path.join(ssl_dir, 'key'), 0600) |
1481 | + |
1482 | + install_ca_cert(ca_cert) |
1483 | + |
1484 | + sites_dir = '/etc/apache2/sites-available' |
1485 | + for ext_port, int_port in port_maps.items(): |
1486 | + juju_log('INFO', |
1487 | + 'Creating apache2 reverse proxy vhost' |
1488 | + ' for {}:{}'.format(ext_port, |
1489 | + int_port)) |
1490 | + site = "{}_{}".format(namespace, ext_port) |
1491 | + site_path = os.path.join(sites_dir, site) |
1492 | + with open(site_path, 'w') as fsite: |
1493 | + context = { |
1494 | + "ext": ext_port, |
1495 | + "int": int_port, |
1496 | + "namespace": namespace, |
1497 | + "private_address": get_host_ip() |
1498 | + } |
1499 | + fsite.write(render_template(SITE_TEMPLATE, |
1500 | + context)) |
1501 | + |
1502 | + if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]): |
1503 | + http_restart = True |
1504 | + |
1505 | + if http_restart: |
1506 | + restart('apache2') |
1507 | + |
1508 | + return True |
1509 | + |
1510 | + |
1511 | +def disable_https(port_maps, namespace): |
1512 | + ''' |
1513 | + Ensure HTTPS reverse proxying is disables for given port mappings |
1514 | + |
1515 | + port_maps: dict: of ext -> int port mappings |
1516 | + namespace: str: name of chamr |
1517 | + ''' |
1518 | + juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps)) |
1519 | + |
1520 | + if (not os.path.exists('/etc/apache2') or |
1521 | + not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))): |
1522 | + return |
1523 | + |
1524 | + http_restart = False |
1525 | + for ext_port in port_maps.keys(): |
1526 | + if os.path.exists(os.path.join(APACHE_SITE_DIR, |
1527 | + "{}_{}".format(namespace, |
1528 | + ext_port))): |
1529 | + juju_log('INFO', |
1530 | + "Disabling HTTPS reverse proxy" |
1531 | + " for {} {}.".format(namespace, |
1532 | + ext_port)) |
1533 | + if (RELOAD_CHECK in |
1534 | + subprocess.check_output(['a2dissite', |
1535 | + '{}_{}'.format(namespace, |
1536 | + ext_port)])): |
1537 | + http_restart = True |
1538 | + |
1539 | + if http_restart: |
1540 | + restart(['apache2']) |
1541 | + |
1542 | + |
1543 | +def setup_https(port_maps, namespace, cert, key, ca_cert=None): |
1544 | + ''' |
1545 | + Ensures HTTPS is either enabled or disabled for given port |
1546 | + mapping. |
1547 | + |
1548 | + port_maps: dict: of ext -> int port mappings |
1549 | + namespace: str: name of charm |
1550 | + ''' |
1551 | + if not https: |
1552 | + disable_https(port_maps, namespace) |
1553 | + else: |
1554 | + enable_https(port_maps, namespace, cert, key, ca_cert) |
1555 | |
1556 | === added file 'hooks/lib/cluster_utils.py' |
1557 | --- hooks/lib/cluster_utils.py 1970-01-01 00:00:00 +0000 |
1558 | +++ hooks/lib/cluster_utils.py 2013-03-19 14:13:27 +0000 |
1559 | @@ -0,0 +1,130 @@ |
1560 | +# |
1561 | +# Copyright 2012 Canonical Ltd. |
1562 | +# |
1563 | +# This file is sourced from lp:openstack-charm-helpers |
1564 | +# |
1565 | +# Authors: |
1566 | +# James Page <james.page@ubuntu.com> |
1567 | +# Adam Gandelman <adamg@ubuntu.com> |
1568 | +# |
1569 | + |
1570 | +from lib.utils import ( |
1571 | + juju_log, |
1572 | + relation_ids, |
1573 | + relation_list, |
1574 | + relation_get, |
1575 | + get_unit_hostname, |
1576 | + config_get |
1577 | + ) |
1578 | +import subprocess |
1579 | +import os |
1580 | + |
1581 | + |
1582 | +def is_clustered(): |
1583 | + for r_id in (relation_ids('ha') or []): |
1584 | + for unit in (relation_list(r_id) or []): |
1585 | + clustered = relation_get('clustered', |
1586 | + rid=r_id, |
1587 | + unit=unit) |
1588 | + if clustered: |
1589 | + return True |
1590 | + return False |
1591 | + |
1592 | + |
def is_leader(resource):
    '''
    Return True when this unit's hostname appears in the CRM status
    output for the given resource, i.e. this unit currently owns it.
    Returns False if crm itself fails.
    '''
    try:
        output = subprocess.check_output(
            ['crm', 'resource', 'show', resource])
    except subprocess.CalledProcessError:
        return False
    return get_unit_hostname() in output
1607 | + |
1608 | + |
def peer_units():
    '''Return the list of all units on the cluster peer relation.'''
    units = []
    for rid in (relation_ids('cluster') or []):
        units.extend(relation_list(rid) or [])
    return units
1615 | + |
1616 | + |
def oldest_peer(peers):
    '''
    Return True when the local unit number is not greater than any
    peer's unit number (i.e. this is the longest-running unit and
    should act as de-facto leader in the absence of a cluster).
    '''
    local_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_no for peer in peers)
1624 | + |
1625 | + |
def eligible_leader(resource):
    '''
    Decide whether this unit should perform leader-only actions for
    the given resource: CRM leadership when clustered, otherwise the
    oldest peer (or a lone unit) wins.
    '''
    if is_clustered():
        if is_leader(resource):
            return True
        juju_log('INFO', 'Deferring action to CRM leader.')
        return False
    peers = peer_units()
    if peers and not oldest_peer(peers):
        juju_log('INFO', 'Deferring action to oldest service unit.')
        return False
    return True
1637 | + |
1638 | + |
def https():
    '''
    Determine whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    required = ('https_keystone', 'ssl_cert', 'ssl_key', 'ca_cert')
    for rid in relation_ids('identity-service'):
        for member in relation_list(rid):
            if all(relation_get(setting, rid=rid, unit=member)
                   for setting in required):
                return True
    return False
1658 | + |
1659 | + |
def determine_api_port(public_port):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the API service
    '''
    offset = 0
    if peer_units() or is_clustered():
        offset += 10  # haproxy sits in front of the API server
    if https():
        offset += 10  # apache SSL reverse proxy sits in front too
    return public_port - offset
1675 | + |
1676 | + |
def determine_haproxy_port(public_port):
    '''
    Determine correct haproxy listening port based on the existence
    of an HTTPS reverse proxy in front of it.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the HAProxy service
    '''
    if https():
        return public_port - 10
    return public_port
1690 | |
1691 | === added file 'hooks/lib/haproxy_utils.py' |
1692 | --- hooks/lib/haproxy_utils.py 1970-01-01 00:00:00 +0000 |
1693 | +++ hooks/lib/haproxy_utils.py 2013-03-19 14:13:27 +0000 |
1694 | @@ -0,0 +1,55 @@ |
1695 | +# |
1696 | +# Copyright 2012 Canonical Ltd. |
1697 | +# |
1698 | +# This file is sourced from lp:openstack-charm-helpers |
1699 | +# |
1700 | +# Authors: |
1701 | +# James Page <james.page@ubuntu.com> |
1702 | +# Adam Gandelman <adamg@ubuntu.com> |
1703 | +# |
1704 | + |
1705 | +from lib.utils import ( |
1706 | + relation_ids, |
1707 | + relation_list, |
1708 | + relation_get, |
1709 | + unit_get, |
1710 | + reload, |
1711 | + render_template |
1712 | + ) |
1713 | +import os |
1714 | + |
1715 | +HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' |
1716 | +HAPROXY_DEFAULT = '/etc/default/haproxy' |
1717 | + |
1718 | + |
def configure_haproxy(service_ports):
    '''
    Configure HAProxy based on the current peers in the service
    cluster using the provided port map:

        "swift": [ 8080, 8070 ]

    HAProxy will also be reloaded/started if required.

    service_ports: dict: dict of lists of [ frontend, backend ]
    '''
    local_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
    cluster_hosts = {local_name: unit_get('private-address')}
    for rid in relation_ids('cluster'):
        for member in relation_list(rid):
            cluster_hosts[member.replace('/', '-')] = \
                relation_get(attribute='private-address',
                             rid=rid, unit=member)
    context = {
        'units': cluster_hosts,
        'service_ports': service_ports,
    }
    with open(HAPROXY_CONF, 'w') as conf:
        conf.write(render_template(os.path.basename(HAPROXY_CONF),
                                   context))
    with open(HAPROXY_DEFAULT, 'w') as default:
        default.write('ENABLED=1')

    reload('haproxy')
1750 | |
1751 | === modified file 'hooks/lib/openstack_common.py' |
1752 | --- hooks/lib/openstack_common.py 2013-03-08 11:50:40 +0000 |
1753 | +++ hooks/lib/openstack_common.py 2013-03-19 14:13:27 +0000 |
1754 | @@ -13,7 +13,7 @@ |
1755 | 'oneiric': 'diablo', |
1756 | 'precise': 'essex', |
1757 | 'quantal': 'folsom', |
1758 | - 'raring' : 'grizzly', |
1759 | + 'raring': 'grizzly', |
1760 | } |
1761 | |
1762 | |
1763 | @@ -34,6 +34,7 @@ |
1764 | '1.7.7': 'grizzly', |
1765 | } |
1766 | |
1767 | + |
1768 | def juju_log(msg): |
1769 | subprocess.check_call(['juju-log', msg]) |
1770 | |
1771 | @@ -78,6 +79,7 @@ |
1772 | if v in src: |
1773 | return v |
1774 | |
1775 | + |
1776 | def get_os_codename_version(vers): |
1777 | '''Determine OpenStack codename from version number.''' |
1778 | try: |
1779 | @@ -136,6 +138,7 @@ |
1780 | e = "Could not determine OpenStack version for package: %s" % pkg |
1781 | error_out(e) |
1782 | |
1783 | + |
1784 | def configure_installation_source(rel): |
1785 | '''Configure apt installation source.''' |
1786 | |
1787 | @@ -154,7 +157,7 @@ |
1788 | subprocess.check_call(["add-apt-repository", "-y", src]) |
1789 | elif rel[:3] == "deb": |
1790 | l = len(rel.split('|')) |
1791 | - if l == 2: |
1792 | + if l == 2: |
1793 | src, key = rel.split('|') |
1794 | juju_log("Importing PPA key from keyserver for %s" % src) |
1795 | _import_key(key) |
1796 | @@ -211,6 +214,7 @@ |
1797 | HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' |
1798 | HAPROXY_DEFAULT = '/etc/default/haproxy' |
1799 | |
1800 | + |
1801 | def configure_haproxy(units, service_ports, template_dir=None): |
1802 | template_dir = template_dir or 'templates' |
1803 | import jinja2 |
1804 | @@ -229,6 +233,7 @@ |
1805 | with open(HAPROXY_DEFAULT, 'w') as f: |
1806 | f.write('ENABLED=1') |
1807 | |
1808 | + |
1809 | def save_script_rc(script_path="scripts/scriptrc", **env_vars): |
1810 | """ |
1811 | Write an rc file in the charm-delivered directory containing |
1812 | @@ -238,7 +243,7 @@ |
1813 | service changes. |
1814 | """ |
1815 | unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-') |
1816 | - juju_rc_path="/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path) |
1817 | + juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path) |
1818 | with open(juju_rc_path, 'wb') as rc_script: |
1819 | rc_script.write( |
1820 | "#!/bin/bash\n") |
1821 | |
1822 | === modified file 'hooks/lib/unison.py' |
1823 | --- hooks/lib/unison.py 2013-02-13 05:57:44 +0000 |
1824 | +++ hooks/lib/unison.py 2013-03-19 14:13:27 +0000 |
1825 | @@ -40,12 +40,10 @@ |
1826 | # Either copy required functionality to this library or depend on |
1827 | # something more generic. |
1828 | |
1829 | -import json |
1830 | import os |
1831 | import sys |
1832 | -import utils |
1833 | +import lib.utils as utils |
1834 | import subprocess |
1835 | -import shutil |
1836 | import grp |
1837 | import pwd |
1838 | |
1839 | @@ -55,7 +53,8 @@ |
1840 | user = pwd.getpwnam(user) |
1841 | return user.pw_dir |
1842 | except KeyError: |
1843 | - utils.juju_log('Could not get homedir for user %s: user exists?') |
1844 | + utils.juju_log('INFO', |
1845 | + 'Could not get homedir for user %s: user exists?') |
1846 | sys.exit(1) |
1847 | |
1848 | |
1849 | @@ -67,27 +66,29 @@ |
1850 | |
1851 | priv_key = os.path.join(ssh_dir, 'id_rsa') |
1852 | if not os.path.isfile(priv_key): |
1853 | - utils.juju_log('Generating new ssh key for user %s.' % user) |
1854 | + utils.juju_log('INFO', 'Generating new ssh key for user %s.' % user) |
1855 | cmd = ['ssh-keygen', '-q', '-N', '', '-t', 'rsa', '-b', '2048', |
1856 | '-f', priv_key] |
1857 | subprocess.check_call(cmd) |
1858 | |
1859 | pub_key = '%s.pub' % priv_key |
1860 | if not os.path.isfile(pub_key): |
1861 | - utils.juju_log('Generatring missing ssh public key @ %s.' % pub_key) |
1862 | + utils.juju_log('INFO', 'Generatring missing ssh public key @ %s.' % \ |
1863 | + pub_key) |
1864 | cmd = ['ssh-keygen', '-y', '-f', priv_key] |
1865 | p = subprocess.check_output(cmd).strip() |
1866 | with open(pub_key, 'wb') as out: |
1867 | out.write(p) |
1868 | subprocess.check_call(['chown', '-R', user, ssh_dir]) |
1869 | - return open(priv_key, 'r').read().strip(), open(pub_key, 'r').read().strip() |
1870 | + return open(priv_key, 'r').read().strip(), \ |
1871 | + open(pub_key, 'r').read().strip() |
1872 | |
1873 | |
def write_authorized_keys(user, keys):
    '''Overwrite the user's authorized_keys file with the given keys.'''
    auth_keys = os.path.join(get_homedir(user), '.ssh', 'authorized_keys')
    utils.juju_log('INFO', 'Syncing authorized_keys @ %s.' % auth_keys)
    with open(auth_keys, 'wb') as out:
        out.write(''.join('%s\n' % k for k in keys))
1883 | @@ -96,13 +97,13 @@ |
1884 | def write_known_hosts(user, hosts): |
1885 | home_dir = get_homedir(user) |
1886 | ssh_dir = os.path.join(home_dir, '.ssh') |
1887 | - known_hosts = os.path.join(ssh_dir, 'known_hosts') |
1888 | + known_hosts = os.path.join(ssh_dir, 'known_hosts') |
1889 | khosts = [] |
1890 | for host in hosts: |
1891 | cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] |
1892 | remote_key = subprocess.check_output(cmd).strip() |
1893 | khosts.append(remote_key) |
1894 | - utils.juju_log('Syncing known_hosts @ %s.' % known_hosts) |
1895 | + utils.juju_log('INFO', 'Syncing known_hosts @ %s.' % known_hosts) |
1896 | with open(known_hosts, 'wb') as out: |
1897 | for host in khosts: |
1898 | out.write('%s\n' % host) |
1899 | @@ -113,7 +114,7 @@ |
1900 | try: |
1901 | pwd.getpwnam(user) |
1902 | except KeyError: |
1903 | - utils.juju_log('Creating new user %s.%s.' % (user, group)) |
1904 | + utils.juju_log('INFO', 'Creating new user %s.%s.' % (user, group)) |
1905 | cmd = ['adduser', '--system', '--shell', '/bin/bash', user] |
1906 | if group: |
1907 | try: |
1908 | @@ -134,7 +135,7 @@ |
1909 | priv_key, pub_key = get_keypair(user) |
1910 | hook = os.path.basename(sys.argv[0]) |
1911 | if hook == '%s-relation-joined' % peer_interface: |
1912 | - utils.relation_set_2(ssh_pub_key=pub_key) |
1913 | + utils.relation_set(ssh_pub_key=pub_key) |
1914 | print 'joined' |
1915 | elif hook == '%s-relation-changed' % peer_interface: |
1916 | hosts = [] |
1917 | @@ -147,34 +148,38 @@ |
1918 | keys.append(settings['ssh_pub_key']) |
1919 | hosts.append(settings['private-address']) |
1920 | else: |
1921 | - utils.juju_log('ssh_authorized_peers(): ssh_pub_key '\ |
1922 | + utils.juju_log('INFO', |
1923 | + 'ssh_authorized_peers(): ssh_pub_key '\ |
1924 | 'missing for unit %s, skipping.' % unit) |
1925 | write_authorized_keys(user, keys) |
1926 | write_known_hosts(user, hosts) |
1927 | authed_hosts = ':'.join(hosts) |
1928 | - utils.relation_set_2(ssh_authorized_hosts=authed_hosts) |
1929 | + utils.relation_set(ssh_authorized_hosts=authed_hosts) |
1930 | |
1931 | |
1932 | def _run_as_user(user): |
1933 | try: |
1934 | user = pwd.getpwnam(user) |
1935 | except KeyError: |
1936 | - utils.juju_log('Invalid user: %s' % user) |
1937 | + utils.juju_log('INFO', 'Invalid user: %s' % user) |
1938 | sys.exit(1) |
1939 | uid, gid = user.pw_uid, user.pw_gid |
1940 | os.environ['HOME'] = user.pw_dir |
1941 | + |
1942 | def _inner(): |
1943 | os.setgid(gid) |
1944 | os.setuid(uid) |
1945 | return _inner |
1946 | |
1947 | + |
def run_as_user(user, cmd):
    '''Execute cmd as the given user and return its stdout.'''
    return subprocess.check_output(cmd, preexec_fn=_run_as_user(user))
1950 | |
1951 | + |
1952 | def sync_to_peers(peer_interface, user, paths=[], verbose=False): |
1953 | base_cmd = ['unison', '-auto', '-batch=true', '-confirmbigdel=false', |
1954 | - '-fastcheck=true', '-group=false', '-owner=false', '-prefer=newer', |
1955 | - '-times=true'] |
1956 | + '-fastcheck=true', '-group=false', '-owner=false', |
1957 | + '-prefer=newer', '-times=true'] |
1958 | if not verbose: |
1959 | base_cmd.append('-silent') |
1960 | |
1961 | @@ -206,10 +211,10 @@ |
1962 | # removing trailing slash from directory paths, unison |
1963 | # doesn't like these. |
1964 | if path.endswith('/'): |
1965 | - path = path[:(len(path)-1)] |
1966 | + path = path[:(len(path) - 1)] |
1967 | for host in hosts: |
1968 | cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)] |
1969 | - utils.juju_log('Syncing local path %s to %s@%s:%s' %\ |
1970 | + utils.juju_log('INFO', 'Syncing local path %s to %s@%s:%s' %\ |
1971 | (path, user, host, path)) |
1972 | print ' '.join(cmd) |
1973 | run_as_user(user, cmd) |
1974 | |
1975 | === added file 'hooks/lib/utils.py' |
1976 | --- hooks/lib/utils.py 1970-01-01 00:00:00 +0000 |
1977 | +++ hooks/lib/utils.py 2013-03-19 14:13:27 +0000 |
1978 | @@ -0,0 +1,329 @@ |
1979 | +# |
1980 | +# Copyright 2012 Canonical Ltd. |
1981 | +# |
1982 | +# This file is sourced from lp:openstack-charm-helpers |
1983 | +# |
1984 | +# Authors: |
1985 | +# James Page <james.page@ubuntu.com> |
1986 | +# Paul Collins <paul.collins@canonical.com> |
1987 | +# Adam Gandelman <adamg@ubuntu.com> |
1988 | +# |
1989 | + |
1990 | +import json |
1991 | +import os |
1992 | +import subprocess |
1993 | +import socket |
1994 | +import sys |
1995 | + |
1996 | + |
def do_hooks(hooks):
    '''
    Dispatch to the handler registered for the currently executing
    hook, derived from the invoking script's basename.  Unknown hooks
    are logged and ignored.
    '''
    hook_name = os.path.basename(sys.argv[0])
    if hook_name in hooks:
        hooks[hook_name]()
    else:
        juju_log('INFO',
                 "This charm doesn't know how to handle '{}'.".format(
                     hook_name))
2007 | + |
2008 | + |
def install(*pkgs):
    '''Install the given packages non-interactively via apt-get.'''
    subprocess.check_call(['apt-get', '-y', 'install'] + list(pkgs))
2018 | + |
2019 | +TEMPLATES_DIR = 'templates' |
2020 | + |
2021 | +try: |
2022 | + import jinja2 |
2023 | +except ImportError: |
2024 | + install('python-jinja2') |
2025 | + import jinja2 |
2026 | + |
2027 | +try: |
2028 | + import dns.resolver |
2029 | +except ImportError: |
2030 | + install('python-dnspython') |
2031 | + import dns.resolver |
2032 | + |
2033 | + |
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    '''Render the named jinja2 template from template_dir with context.'''
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir))
    return env.get_template(template_name).render(context)
2040 | + |
2041 | +CLOUD_ARCHIVE = \ |
2042 | +""" # Ubuntu Cloud Archive |
2043 | +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
2044 | +""" |
2045 | + |
2046 | +CLOUD_ARCHIVE_POCKETS = { |
2047 | + 'folsom': 'precise-updates/folsom', |
2048 | + 'folsom/updates': 'precise-updates/folsom', |
2049 | + 'folsom/proposed': 'precise-proposed/folsom', |
2050 | + 'grizzly': 'precise-updates/grizzly', |
2051 | + 'grizzly/updates': 'precise-updates/grizzly', |
2052 | + 'grizzly/proposed': 'precise-proposed/grizzly' |
2053 | + } |
2054 | + |
2055 | + |
def configure_source():
    '''
    Configure the apt installation source from the 'openstack-origin'
    charm config option, then refresh the package index.

    Supported forms: 'ppa:<ppa>', 'cloud:<pocket>' (Ubuntu Cloud
    Archive) and 'deb ...' lines, optionally suffixed with
    '|<key-id>' to import a signing key.
    '''
    source = config_get('openstack-origin')
    # BUG FIX: str() before the emptiness check turned an unset option
    # (None) into the truthy string "None", defeating the guard.
    if not source:
        return
    source = str(source)
    if source.startswith('ppa:'):
        subprocess.check_call(['add-apt-repository', source])
    if source.startswith('cloud:'):
        install('ubuntu-cloud-keyring')
        pocket = source.split(':')[1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
    if source.startswith('deb'):
        parts = source.split('|')
        if len(parts) == 2:
            apt_line, key = parts
            # BUG FIX: '--keyserver keyserver.ubuntu.com' was passed as
            # a single argv element, which apt-key cannot parse; each
            # flag and its value must be separate list items.
            subprocess.check_call([
                'apt-key', 'adv',
                '--keyserver', 'keyserver.ubuntu.com',
                '--recv-keys', key,
            ])
        else:
            # Previously only l == 1 was handled; any other split count
            # left apt_line unbound and crashed below.
            apt_line = parts[0]
        with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
            apt.write(apt_line + "\n")
    subprocess.check_call(['apt-get', 'update'])
2091 | + |
2092 | +# Protocols |
2093 | +TCP = 'TCP' |
2094 | +UDP = 'UDP' |
2095 | + |
2096 | + |
def expose(port, protocol='TCP'):
    '''Open the given port/protocol on this unit via open-port.'''
    subprocess.check_call(
        ['open-port', '{}/{}'.format(port, protocol)])
2103 | + |
2104 | + |
def juju_log(severity, message):
    '''Write a message to the juju log at the given severity level.'''
    subprocess.check_call(
        ['juju-log', '--log-level', severity, message])
2112 | + |
2113 | + |
2114 | +cache = {} |
2115 | + |
2116 | + |
def cached(func):
    '''
    Memoise func in the module-level cache for the lifetime of this
    hook execution (juju state is stable within a single hook run).

    Now preserves func's name/docstring via functools.wraps, and
    creates the module cache dict if it is missing rather than
    assuming it already exists.
    '''
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        store = globals().setdefault('cache', {})
        key = str((func, args, kwargs))
        if key not in store:
            store[key] = func(*args, **kwargs)
        return store[key]
    return wrapper
2128 | + |
2129 | + |
@cached
def relation_ids(relation):
    '''
    Return the list of relation ids for the named relation.

    Always returns a list (possibly empty).  The previous comparison
    of the split() result against "" was dead code -- a list never
    equals a string -- and callers such as configure_haproxy already
    iterate the return value directly, relying on a list.
    '''
    output = subprocess.check_output(['relation-ids', relation])
    return str(output).split()
2141 | + |
2142 | + |
@cached
def relation_list(rid):
    '''
    Return the list of unit names on the given relation id.

    Always returns a list (possibly empty); the former "== ''" check
    on the split() list could never match, so the None branch was
    unreachable dead code.
    '''
    output = subprocess.check_output(['relation-list', '-r', rid])
    return str(output).split()
2154 | + |
2155 | + |
@cached
def relation_get(attribute, unit=None, rid=None):
    '''
    Fetch a single attribute from relation data via relation-get,
    optionally targeting a specific relation id and unit.  Returns
    None when the attribute is unset.
    '''
    cmd = ['relation-get']
    if rid:
        cmd += ['-r', rid]
    cmd.append(attribute)
    if unit:
        cmd.append(unit)
    value = subprocess.check_output(cmd).strip()  # IGNORE:E1103
    return value if value != "" else None
2172 | + |
2173 | + |
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
    """
    Obtain all relation data as dict by way of JSON.

    When remote_unit is given, JUJU_REMOTE_UNIT is temporarily
    overridden for the relation-get call and then restored.  The
    restore now also handles the case where the variable was not set
    beforehand (previously the override leaked into the environment)
    and runs even if the subprocess call raises.
    """
    cmd = ['relation-get', '--format=json']
    if relation_id:
        cmd += ['-r', relation_id]
    remote_unit_orig = None
    if remote_unit:
        remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
        os.environ['JUJU_REMOTE_UNIT'] = remote_unit
    try:
        j = subprocess.check_output(cmd)
    finally:
        if remote_unit:
            if remote_unit_orig is not None:
                os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
            else:
                del os.environ['JUJU_REMOTE_UNIT']
    d = json.loads(j)
    # convert unicode to strings
    settings = {}
    for k, v in d.iteritems():
        settings[str(k)] = str(v)
    return settings
2195 | + |
2196 | + |
def relation_set(**kwargs):
    '''
    Set relation attributes via relation-set.  A special 'rid' kwarg,
    if truthy, targets a specific relation id; all other kwargs become
    key=value settings.
    '''
    cmd = ['relation-set']
    settings = []
    for key, value in kwargs.items():
        if key == 'rid':
            if value:
                cmd += ['-r', value]
        else:
            settings.append('{}={}'.format(key, value))
    subprocess.check_call(cmd + settings)
2211 | + |
2212 | + |
@cached
def unit_get(attribute):
    '''Return a local unit attribute via unit-get, or None if empty.'''
    value = subprocess.check_output(
        ['unit-get', attribute]).strip()  # IGNORE:E1103
    return value or None
2224 | + |
2225 | + |
@cached
def config_get(attribute):
    '''Return a charm config option, or None when it is undefined.'''
    out = subprocess.check_output(
        ['config-get', '--format', 'json']).strip()  # IGNORE:E1103
    return json.loads(out).get(attribute)
2240 | + |
2241 | + |
@cached
def get_unit_hostname():
    '''Return this machine's hostname.'''
    return socket.gethostname()
2245 | + |
2246 | + |
@cached
def get_host_ip(hostname=None):
    '''
    Resolve hostname (defaulting to this unit's private-address) to
    an IPv4 address.  Returns the input unchanged if it already is a
    dotted-quad, or None if DNS resolution yields no answers.

    BUG FIX: the old default `hostname=unit_get('private-address')`
    was evaluated at import time, spawning `unit-get` outside any
    call and making the module un-importable outside a hook
    environment; the default is now resolved lazily per call.
    '''
    if hostname is None:
        hostname = unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        answers = dns.resolver.query(hostname, 'A')
        if answers:
            return answers[0].address
    return None
2258 | + |
2259 | + |
def _svc_control(service, action):
    '''Run `service <name> <action>` (sysvinit/upstart control).'''
    cmd = ['service', service, action]
    subprocess.check_call(cmd)
2262 | + |
2263 | + |
def restart(*services):
    '''Restart each of the given system services.'''
    for svc in services:
        _svc_control(svc, 'restart')
2267 | + |
2268 | + |
def stop(*services):
    '''Stop each of the given system services.'''
    for svc in services:
        _svc_control(svc, 'stop')
2272 | + |
2273 | + |
def start(*services):
    '''Start each of the given system services.'''
    for svc in services:
        _svc_control(svc, 'start')
2277 | + |
2278 | + |
def reload(*services):
    '''Reload each service, falling back to a restart on failure.'''
    for svc in services:
        try:
            _svc_control(svc, 'reload')
        except subprocess.CalledProcessError:
            # Reload failed - either service does not support reload
            # or it was not running - restart will fixup most things
            _svc_control(svc, 'restart')
2287 | + |
2288 | + |
def running(service):
    '''Return True if `service <name> status` reports it as running.'''
    try:
        output = subprocess.check_output(['service', service, 'status'])
    except subprocess.CalledProcessError:
        return False
    return "start/running" in output or "is running" in output
2300 | + |
2301 | + |
def is_relation_made(relation, key='private-address'):
    '''
    Return True when at least one unit on the named relation has
    published a value for the given key.
    '''
    for rid in (relation_ids(relation) or []):
        for member in (relation_list(rid) or []):
            if relation_get(key, rid=rid, unit=member):
                return True
    return False
2308 | |
2309 | === modified file 'hooks/manager.py' |
2310 | --- hooks/manager.py 2012-02-29 01:18:17 +0000 |
2311 | +++ hooks/manager.py 2013-03-19 14:13:27 +0000 |
2312 | @@ -1,6 +1,7 @@ |
2313 | #!/usr/bin/python |
2314 | from keystoneclient.v2_0 import client |
2315 | |
2316 | + |
2317 | class KeystoneManager(object): |
2318 | def __init__(self, endpoint, token): |
2319 | self.api = client.Client(endpoint=endpoint, token=token) |
2320 | |
2321 | === modified symlink 'hooks/shared-db-relation-changed' |
2322 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
2323 | === modified symlink 'hooks/shared-db-relation-joined' |
2324 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
2325 | === modified symlink 'hooks/upgrade-charm' |
2326 | === target changed u'keystone-hooks' => u'keystone_hooks.py' |
2327 | === modified file 'templates/haproxy.cfg' |
2328 | --- templates/haproxy.cfg 2013-02-15 15:56:14 +0000 |
2329 | +++ templates/haproxy.cfg 2013-03-19 14:13:27 +0000 |
2330 | @@ -1,7 +1,7 @@ |
2331 | global |
2332 | log 127.0.0.1 local0 |
2333 | log 127.0.0.1 local1 notice |
2334 | - maxconn 4096 |
2335 | + maxconn 20000 |
2336 | user haproxy |
2337 | group haproxy |
2338 | spread-checks 0 |
2339 | @@ -14,8 +14,8 @@ |
2340 | retries 3 |
2341 | timeout queue 1000 |
2342 | timeout connect 1000 |
2343 | - timeout client 10000 |
2344 | - timeout server 10000 |
2345 | + timeout client 30000 |
2346 | + timeout server 30000 |
2347 | |
2348 | listen stats :8888 |
2349 | mode http |
2350 | @@ -25,11 +25,11 @@ |
2351 | stats uri / |
2352 | stats auth admin:password |
2353 | |
2354 | -{% for service, port in service_ports.iteritems() -%} |
2355 | -listen {{ service }} 0.0.0.0:{{ port }} |
2356 | +{% for service, ports in service_ports.iteritems() -%} |
2357 | +listen {{ service }} 0.0.0.0:{{ ports[0] }} |
2358 | balance roundrobin |
2359 | option tcplog |
2360 | {% for unit, address in units.iteritems() -%} |
2361 | - server {{ unit }} {{ address }}:{{ port - 1 }} check |
2362 | + server {{ unit }} {{ address }}:{{ ports[1] }} check |
2363 | {% endfor %} |
2364 | {% endfor %} |