Merge lp:~openstack-charmers/charms/precise/swift-proxy/ha-support into lp:~charmers/charms/precise/swift-proxy/trunk
- Precise Pangolin (12.04)
- ha-support
- Merge into trunk
Proposed by
Adam Gandelman
Status: | Merged |
---|---|
Merged at revision: | 41 |
Proposed branch: | lp:~openstack-charmers/charms/precise/swift-proxy/ha-support |
Merge into: | lp:~charmers/charms/precise/swift-proxy/trunk |
Diff against target: |
2143 lines (+1290/-485) 20 files modified
.project (+17/-0) .pydevproject (+8/-0) config.yaml (+51/-1) hooks/lib/apache_utils.py (+193/-0) hooks/lib/cluster_utils.py (+130/-0) hooks/lib/haproxy_utils.py (+52/-0) hooks/lib/openstack_common.py (+41/-35) hooks/lib/utils.py (+332/-0) hooks/swift-hooks.py (+0/-164) hooks/swift_hooks.py (+257/-0) hooks/swift_utils.py (+121/-47) hooks/utils.py (+0/-237) metadata.yaml (+6/-0) revision (+1/-1) scripts/add_to_cluster (+13/-0) scripts/remove_from_cluster (+4/-0) templates/apache2_site.tmpl (+19/-0) templates/essex/proxy-server.conf (+9/-0) templates/grizzly/proxy-server.conf (+1/-0) templates/haproxy.cfg (+35/-0) |
To merge this branch: | bzr merge lp:~openstack-charmers/charms/precise/swift-proxy/ha-support |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
charmers | Pending | ||
Review via email: mp+166338@code.launchpad.net |
Commit message
Description of the change
* Python rewrite
* Support for manually assigned storage zones
* Support for high availability via hacluster subordinate.
* Adds Grizzly compatibility.
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file '.project' |
2 | --- .project 1970-01-01 00:00:00 +0000 |
3 | +++ .project 2013-05-29 18:03:27 +0000 |
4 | @@ -0,0 +1,17 @@ |
5 | +<?xml version="1.0" encoding="UTF-8"?> |
6 | +<projectDescription> |
7 | + <name>swift-proxy</name> |
8 | + <comment></comment> |
9 | + <projects> |
10 | + </projects> |
11 | + <buildSpec> |
12 | + <buildCommand> |
13 | + <name>org.python.pydev.PyDevBuilder</name> |
14 | + <arguments> |
15 | + </arguments> |
16 | + </buildCommand> |
17 | + </buildSpec> |
18 | + <natures> |
19 | + <nature>org.python.pydev.pythonNature</nature> |
20 | + </natures> |
21 | +</projectDescription> |
22 | |
23 | === added file '.pydevproject' |
24 | --- .pydevproject 1970-01-01 00:00:00 +0000 |
25 | +++ .pydevproject 2013-05-29 18:03:27 +0000 |
26 | @@ -0,0 +1,8 @@ |
27 | +<?xml version="1.0" encoding="UTF-8" standalone="no"?> |
28 | +<?eclipse-pydev version="1.0"?><pydev_project> |
29 | +<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property> |
30 | +<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property> |
31 | +<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH"> |
32 | +<path>/swift-proxy/hooks</path> |
33 | +</pydev_pathproperty> |
34 | +</pydev_project> |
35 | |
36 | === modified file 'config.yaml' |
37 | --- config.yaml 2012-12-14 23:07:01 +0000 |
38 | +++ config.yaml 2013-05-29 18:03:27 +0000 |
39 | @@ -46,7 +46,25 @@ |
40 | zones before the storage ring will be initially balance. Deployment |
41 | requirements differ based on the zone-assignment policy configured, see |
42 | this charm's README for details. |
43 | - # CA Cert info |
44 | + # User provided SSL cert and key |
45 | + ssl_cert: |
46 | + type: string |
47 | + description: | |
48 | + Base64 encoded SSL certificate to install and use for API ports. |
49 | + . |
50 | + juju set swift-proxy ssl_cert="$(cat cert | base64)" \ |
51 | + ssl_key="$(cat key | base64)" |
52 | + . |
53 | + Setting this value (and ssl_key) will enable reverse proxying, point |
54 | + Swift's entry in the Keystone catalog to use https, and override |
55 | + any certificate and key issued by Keystone (if it is configured to |
56 | + do so). |
57 | + ssl_key: |
58 | + type: string |
59 | + description: | |
60 | + Base64 encoded SSL key to use with certificate specified as ssl_cert. |
61 | + # Locally generated CA Cert info (only use without keystone) |
62 | + # These options are deprecated and will be removed sometime |
63 | use-https: |
64 | default: "yes" |
65 | type: string |
66 | @@ -67,6 +85,7 @@ |
67 | default: CN |
68 | type: string |
69 | description: Common Name |
70 | + # General Swift Proxy configuration |
71 | bind-port: |
72 | default: 8080 |
73 | type: int |
74 | @@ -83,6 +102,10 @@ |
75 | default: tempauth |
76 | type: string |
77 | description: Auth method to use, tempauth or keystone |
78 | + delay-auth-decision: |
79 | + default: true |
80 | + type: boolean |
81 | + description: Delay authentication to downstream WSGI services. |
82 | # Manual Keystone configuration. |
83 | keystone-auth-host: |
84 | type: string |
85 | @@ -105,3 +128,30 @@ |
86 | keystone-admin-password: |
87 | type: string |
88 | description: Keystone admin password |
89 | + # HA configuration settings |
90 | + swift-hash: |
91 | + type: string |
92 | + description: Hash to use across all swift-proxy servers - don't lose |
93 | + vip: |
94 | + type: string |
95 | + description: "Virtual IP to use to front swift-proxy in ha configuration" |
96 | + vip_iface: |
97 | + type: string |
98 | + default: eth0 |
99 | + description: "Network Interface where to place the Virtual IP" |
100 | + vip_cidr: |
101 | + type: int |
102 | + default: 24 |
103 | + description: "Netmask that will be used for the Virtual IP" |
104 | + ha-bindiface: |
105 | + type: string |
106 | + default: eth0 |
107 | + description: | |
108 | + Default network interface on which HA cluster will bind for communication |
109 | + with the other members of the HA Cluster. |
110 | + ha-mcastport: |
111 | + type: int |
112 | + default: 5414 |
113 | + description: | |
114 | + Default multicast port number that will be used to communicate between |
115 | + HA Cluster nodes. |
116 | |
117 | === added symlink 'hooks/cluster-relation-changed' |
118 | === target is u'swift_hooks.py' |
119 | === added symlink 'hooks/cluster-relation-joined' |
120 | === target is u'swift_hooks.py' |
121 | === modified symlink 'hooks/config-changed' |
122 | === target changed u'swift-hooks.py' => u'swift_hooks.py' |
123 | === added symlink 'hooks/ha-relation-changed' |
124 | === target is u'swift_hooks.py' |
125 | === added symlink 'hooks/ha-relation-joined' |
126 | === target is u'swift_hooks.py' |
127 | === modified symlink 'hooks/identity-service-relation-changed' |
128 | === target changed u'swift-hooks.py' => u'swift_hooks.py' |
129 | === modified symlink 'hooks/identity-service-relation-joined' |
130 | === target changed u'swift-hooks.py' => u'swift_hooks.py' |
131 | === modified symlink 'hooks/install' |
132 | === target changed u'swift-hooks.py' => u'swift_hooks.py' |
133 | === added file 'hooks/lib/apache_utils.py' |
134 | --- hooks/lib/apache_utils.py 1970-01-01 00:00:00 +0000 |
135 | +++ hooks/lib/apache_utils.py 2013-05-29 18:03:27 +0000 |
136 | @@ -0,0 +1,193 @@ |
137 | +# |
138 | +# Copyright 2012 Canonical Ltd. |
139 | +# |
140 | +# Authors: |
141 | +# James Page <james.page@ubuntu.com> |
142 | +# |
143 | + |
144 | +from lib.utils import ( |
145 | + relation_ids, |
146 | + relation_list, |
147 | + relation_get, |
148 | + render_template, |
149 | + juju_log, |
150 | + config_get, |
151 | + install, |
152 | + get_host_ip, |
153 | + restart |
154 | + ) |
155 | +from lib.cluster_utils import https |
156 | + |
157 | +import os |
158 | +import subprocess |
159 | +from base64 import b64decode |
160 | + |
161 | +APACHE_SITE_DIR = "/etc/apache2/sites-available" |
162 | +SITE_TEMPLATE = "apache2_site.tmpl" |
163 | +RELOAD_CHECK = "To activate the new configuration" |
164 | + |
165 | + |
166 | +def get_cert(): |
167 | + cert = config_get('ssl_cert') |
168 | + key = config_get('ssl_key') |
169 | + if not (cert and key): |
170 | + juju_log('INFO', |
171 | + "Inspecting identity-service relations for SSL certificate.") |
172 | + cert = key = None |
173 | + for r_id in relation_ids('identity-service'): |
174 | + for unit in relation_list(r_id): |
175 | + if not cert: |
176 | + cert = relation_get('ssl_cert', |
177 | + rid=r_id, unit=unit) |
178 | + if not key: |
179 | + key = relation_get('ssl_key', |
180 | + rid=r_id, unit=unit) |
181 | + return (cert, key) |
182 | + |
183 | + |
184 | +def get_ca_cert(): |
185 | + ca_cert = None |
186 | + juju_log('INFO', |
187 | + "Inspecting identity-service relations for CA SSL certificate.") |
188 | + for r_id in relation_ids('identity-service'): |
189 | + for unit in relation_list(r_id): |
190 | + if not ca_cert: |
191 | + ca_cert = relation_get('ca_cert', |
192 | + rid=r_id, unit=unit) |
193 | + return ca_cert |
194 | + |
195 | + |
196 | +def install_ca_cert(ca_cert): |
197 | + if ca_cert: |
198 | + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', |
199 | + 'w') as crt: |
200 | + crt.write(ca_cert) |
201 | + subprocess.check_call(['update-ca-certificates', '--fresh']) |
202 | + |
203 | + |
204 | +def enable_https(port_maps, namespace, cert, key, ca_cert=None): |
205 | + ''' |
206 | + For a given number of port mappings, configures apache2 |
207 | + HTTPs local reverse proxying using certficates and keys provided in |
208 | + either configuration data (preferred) or relation data. Assumes ports |
209 | + are not in use (calling charm should ensure that). |
210 | + |
211 | + port_maps: dict: external to internal port mappings |
212 | + namespace: str: name of charm |
213 | + ''' |
214 | + def _write_if_changed(path, new_content): |
215 | + content = None |
216 | + if os.path.exists(path): |
217 | + with open(path, 'r') as f: |
218 | + content = f.read().strip() |
219 | + if content != new_content: |
220 | + with open(path, 'w') as f: |
221 | + f.write(new_content) |
222 | + return True |
223 | + else: |
224 | + return False |
225 | + |
226 | + juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps)) |
227 | + http_restart = False |
228 | + |
229 | + if cert: |
230 | + cert = b64decode(cert) |
231 | + if key: |
232 | + key = b64decode(key) |
233 | + if ca_cert: |
234 | + ca_cert = b64decode(ca_cert) |
235 | + |
236 | + if not cert and not key: |
237 | + juju_log('ERROR', |
238 | + "Expected but could not find SSL certificate data, not " |
239 | + "configuring HTTPS!") |
240 | + return False |
241 | + |
242 | + install('apache2') |
243 | + if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl', |
244 | + 'proxy', 'proxy_http']): |
245 | + http_restart = True |
246 | + |
247 | + ssl_dir = os.path.join('/etc/apache2/ssl', namespace) |
248 | + if not os.path.exists(ssl_dir): |
249 | + os.makedirs(ssl_dir) |
250 | + |
251 | + if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)): |
252 | + http_restart = True |
253 | + if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)): |
254 | + http_restart = True |
255 | + os.chmod(os.path.join(ssl_dir, 'key'), 0600) |
256 | + |
257 | + install_ca_cert(ca_cert) |
258 | + |
259 | + sites_dir = '/etc/apache2/sites-available' |
260 | + for ext_port, int_port in port_maps.items(): |
261 | + juju_log('INFO', |
262 | + 'Creating apache2 reverse proxy vhost' |
263 | + ' for {}:{}'.format(ext_port, |
264 | + int_port)) |
265 | + site = "{}_{}".format(namespace, ext_port) |
266 | + site_path = os.path.join(sites_dir, site) |
267 | + with open(site_path, 'w') as fsite: |
268 | + context = { |
269 | + "ext": ext_port, |
270 | + "int": int_port, |
271 | + "namespace": namespace, |
272 | + "private_address": get_host_ip() |
273 | + } |
274 | + fsite.write(render_template(SITE_TEMPLATE, |
275 | + context)) |
276 | + |
277 | + if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]): |
278 | + http_restart = True |
279 | + |
280 | + if http_restart: |
281 | + restart('apache2') |
282 | + |
283 | + return True |
284 | + |
285 | + |
286 | +def disable_https(port_maps, namespace): |
287 | + ''' |
288 | + Ensure HTTPS reverse proxying is disabled for given port mappings |
289 | + |
290 | + port_maps: dict: of ext -> int port mappings |
291 | + namespace: str: name of charm |
292 | + ''' |
293 | + juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps)) |
294 | + |
295 | + if (not os.path.exists('/etc/apache2') or |
296 | + not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))): |
297 | + return |
298 | + |
299 | + http_restart = False |
300 | + for ext_port in port_maps.keys(): |
301 | + if os.path.exists(os.path.join(APACHE_SITE_DIR, |
302 | + "{}_{}".format(namespace, |
303 | + ext_port))): |
304 | + juju_log('INFO', |
305 | + "Disabling HTTPS reverse proxy" |
306 | + " for {} {}.".format(namespace, |
307 | + ext_port)) |
308 | + if (RELOAD_CHECK in |
309 | + subprocess.check_output(['a2dissite', |
310 | + '{}_{}'.format(namespace, |
311 | + ext_port)])): |
312 | + http_restart = True |
313 | + |
314 | + if http_restart: |
315 | + restart(['apache2']) |
316 | + |
317 | + |
318 | +def setup_https(port_maps, namespace, cert, key, ca_cert=None): |
319 | + ''' |
320 | + Ensures HTTPS is either enabled or disabled for given port |
321 | + mapping. |
322 | + |
323 | + port_maps: dict: of ext -> int port mappings |
324 | + namespace: str: name of charm |
325 | + ''' |
326 | + if not https: |
327 | + disable_https(port_maps, namespace) |
328 | + else: |
329 | + enable_https(port_maps, namespace, cert, key, ca_cert) |
330 | |
331 | === added file 'hooks/lib/cluster_utils.py' |
332 | --- hooks/lib/cluster_utils.py 1970-01-01 00:00:00 +0000 |
333 | +++ hooks/lib/cluster_utils.py 2013-05-29 18:03:27 +0000 |
334 | @@ -0,0 +1,130 @@ |
335 | +# |
336 | +# Copyright 2012 Canonical Ltd. |
337 | +# |
338 | +# This file is sourced from lp:openstack-charm-helpers |
339 | +# |
340 | +# Authors: |
341 | +# James Page <james.page@ubuntu.com> |
342 | +# Adam Gandelman <adamg@ubuntu.com> |
343 | +# |
344 | + |
345 | +from lib.utils import ( |
346 | + juju_log, |
347 | + relation_ids, |
348 | + relation_list, |
349 | + relation_get, |
350 | + get_unit_hostname, |
351 | + config_get |
352 | + ) |
353 | +import subprocess |
354 | +import os |
355 | + |
356 | + |
357 | +def is_clustered(): |
358 | + for r_id in (relation_ids('ha') or []): |
359 | + for unit in (relation_list(r_id) or []): |
360 | + clustered = relation_get('clustered', |
361 | + rid=r_id, |
362 | + unit=unit) |
363 | + if clustered: |
364 | + return True |
365 | + return False |
366 | + |
367 | + |
368 | +def is_leader(resource): |
369 | + cmd = [ |
370 | + "crm", "resource", |
371 | + "show", resource |
372 | + ] |
373 | + try: |
374 | + status = subprocess.check_output(cmd) |
375 | + except subprocess.CalledProcessError: |
376 | + return False |
377 | + else: |
378 | + if get_unit_hostname() in status: |
379 | + return True |
380 | + else: |
381 | + return False |
382 | + |
383 | + |
384 | +def peer_units(): |
385 | + peers = [] |
386 | + for r_id in (relation_ids('cluster') or []): |
387 | + for unit in (relation_list(r_id) or []): |
388 | + peers.append(unit) |
389 | + return peers |
390 | + |
391 | + |
392 | +def oldest_peer(peers): |
393 | + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) |
394 | + for peer in peers: |
395 | + remote_unit_no = int(peer.split('/')[1]) |
396 | + if remote_unit_no < local_unit_no: |
397 | + return False |
398 | + return True |
399 | + |
400 | + |
401 | +def eligible_leader(resource): |
402 | + if is_clustered(): |
403 | + if not is_leader(resource): |
404 | + juju_log('INFO', 'Deferring action to CRM leader.') |
405 | + return False |
406 | + else: |
407 | + peers = peer_units() |
408 | + if peers and not oldest_peer(peers): |
409 | + juju_log('INFO', 'Deferring action to oldest service unit.') |
410 | + return False |
411 | + return True |
412 | + |
413 | + |
414 | +def https(): |
415 | + ''' |
416 | + Determines whether enough data has been provided in configuration |
417 | + or relation data to configure HTTPS |
418 | + . |
419 | + returns: boolean |
420 | + ''' |
421 | + if config_get('use-https') == "yes": |
422 | + return True |
423 | + if config_get('ssl_cert') and config_get('ssl_key'): |
424 | + return True |
425 | + for r_id in relation_ids('identity-service'): |
426 | + for unit in relation_list(r_id): |
427 | + if (relation_get('https_keystone', rid=r_id, unit=unit) and |
428 | + relation_get('ssl_cert', rid=r_id, unit=unit) and |
429 | + relation_get('ssl_key', rid=r_id, unit=unit) and |
430 | + relation_get('ca_cert', rid=r_id, unit=unit)): |
431 | + return True |
432 | + return False |
433 | + |
434 | + |
435 | +def determine_api_port(public_port): |
436 | + ''' |
437 | + Determine correct API server listening port based on |
438 | + existence of HTTPS reverse proxy and/or haproxy. |
439 | + |
440 | + public_port: int: standard public port for given service |
441 | + |
442 | + returns: int: the correct listening port for the API service |
443 | + ''' |
444 | + i = 0 |
445 | + if len(peer_units()) > 0 or is_clustered(): |
446 | + i += 1 |
447 | + if https(): |
448 | + i += 1 |
449 | + return public_port - (i * 10) |
450 | + |
451 | + |
452 | +def determine_haproxy_port(public_port): |
453 | + ''' |
454 | + Description: Determine correct proxy listening port based on public IP + |
455 | + existence of HTTPS reverse proxy. |
456 | + |
457 | + public_port: int: standard public port for given service |
458 | + |
459 | + returns: int: the correct listening port for the HAProxy service |
460 | + ''' |
461 | + i = 0 |
462 | + if https(): |
463 | + i += 1 |
464 | + return public_port - (i * 10) |
465 | |
466 | === added file 'hooks/lib/haproxy_utils.py' |
467 | --- hooks/lib/haproxy_utils.py 1970-01-01 00:00:00 +0000 |
468 | +++ hooks/lib/haproxy_utils.py 2013-05-29 18:03:27 +0000 |
469 | @@ -0,0 +1,52 @@ |
470 | +# |
471 | +# Copyright 2012 Canonical Ltd. |
472 | +# |
473 | +# Authors: |
474 | +# James Page <james.page@ubuntu.com> |
475 | +# |
476 | + |
477 | +from lib.utils import ( |
478 | + relation_ids, |
479 | + relation_list, |
480 | + relation_get, |
481 | + unit_get, |
482 | + reload, |
483 | + render_template |
484 | + ) |
485 | +import os |
486 | + |
487 | +HAPROXY_CONF = '/etc/haproxy/haproxy.cfg' |
488 | +HAPROXY_DEFAULT = '/etc/default/haproxy' |
489 | + |
490 | + |
491 | +def configure_haproxy(service_ports): |
492 | + ''' |
493 | + Configure HAProxy based on the current peers in the service |
494 | + cluster using the provided port map: |
495 | + |
496 | + "swift": [ 8080, 8070 ] |
497 | + |
498 | + HAproxy will also be reloaded/started if required |
499 | + |
500 | + service_ports: dict: dict of lists of [ frontend, backend ] |
501 | + ''' |
502 | + cluster_hosts = {} |
503 | + cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \ |
504 | + unit_get('private-address') |
505 | + for r_id in relation_ids('cluster'): |
506 | + for unit in relation_list(r_id): |
507 | + cluster_hosts[unit.replace('/', '-')] = \ |
508 | + relation_get(attribute='private-address', |
509 | + rid=r_id, |
510 | + unit=unit) |
511 | + context = { |
512 | + 'units': cluster_hosts, |
513 | + 'service_ports': service_ports |
514 | + } |
515 | + with open(HAPROXY_CONF, 'w') as f: |
516 | + f.write(render_template(os.path.basename(HAPROXY_CONF), |
517 | + context)) |
518 | + with open(HAPROXY_DEFAULT, 'w') as f: |
519 | + f.write('ENABLED=1') |
520 | + |
521 | + reload('haproxy') |
522 | |
523 | === modified file 'hooks/lib/openstack_common.py' |
524 | --- hooks/lib/openstack_common.py 2013-04-26 10:34:27 +0000 |
525 | +++ hooks/lib/openstack_common.py 2013-05-29 18:03:27 +0000 |
526 | @@ -2,7 +2,9 @@ |
527 | |
528 | # Common python helper functions used for OpenStack charms. |
529 | |
530 | +import apt_pkg as apt |
531 | import subprocess |
532 | +import os |
533 | |
534 | CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" |
535 | CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' |
536 | @@ -11,7 +13,7 @@ |
537 | 'oneiric': 'diablo', |
538 | 'precise': 'essex', |
539 | 'quantal': 'folsom', |
540 | - 'raring' : 'grizzly' |
541 | + 'raring': 'grizzly', |
542 | } |
543 | |
544 | |
545 | @@ -19,7 +21,8 @@ |
546 | '2011.2': 'diablo', |
547 | '2012.1': 'essex', |
548 | '2012.2': 'folsom', |
549 | - '2013.1': 'grizzly' |
550 | + '2013.1': 'grizzly', |
551 | + '2013.2': 'havana', |
552 | } |
553 | |
554 | # The ugly duckling |
555 | @@ -27,9 +30,12 @@ |
556 | '1.4.3': 'diablo', |
557 | '1.4.8': 'essex', |
558 | '1.7.4': 'folsom', |
559 | - '1.8.0': 'grizzly' |
560 | + '1.7.6': 'grizzly', |
561 | + '1.7.7': 'grizzly', |
562 | + '1.8.0': 'grizzly', |
563 | } |
564 | |
565 | + |
566 | def juju_log(msg): |
567 | subprocess.check_call(['juju-log', msg]) |
568 | |
569 | @@ -68,12 +74,13 @@ |
570 | ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] |
571 | return ca_rel |
572 | |
573 | - # Best guess match based on deb or ppa provided strings |
574 | + # Best guess match based on deb string provided |
575 | if src.startswith('deb') or src.startswith('ppa'): |
576 | for k, v in openstack_codenames.iteritems(): |
577 | if v in src: |
578 | return v |
579 | |
580 | + |
581 | def get_os_codename_version(vers): |
582 | '''Determine OpenStack codename from version number.''' |
583 | try: |
584 | @@ -95,37 +102,18 @@ |
585 | |
586 | def get_os_codename_package(pkg): |
587 | '''Derive OpenStack release codename from an installed package.''' |
588 | - cmd = ['dpkg', '-l', pkg] |
589 | - |
590 | + apt.init() |
591 | + cache = apt.Cache() |
592 | try: |
593 | - output = subprocess.check_output(cmd) |
594 | - except subprocess.CalledProcessError: |
595 | - e = 'Could not derive OpenStack version from package that is not '\ |
596 | - 'installed; %s' % pkg |
597 | - error_out(e) |
598 | - |
599 | - def _clean(line): |
600 | - line = line.split(' ') |
601 | - clean = [] |
602 | - for c in line: |
603 | - if c != '': |
604 | - clean.append(c) |
605 | - return clean |
606 | - |
607 | - vers = None |
608 | - for l in output.split('\n'): |
609 | - if l.startswith('ii'): |
610 | - l = _clean(l) |
611 | - if l[1] == pkg: |
612 | - vers = l[2] |
613 | - |
614 | - if not vers: |
615 | + pkg = cache[pkg] |
616 | + except: |
617 | e = 'Could not determine version of installed package: %s' % pkg |
618 | error_out(e) |
619 | |
620 | - vers = vers[:6] |
621 | + vers = apt.UpstreamVersion(pkg.current_ver.ver_str) |
622 | + |
623 | try: |
624 | - if 'swift' in pkg: |
625 | + if 'swift' in pkg.name: |
626 | vers = vers[:5] |
627 | return swift_codenames[vers] |
628 | else: |
629 | @@ -151,16 +139,17 @@ |
630 | e = "Could not determine OpenStack version for package: %s" % pkg |
631 | error_out(e) |
632 | |
633 | + |
634 | def configure_installation_source(rel): |
635 | '''Configure apt installation source.''' |
636 | |
637 | - def _import_key(id): |
638 | + def _import_key(keyid): |
639 | cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ |
640 | - "--recv-keys %s" % id |
641 | + "--recv-keys %s" % keyid |
642 | try: |
643 | subprocess.check_call(cmd.split(' ')) |
644 | - except: |
645 | - error_out("Error importing repo key %s" % id) |
646 | + except subprocess.CalledProcessError: |
647 | + error_out("Error importing repo key %s" % keyid) |
648 | |
649 | if rel == 'distro': |
650 | return |
651 | @@ -169,7 +158,7 @@ |
652 | subprocess.check_call(["add-apt-repository", "-y", src]) |
653 | elif rel[:3] == "deb": |
654 | l = len(rel.split('|')) |
655 | - if l == 2: |
656 | + if l == 2: |
657 | src, key = rel.split('|') |
658 | juju_log("Importing PPA key from keyserver for %s" % src) |
659 | _import_key(key) |
660 | @@ -222,3 +211,20 @@ |
661 | f.write(src) |
662 | else: |
663 | error_out("Invalid openstack-release specified: %s" % rel) |
664 | + |
665 | + |
666 | +def save_script_rc(script_path="scripts/scriptrc", **env_vars): |
667 | + """ |
668 | + Write an rc file in the charm-delivered directory containing |
669 | + exported environment variables provided by env_vars. Any charm scripts run |
670 | + outside the juju hook environment can source this scriptrc to obtain |
671 | + updated config information necessary to perform health checks or |
672 | + service changes. |
673 | + """ |
674 | + charm_dir = os.getenv('CHARM_DIR') |
675 | + juju_rc_path = "%s/%s" % (charm_dir, script_path) |
676 | + with open(juju_rc_path, 'wb') as rc_script: |
677 | + rc_script.write( |
678 | + "#!/bin/bash\n") |
679 | + [rc_script.write('export %s=%s\n' % (u, p)) |
680 | + for u, p in env_vars.iteritems() if u != "script_path"] |
681 | |
682 | === added file 'hooks/lib/utils.py' |
683 | --- hooks/lib/utils.py 1970-01-01 00:00:00 +0000 |
684 | +++ hooks/lib/utils.py 2013-05-29 18:03:27 +0000 |
685 | @@ -0,0 +1,332 @@ |
686 | +# |
687 | +# Copyright 2012 Canonical Ltd. |
688 | +# |
689 | +# This file is sourced from lp:openstack-charm-helpers |
690 | +# |
691 | +# Authors: |
692 | +# James Page <james.page@ubuntu.com> |
693 | +# Paul Collins <paul.collins@canonical.com> |
694 | +# Adam Gandelman <adamg@ubuntu.com> |
695 | +# |
696 | + |
697 | +import json |
698 | +import os |
699 | +import subprocess |
700 | +import socket |
701 | +import sys |
702 | + |
703 | + |
704 | +def do_hooks(hooks): |
705 | + hook = os.path.basename(sys.argv[0]) |
706 | + |
707 | + try: |
708 | + hook_func = hooks[hook] |
709 | + except KeyError: |
710 | + juju_log('INFO', |
711 | + "This charm doesn't know how to handle '{}'.".format(hook)) |
712 | + else: |
713 | + hook_func() |
714 | + |
715 | + |
716 | +def install(*pkgs): |
717 | + cmd = [ |
718 | + 'apt-get', |
719 | + '-y', |
720 | + 'install' |
721 | + ] |
722 | + for pkg in pkgs: |
723 | + cmd.append(pkg) |
724 | + subprocess.check_call(cmd) |
725 | + |
726 | +TEMPLATES_DIR = 'templates' |
727 | + |
728 | +try: |
729 | + import jinja2 |
730 | +except ImportError: |
731 | + install('python-jinja2') |
732 | + import jinja2 |
733 | + |
734 | +try: |
735 | + import dns.resolver |
736 | +except ImportError: |
737 | + install('python-dnspython') |
738 | + import dns.resolver |
739 | + |
740 | + |
741 | +def render_template(template_name, context, template_dir=TEMPLATES_DIR): |
742 | + templates = jinja2.Environment( |
743 | + loader=jinja2.FileSystemLoader(template_dir) |
744 | + ) |
745 | + template = templates.get_template(template_name) |
746 | + return template.render(context) |
747 | + |
748 | +CLOUD_ARCHIVE = \ |
749 | +""" # Ubuntu Cloud Archive |
750 | +deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
751 | +""" |
752 | + |
753 | +CLOUD_ARCHIVE_POCKETS = { |
754 | + 'folsom': 'precise-updates/folsom', |
755 | + 'folsom/updates': 'precise-updates/folsom', |
756 | + 'folsom/proposed': 'precise-proposed/folsom', |
757 | + 'grizzly': 'precise-updates/grizzly', |
758 | + 'grizzly/updates': 'precise-updates/grizzly', |
759 | + 'grizzly/proposed': 'precise-proposed/grizzly' |
760 | + } |
761 | + |
762 | + |
763 | +def configure_source(): |
764 | + source = str(config_get('openstack-origin')) |
765 | + if not source: |
766 | + return |
767 | + if source.startswith('ppa:'): |
768 | + cmd = [ |
769 | + 'add-apt-repository', |
770 | + source |
771 | + ] |
772 | + subprocess.check_call(cmd) |
773 | + if source.startswith('cloud:'): |
774 | + # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg: |
775 | + # cloud:precise-folsom/updates or cloud:precise-folsom/proposed |
776 | + install('ubuntu-cloud-keyring') |
777 | + pocket = source.split(':')[1] |
778 | + pocket = pocket.split('-')[1] |
779 | + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
780 | + apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])) |
781 | + if source.startswith('deb'): |
782 | + l = len(source.split('|')) |
783 | + if l == 2: |
784 | + (apt_line, key) = source.split('|') |
785 | + cmd = [ |
786 | + 'apt-key', |
787 | + 'adv', '--keyserver keyserver.ubuntu.com', |
788 | + '--recv-keys', key |
789 | + ] |
790 | + subprocess.check_call(cmd) |
791 | + elif l == 1: |
792 | + apt_line = source |
793 | + |
794 | + with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: |
795 | + apt.write(apt_line + "\n") |
796 | + cmd = [ |
797 | + 'apt-get', |
798 | + 'update' |
799 | + ] |
800 | + subprocess.check_call(cmd) |
801 | + |
802 | +# Protocols |
803 | +TCP = 'TCP' |
804 | +UDP = 'UDP' |
805 | + |
806 | + |
807 | +def expose(port, protocol='TCP'): |
808 | + cmd = [ |
809 | + 'open-port', |
810 | + '{}/{}'.format(port, protocol) |
811 | + ] |
812 | + subprocess.check_call(cmd) |
813 | + |
814 | + |
815 | +def juju_log(severity, message): |
816 | + cmd = [ |
817 | + 'juju-log', |
818 | + '--log-level', severity, |
819 | + message |
820 | + ] |
821 | + subprocess.check_call(cmd) |
822 | + |
823 | + |
824 | +cache = {} |
825 | + |
826 | + |
827 | +def cached(func): |
828 | + def wrapper(*args, **kwargs): |
829 | + global cache |
830 | + key = str((func, args, kwargs)) |
831 | + try: |
832 | + return cache[key] |
833 | + except KeyError: |
834 | + res = func(*args, **kwargs) |
835 | + cache[key] = res |
836 | + return res |
837 | + return wrapper |
838 | + |
839 | + |
840 | +@cached |
841 | +def relation_ids(relation): |
842 | + cmd = [ |
843 | + 'relation-ids', |
844 | + relation |
845 | + ] |
846 | + result = str(subprocess.check_output(cmd)).split() |
847 | + if result == "": |
848 | + return None |
849 | + else: |
850 | + return result |
851 | + |
852 | + |
853 | +@cached |
854 | +def relation_list(rid): |
855 | + cmd = [ |
856 | + 'relation-list', |
857 | + '-r', rid, |
858 | + ] |
859 | + result = str(subprocess.check_output(cmd)).split() |
860 | + if result == "": |
861 | + return None |
862 | + else: |
863 | + return result |
864 | + |
865 | + |
866 | +@cached |
867 | +def relation_get(attribute, unit=None, rid=None): |
868 | + cmd = [ |
869 | + 'relation-get', |
870 | + ] |
871 | + if rid: |
872 | + cmd.append('-r') |
873 | + cmd.append(rid) |
874 | + cmd.append(attribute) |
875 | + if unit: |
876 | + cmd.append(unit) |
877 | + value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
878 | + if value == "": |
879 | + return None |
880 | + else: |
881 | + return value |
882 | + |
883 | + |
884 | +@cached |
885 | +def relation_get_dict(relation_id=None, remote_unit=None): |
886 | + """Obtain all relation data as dict by way of JSON""" |
887 | + cmd = [ |
888 | + 'relation-get', '--format=json' |
889 | + ] |
890 | + if relation_id: |
891 | + cmd.append('-r') |
892 | + cmd.append(relation_id) |
893 | + if remote_unit: |
894 | + remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None) |
895 | + os.environ['JUJU_REMOTE_UNIT'] = remote_unit |
896 | + j = subprocess.check_output(cmd) |
897 | + if remote_unit and remote_unit_orig: |
898 | + os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig |
899 | + d = json.loads(j) |
900 | + settings = {} |
901 | + # convert unicode to strings |
902 | + for k, v in d.iteritems(): |
903 | + settings[str(k)] = str(v) |
904 | + return settings |
905 | + |
906 | + |
907 | +def relation_set(**kwargs): |
908 | + cmd = [ |
909 | + 'relation-set' |
910 | + ] |
911 | + args = [] |
912 | + for k, v in kwargs.items(): |
913 | + if k == 'rid': |
914 | + if v: |
915 | + cmd.append('-r') |
916 | + cmd.append(v) |
917 | + else: |
918 | + args.append('{}={}'.format(k, v)) |
919 | + cmd += args |
920 | + subprocess.check_call(cmd) |
921 | + |
922 | + |
923 | +@cached |
924 | +def unit_get(attribute): |
925 | + cmd = [ |
926 | + 'unit-get', |
927 | + attribute |
928 | + ] |
929 | + value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
930 | + if value == "": |
931 | + return None |
932 | + else: |
933 | + return value |
934 | + |
935 | + |
936 | +@cached |
937 | +def config_get(attribute): |
938 | + cmd = [ |
939 | + 'config-get', |
940 | + '--format', |
941 | + 'json', |
942 | + ] |
943 | + out = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
944 | + cfg = json.loads(out) |
945 | + |
946 | + try: |
947 | + return cfg[attribute] |
948 | + except KeyError: |
949 | + return None |
950 | + |
951 | + |
952 | +@cached |
953 | +def get_unit_hostname(): |
954 | + return socket.gethostname() |
955 | + |
956 | + |
957 | +@cached |
958 | +def get_host_ip(hostname=unit_get('private-address')): |
959 | + try: |
960 | + # Test to see if already an IPv4 address |
961 | + socket.inet_aton(hostname) |
962 | + return hostname |
963 | + except socket.error: |
964 | + answers = dns.resolver.query(hostname, 'A') |
965 | + if answers: |
966 | + return answers[0].address |
967 | + return None |
968 | + |
969 | + |
970 | +def _svc_control(service, action): |
971 | + subprocess.check_call(['service', service, action]) |
972 | + |
973 | + |
974 | +def restart(*services): |
975 | + for service in services: |
976 | + _svc_control(service, 'restart') |
977 | + |
978 | + |
979 | +def stop(*services): |
980 | + for service in services: |
981 | + _svc_control(service, 'stop') |
982 | + |
983 | + |
984 | +def start(*services): |
985 | + for service in services: |
986 | + _svc_control(service, 'start') |
987 | + |
988 | + |
989 | +def reload(*services): |
990 | + for service in services: |
991 | + try: |
992 | + _svc_control(service, 'reload') |
993 | + except subprocess.CalledProcessError: |
994 | + # Reload failed - either service does not support reload |
995 | + # or it was not running - restart will fixup most things |
996 | + _svc_control(service, 'restart') |
997 | + |
998 | + |
999 | +def running(service): |
1000 | + try: |
1001 | + output = subprocess.check_output(['service', service, 'status']) |
1002 | + except subprocess.CalledProcessError: |
1003 | + return False |
1004 | + else: |
1005 | + if ("start/running" in output or |
1006 | + "is running" in output): |
1007 | + return True |
1008 | + else: |
1009 | + return False |
1010 | + |
1011 | + |
1012 | +def is_relation_made(relation, key='private-address'): |
1013 | + for r_id in (relation_ids(relation) or []): |
1014 | + for unit in (relation_list(r_id) or []): |
1015 | + if relation_get(key, rid=r_id, unit=unit): |
1016 | + return True |
1017 | + return False |
1018 | |
1019 | === modified symlink 'hooks/object-store-relation-joined' |
1020 | === target changed u'swift-hooks.py' => u'swift_hooks.py' |
1021 | === removed file 'hooks/swift-hooks.py' |
1022 | --- hooks/swift-hooks.py 2013-04-10 20:33:37 +0000 |
1023 | +++ hooks/swift-hooks.py 1970-01-01 00:00:00 +0000 |
1024 | @@ -1,164 +0,0 @@ |
1025 | -#!/usr/bin/python |
1026 | - |
1027 | -import os |
1028 | -import utils |
1029 | -import sys |
1030 | -import shutil |
1031 | -import uuid |
1032 | -from subprocess import check_call |
1033 | - |
1034 | -import lib.openstack_common as openstack |
1035 | -import swift_utils as swift |
1036 | - |
1037 | -def install(): |
1038 | - src = utils.config_get('openstack-origin') |
1039 | - if src != 'distro': |
1040 | - openstack.configure_installation_source(src) |
1041 | - check_call(['apt-get', 'update']) |
1042 | - rel = openstack.get_os_codename_install_source(src) |
1043 | - |
1044 | - pkgs = swift.determine_packages(rel) |
1045 | - utils.install(*pkgs) |
1046 | - |
1047 | - swift.ensure_swift_dir() |
1048 | - |
1049 | - # initialize swift configs. |
1050 | - # swift.conf hash |
1051 | - ctxt = { |
1052 | - 'swift_hash': swift.get_swift_hash() |
1053 | - } |
1054 | - with open(swift.SWIFT_CONF, 'w') as conf: |
1055 | - conf.write(swift.render_config(swift.SWIFT_CONF, ctxt)) |
1056 | - |
1057 | - # swift-proxy.conf |
1058 | - swift.write_proxy_config() |
1059 | - |
1060 | - # memcached.conf |
1061 | - ctxt = { 'proxy_ip': utils.get_host_ip() } |
1062 | - with open(swift.MEMCACHED_CONF, 'w') as conf: |
1063 | - conf.write(swift.render_config(swift.MEMCACHED_CONF, ctxt)) |
1064 | - check_call(['service', 'memcached', 'restart']) |
1065 | - |
1066 | - # generate or setup SSL certificate |
1067 | - swift.configure_ssl() |
1068 | - |
1069 | - # initialize new storage rings. |
1070 | - for ring in swift.SWIFT_RINGS.iteritems(): |
1071 | - swift.initialize_ring(ring[1], |
1072 | - utils.config_get('partition-power'), |
1073 | - utils.config_get('replicas'), |
1074 | - utils.config_get('min-hours')) |
1075 | - |
1076 | - # configure a directory on webserver for distributing rings. |
1077 | - if not os.path.isdir(swift.WWW_DIR): |
1078 | - os.mkdir(swift.WWW_DIR, 0755) |
1079 | - uid, gid = swift.swift_user() |
1080 | - os.chown(swift.WWW_DIR, uid, gid) |
1081 | - swift.write_apache_config() |
1082 | - |
1083 | - |
1084 | -def keystone_joined(relid=None): |
1085 | - hostname = utils.unit_get('private-address') |
1086 | - port = utils.config_get('bind-port') |
1087 | - ssl = utils.config_get('use-https') |
1088 | - if ssl == 'yes': |
1089 | - proto = 'https' |
1090 | - else: |
1091 | - proto = 'http' |
1092 | - admin_url = '%s://%s:%s' % (proto, hostname, port) |
1093 | - internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url |
1094 | - utils.relation_set(service='swift', |
1095 | - region=utils.config_get('region'), |
1096 | - public_url=public_url, internal_url=internal_url, |
1097 | - admin_url=admin_url, |
1098 | - requested_roles=utils.config_get('operator-roles'), |
1099 | - rid=relid) |
1100 | - |
1101 | - |
1102 | -def keystone_changed(): |
1103 | - swift.write_proxy_config() |
1104 | - |
1105 | - |
1106 | -def balance_rings(): |
1107 | - '''handle doing ring balancing and distribution.''' |
1108 | - new_ring = False |
1109 | - for ring in swift.SWIFT_RINGS.itervalues(): |
1110 | - if swift.balance_ring(ring): |
1111 | - utils.juju_log('INFO', 'Balanced ring %s' % ring) |
1112 | - new_ring = True |
1113 | - if not new_ring: |
1114 | - return |
1115 | - |
1116 | - for ring in swift.SWIFT_RINGS.keys(): |
1117 | - f = '%s.ring.gz' % ring |
1118 | - shutil.copyfile(os.path.join(swift.SWIFT_CONF_DIR, f), |
1119 | - os.path.join(swift.WWW_DIR, f)) |
1120 | - |
1121 | - msg = 'Broadcasting notification to all storage nodes that new '\ |
1122 | - 'ring is ready for consumption.' |
1123 | - utils.juju_log('INFO', msg) |
1124 | - |
1125 | - www_dir = swift.WWW_DIR.split('/var/www/')[1] |
1126 | - trigger = uuid.uuid4() |
1127 | - swift_hash = swift.get_swift_hash() |
1128 | - # notify storage nodes that there is a new ring to fetch. |
1129 | - for relid in utils.relation_ids('swift-storage'): |
1130 | - utils.relation_set(rid=relid, swift_hash=swift_hash, |
1131 | - www_dir=www_dir, trigger=trigger) |
1132 | - swift.proxy_control('restart') |
1133 | - |
1134 | -def storage_changed(): |
1135 | - account_port = utils.config_get('account-ring-port') |
1136 | - object_port = utils.config_get('object-ring-port') |
1137 | - container_port = utils.config_get('container-ring-port') |
1138 | - zone = swift.get_zone(utils.config_get('zone-assignment')) |
1139 | - node_settings = { |
1140 | - 'ip': utils.get_host_ip(utils.relation_get('private-address')), |
1141 | - 'zone': zone, |
1142 | - 'account_port': utils.relation_get('account_port'), |
1143 | - 'object_port': utils.relation_get('object_port'), |
1144 | - 'container_port': utils.relation_get('container_port'), |
1145 | - } |
1146 | - if None in node_settings.itervalues(): |
1147 | - utils.juju_log('INFO', 'storage_changed: Relation not ready.') |
1148 | - return None |
1149 | - |
1150 | - for k in ['zone', 'account_port', 'object_port', 'container_port']: |
1151 | - node_settings[k] = int(node_settings[k]) |
1152 | - |
1153 | - # Grant new node access to rings via apache. |
1154 | - swift.write_apache_config() |
1155 | - |
1156 | - # allow for multiple devs per unit, passed along as a : separated list |
1157 | - devs = utils.relation_get('device').split(':') |
1158 | - for dev in devs: |
1159 | - node_settings['device'] = dev |
1160 | - for ring in swift.SWIFT_RINGS.itervalues(): |
1161 | - if not swift.exists_in_ring(ring, node_settings): |
1162 | - swift.add_to_ring(ring, node_settings) |
1163 | - |
1164 | - if swift.should_balance([r for r in swift.SWIFT_RINGS.itervalues()]): |
1165 | - balance_rings() |
1166 | - |
1167 | -def storage_broken(): |
1168 | - swift.write_apache_config() |
1169 | - |
1170 | -def config_changed(): |
1171 | - relids = utils.relation_ids('identity-service') |
1172 | - if relids: |
1173 | - for relid in relids: |
1174 | - keystone_joined(relid) |
1175 | - swift.write_proxy_config() |
1176 | - |
1177 | -hooks = { |
1178 | - 'install': install, |
1179 | - 'config-changed': config_changed, |
1180 | - 'identity-service-relation-joined': keystone_joined, |
1181 | - 'identity-service-relation-changed': keystone_changed, |
1182 | - 'swift-storage-relation-changed': storage_changed, |
1183 | - 'swift-storage-relation-broken': storage_broken, |
1184 | -} |
1185 | - |
1186 | -utils.do_hooks(hooks) |
1187 | - |
1188 | -sys.exit(0) |
1189 | |
1190 | === modified symlink 'hooks/swift-storage-relation-broken' |
1191 | === target changed u'swift-hooks.py' => u'swift_hooks.py' |
1192 | === modified symlink 'hooks/swift-storage-relation-changed' |
1193 | === target changed u'swift-hooks.py' => u'swift_hooks.py' |
1194 | === added file 'hooks/swift_hooks.py' |
1195 | --- hooks/swift_hooks.py 1970-01-01 00:00:00 +0000 |
1196 | +++ hooks/swift_hooks.py 2013-05-29 18:03:27 +0000 |
1197 | @@ -0,0 +1,257 @@ |
1198 | +#!/usr/bin/python |
1199 | + |
1200 | +import os |
1201 | +import sys |
1202 | +import shutil |
1203 | +import uuid |
1204 | +from subprocess import check_call |
1205 | + |
1206 | +import lib.openstack_common as openstack |
1207 | +import lib.utils as utils |
1208 | +import lib.cluster_utils as cluster |
1209 | +import swift_utils as swift |
1210 | + |
1211 | +extra_pkgs = [ |
1212 | + "haproxy", |
1213 | + "python-jinja2" |
1214 | + ] |
1215 | + |
1216 | + |
1217 | +def install(): |
1218 | + src = utils.config_get('openstack-origin') |
1219 | + if src != 'distro': |
1220 | + openstack.configure_installation_source(src) |
1221 | + check_call(['apt-get', 'update']) |
1222 | + rel = openstack.get_os_codename_install_source(src) |
1223 | + |
1224 | + pkgs = swift.determine_packages(rel) |
1225 | + utils.install(*pkgs) |
1226 | + utils.install(*extra_pkgs) |
1227 | + |
1228 | + swift.ensure_swift_dir() |
1229 | + |
1230 | + # initialize swift configs. |
1231 | + # swift.conf hash |
1232 | + ctxt = { |
1233 | + 'swift_hash': swift.get_swift_hash() |
1234 | + } |
1235 | + with open(swift.SWIFT_CONF, 'w') as conf: |
1236 | + conf.write(swift.render_config(swift.SWIFT_CONF, ctxt)) |
1237 | + |
1238 | + # swift-proxy.conf |
1239 | + swift.write_proxy_config() |
1240 | + |
1241 | + # memcached.conf |
1242 | + ctxt = {'proxy_ip': utils.get_host_ip()} |
1243 | + with open(swift.MEMCACHED_CONF, 'w') as conf: |
1244 | + conf.write(swift.render_config(swift.MEMCACHED_CONF, ctxt)) |
1245 | + check_call(['service', 'memcached', 'restart']) |
1246 | + |
1247 | + # initialize new storage rings. |
1248 | + for ring in swift.SWIFT_RINGS.iteritems(): |
1249 | + swift.initialize_ring(ring[1], |
1250 | + utils.config_get('partition-power'), |
1251 | + utils.config_get('replicas'), |
1252 | + utils.config_get('min-hours')) |
1253 | + |
1254 | + # configure a directory on webserver for distributing rings. |
1255 | + if not os.path.isdir(swift.WWW_DIR): |
1256 | + os.mkdir(swift.WWW_DIR, 0755) |
1257 | + uid, gid = swift.swift_user() |
1258 | + os.chown(swift.WWW_DIR, uid, gid) |
1259 | + swift.write_apache_config() |
1260 | + swift.configure_https() |
1261 | + |
1262 | + |
1263 | +def keystone_joined(relid=None): |
1264 | + if not cluster.eligible_leader(swift.SWIFT_HA_RES): |
1265 | + return |
1266 | + if cluster.is_clustered(): |
1267 | + hostname = utils.config_get('vip') |
1268 | + else: |
1269 | + hostname = utils.unit_get('private-address') |
1270 | + port = utils.config_get('bind-port') |
1271 | + if cluster.https(): |
1272 | + proto = 'https' |
1273 | + else: |
1274 | + proto = 'http' |
1275 | + admin_url = '%s://%s:%s' % (proto, hostname, port) |
1276 | + internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url |
1277 | + utils.relation_set(service='swift', |
1278 | + region=utils.config_get('region'), |
1279 | + public_url=public_url, internal_url=internal_url, |
1280 | + admin_url=admin_url, |
1281 | + requested_roles=utils.config_get('operator-roles'), |
1282 | + rid=relid) |
1283 | + |
1284 | + |
1285 | +def keystone_changed(): |
1286 | + swift.write_proxy_config() |
1287 | + swift.configure_https() |
1288 | + # Re-fire keystone hooks to ripple back the HTTPS service entry |
1289 | + for relid in utils.relation_ids('identity-service'): |
1290 | + keystone_joined(relid=relid) |
1291 | + |
1292 | + |
1293 | +def balance_rings(): |
1294 | + '''handle doing ring balancing and distribution.''' |
1295 | + new_ring = False |
1296 | + for ring in swift.SWIFT_RINGS.itervalues(): |
1297 | + if swift.balance_ring(ring): |
1298 | + utils.juju_log('INFO', 'Balanced ring %s' % ring) |
1299 | + new_ring = True |
1300 | + if not new_ring: |
1301 | + return |
1302 | + |
1303 | + for ring in swift.SWIFT_RINGS.keys(): |
1304 | + f = '%s.ring.gz' % ring |
1305 | + shutil.copyfile(os.path.join(swift.SWIFT_CONF_DIR, f), |
1306 | + os.path.join(swift.WWW_DIR, f)) |
1307 | + |
1308 | + if cluster.eligible_leader(swift.SWIFT_HA_RES): |
1309 | + msg = 'Broadcasting notification to all storage nodes that new '\ |
1310 | + 'ring is ready for consumption.' |
1311 | + utils.juju_log('INFO', msg) |
1312 | + path = swift.WWW_DIR.split('/var/www/')[1] |
1313 | + trigger = uuid.uuid4() |
1314 | + swift_hash = swift.get_swift_hash() |
1315 | + |
1316 | + if cluster.is_clustered(): |
1317 | + hostname = utils.config_get('vip') |
1318 | + else: |
1319 | + hostname = utils.unit_get('private-address') |
1320 | + |
1321 | + rings_url = 'http://%s/%s' % (hostname, path) |
1322 | + # notify storage nodes that there is a new ring to fetch. |
1323 | + for relid in utils.relation_ids('swift-storage'): |
1324 | + utils.relation_set(rid=relid, swift_hash=swift_hash, |
1325 | + rings_url=rings_url, trigger=trigger) |
1326 | + |
1327 | + swift.proxy_control('restart') |
1328 | + |
1329 | + |
1330 | +def storage_changed(): |
1331 | + zone = swift.get_zone(utils.config_get('zone-assignment')) |
1332 | + node_settings = { |
1333 | + 'ip': utils.get_host_ip(utils.relation_get('private-address')), |
1334 | + 'zone': zone, |
1335 | + 'account_port': utils.relation_get('account_port'), |
1336 | + 'object_port': utils.relation_get('object_port'), |
1337 | + 'container_port': utils.relation_get('container_port'), |
1338 | + } |
1339 | + if None in node_settings.itervalues(): |
1340 | + utils.juju_log('INFO', 'storage_changed: Relation not ready.') |
1341 | + return None |
1342 | + |
1343 | + for k in ['zone', 'account_port', 'object_port', 'container_port']: |
1344 | + node_settings[k] = int(node_settings[k]) |
1345 | + |
1346 | + # Grant new node access to rings via apache. |
1347 | + swift.write_apache_config() |
1348 | + |
1349 | + # allow for multiple devs per unit, passed along as a : separated list |
1350 | + devs = utils.relation_get('device').split(':') |
1351 | + for dev in devs: |
1352 | + node_settings['device'] = dev |
1353 | + for ring in swift.SWIFT_RINGS.itervalues(): |
1354 | + if not swift.exists_in_ring(ring, node_settings): |
1355 | + swift.add_to_ring(ring, node_settings) |
1356 | + |
1357 | + if swift.should_balance([r for r in swift.SWIFT_RINGS.itervalues()]): |
1358 | + balance_rings() |
1359 | + |
1360 | + |
1361 | +def storage_broken(): |
1362 | + swift.write_apache_config() |
1363 | + |
1364 | + |
1365 | +def config_changed(): |
1366 | + # Determine whether or not we should do an upgrade, based on the |
1367 | + # the version offered in keyston-release. |
1368 | + src = utils.config_get('openstack-origin') |
1369 | + available = openstack.get_os_codename_install_source(src) |
1370 | + installed = openstack.get_os_codename_package('python-swift') |
1371 | + if (available and |
1372 | + openstack.get_os_version_codename(available) > \ |
1373 | + openstack.get_os_version_codename(installed)): |
1374 | + pkgs = swift.determine_packages(available) |
1375 | + swift.do_openstack_upgrade(src, pkgs) |
1376 | + |
1377 | + relids = utils.relation_ids('identity-service') |
1378 | + if relids: |
1379 | + for relid in relids: |
1380 | + keystone_joined(relid) |
1381 | + swift.write_proxy_config() |
1382 | + swift.configure_https() |
1383 | + |
1384 | + |
1385 | +def cluster_changed(): |
1386 | + swift.configure_haproxy() |
1387 | + |
1388 | + |
1389 | +def ha_relation_changed(): |
1390 | + clustered = utils.relation_get('clustered') |
1391 | + if clustered and cluster.is_leader(swift.SWIFT_HA_RES): |
1392 | + utils.juju_log('INFO', |
1393 | +                       'Cluster configured, notifying other services and '
1394 | +                       'updating keystone endpoint configuration')
1395 | + # Tell all related services to start using |
1396 | + # the VIP instead |
1397 | + for r_id in utils.relation_ids('identity-service'): |
1398 | + keystone_joined(relid=r_id) |
1399 | + |
1400 | + |
1401 | +def ha_relation_joined(): |
1402 | + # Obtain the config values necessary for the cluster config. These |
1403 | + # include multicast port and interface to bind to. |
1404 | + corosync_bindiface = utils.config_get('ha-bindiface') |
1405 | + corosync_mcastport = utils.config_get('ha-mcastport') |
1406 | + vip = utils.config_get('vip') |
1407 | + vip_cidr = utils.config_get('vip_cidr') |
1408 | + vip_iface = utils.config_get('vip_iface') |
1409 | + if not vip: |
1410 | + utils.juju_log('ERROR', |
1411 | + 'Unable to configure hacluster as vip not provided') |
1412 | + sys.exit(1) |
1413 | + |
1414 | + # Obtain resources |
1415 | + resources = { |
1416 | + 'res_swift_vip': 'ocf:heartbeat:IPaddr2', |
1417 | + 'res_swift_haproxy': 'lsb:haproxy' |
1418 | + } |
1419 | + resource_params = { |
1420 | + 'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \ |
1421 | + (vip, vip_cidr, vip_iface), |
1422 | + 'res_swift_haproxy': 'op monitor interval="5s"' |
1423 | + } |
1424 | + init_services = { |
1425 | + 'res_swift_haproxy': 'haproxy' |
1426 | + } |
1427 | + clones = { |
1428 | + 'cl_swift_haproxy': 'res_swift_haproxy' |
1429 | + } |
1430 | + |
1431 | + utils.relation_set(init_services=init_services, |
1432 | + corosync_bindiface=corosync_bindiface, |
1433 | + corosync_mcastport=corosync_mcastport, |
1434 | + resources=resources, |
1435 | + resource_params=resource_params, |
1436 | + clones=clones) |
1437 | + |
1438 | + |
1439 | +hooks = { |
1440 | + 'install': install, |
1441 | + 'config-changed': config_changed, |
1442 | + 'identity-service-relation-joined': keystone_joined, |
1443 | + 'identity-service-relation-changed': keystone_changed, |
1444 | + 'swift-storage-relation-changed': storage_changed, |
1445 | + 'swift-storage-relation-broken': storage_broken, |
1446 | + "cluster-relation-joined": cluster_changed, |
1447 | + "cluster-relation-changed": cluster_changed, |
1448 | + "ha-relation-joined": ha_relation_joined, |
1449 | + "ha-relation-changed": ha_relation_changed |
1450 | +} |
1451 | + |
1452 | +utils.do_hooks(hooks) |
1453 | + |
1454 | +sys.exit(0) |
1455 | |
1456 | === modified file 'hooks/swift_utils.py' |
1457 | --- hooks/swift_utils.py 2013-01-17 18:35:04 +0000 |
1458 | +++ hooks/swift_utils.py 2013-05-29 18:03:27 +0000 |
1459 | @@ -2,10 +2,16 @@ |
1460 | import pwd |
1461 | import subprocess |
1462 | import lib.openstack_common as openstack |
1463 | -import utils |
1464 | +import lib.utils as utils |
1465 | +import lib.haproxy_utils as haproxy |
1466 | +import lib.apache_utils as apache |
1467 | +import lib.cluster_utils as cluster |
1468 | +import sys |
1469 | +from base64 import b64encode |
1470 | + |
1471 | |
1472 | # Various config files that are managed via templating. |
1473 | -SWIFT_HASH_FILE='/var/lib/juju/swift-hash-path.conf' |
1474 | +SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf' |
1475 | SWIFT_CONF = '/etc/swift/swift.conf' |
1476 | SWIFT_PROXY_CONF = '/etc/swift/proxy-server.conf' |
1477 | SWIFT_CONF_DIR = os.path.dirname(SWIFT_CONF) |
1478 | @@ -32,9 +38,12 @@ |
1479 | 'python-keystone', |
1480 | ] |
1481 | |
1482 | +SWIFT_HA_RES = 'res_swift_vip' |
1483 | + |
1484 | # Folsom-specific packages |
1485 | FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3'] |
1486 | |
1487 | + |
1488 | def proxy_control(action): |
1489 | '''utility to work around swift-init's bad RCs.''' |
1490 | def _cmd(action): |
1491 | @@ -49,8 +58,9 @@ |
1492 | elif status == 0: |
1493 | return subprocess.check_call(_cmd('stop')) |
1494 | |
1495 | - # the proxy will not start unless there are balanced rings, gzip'd in /etc/swift |
1496 | - missing=False |
1497 | + # the proxy will not start unless there are balanced rings |
1498 | + # gzip'd in /etc/swift |
1499 | + missing = False |
1500 | for k in SWIFT_RINGS.keys(): |
1501 | if not os.path.exists(os.path.join(SWIFT_CONF_DIR, '%s.ring.gz' % k)): |
1502 | missing = True |
1503 | @@ -69,8 +79,9 @@ |
1504 | elif status == 1: |
1505 | return subprocess.check_call(_cmd('start')) |
1506 | |
1507 | + |
1508 | def swift_user(username='swift'): |
1509 | - user = pwd.getpwnam('swift') |
1510 | + user = pwd.getpwnam(username) |
1511 | return (user.pw_uid, user.pw_gid) |
1512 | |
1513 | |
1514 | @@ -105,6 +116,10 @@ |
1515 | if os.path.isfile(SWIFT_HASH_FILE): |
1516 | with open(SWIFT_HASH_FILE, 'r') as hashfile: |
1517 | swift_hash = hashfile.read().strip() |
1518 | + elif utils.config_get('swift-hash'): |
1519 | + swift_hash = utils.config_get('swift-hash') |
1520 | + with open(SWIFT_HASH_FILE, 'w') as hashfile: |
1521 | + hashfile.write(swift_hash) |
1522 | else: |
1523 | cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n'] |
1524 | rand = open('/dev/random', 'r') |
1525 | @@ -148,11 +163,16 @@ |
1526 | 'keystone_host': utils.relation_get('auth_host', |
1527 | unit, relid), |
1528 | 'auth_port': utils.relation_get('auth_port', unit, relid), |
1529 | - 'service_user': utils.relation_get('service_username', unit, relid), |
1530 | - 'service_password': utils.relation_get('service_password', unit, relid), |
1531 | - 'service_tenant': utils.relation_get('service_tenant', unit, relid), |
1532 | - 'service_port': utils.relation_get('service_port', unit, relid), |
1533 | - 'admin_token': utils.relation_get('admin_token', unit, relid), |
1534 | + 'service_user': utils.relation_get('service_username', |
1535 | + unit, relid), |
1536 | + 'service_password': utils.relation_get('service_password', |
1537 | + unit, relid), |
1538 | + 'service_tenant': utils.relation_get('service_tenant', |
1539 | + unit, relid), |
1540 | + 'service_port': utils.relation_get('service_port', |
1541 | + unit, relid), |
1542 | + 'admin_token': utils.relation_get('admin_token', |
1543 | + unit, relid), |
1544 | } |
1545 | if None not in ks_auth.itervalues(): |
1546 | return ks_auth |
1547 | @@ -167,19 +187,20 @@ |
1548 | import multiprocessing |
1549 | workers = multiprocessing.cpu_count() |
1550 | |
1551 | + env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server', |
1552 | + 'OPENSTACK_PORT_API': bind_port, |
1553 | + 'OPENSTACK_PORT_MEMCACHED': 11211} |
1554 | + openstack.save_script_rc(**env_vars) |
1555 | + |
1556 | ctxt = { |
1557 | 'proxy_ip': utils.get_host_ip(), |
1558 | - 'bind_port': bind_port, |
1559 | + 'bind_port': cluster.determine_api_port(bind_port), |
1560 | 'workers': workers, |
1561 | - 'operator_roles': utils.config_get('operator-roles') |
1562 | + 'operator_roles': utils.config_get('operator-roles'), |
1563 | + 'delay_auth_decision': utils.config_get('delay-auth-decision') |
1564 | } |
1565 | |
1566 | - if utils.config_get('use-https') == 'no': |
1567 | - ctxt['ssl'] = False |
1568 | - else: |
1569 | - ctxt['ssl'] = True |
1570 | - ctxt['ssl_cert'] = SSL_CERT |
1571 | - ctxt['ssl_key'] = SSL_KEY |
1572 | + ctxt['ssl'] = False |
1573 | |
1574 | ks_auth = get_keystone_auth() |
1575 | if ks_auth: |
1576 | @@ -193,23 +214,10 @@ |
1577 | proxy_control('restart') |
1578 | subprocess.check_call(['open-port', str(bind_port)]) |
1579 | |
1580 | -def configure_ssl(): |
1581 | - # this should be expanded to cover setting up user-specified certificates |
1582 | - if (utils.config_get('use-https') == 'yes' and |
1583 | - not os.path.isfile(SSL_CERT) and |
1584 | - not os.path.isfile(SSL_KEY)): |
1585 | - subj = '/C=%s/ST=%s/L=%s/CN=%s' %\ |
1586 | - (utils.config_get('country'), utils.config_get('state'), |
1587 | - utils.config_get('locale'), utils.config_get('common-name')) |
1588 | - cmd = ['openssl', 'req', '-new', '-x509', '-nodes', |
1589 | - '-out', SSL_CERT, '-keyout', SSL_KEY, |
1590 | - '-subj', subj] |
1591 | - subprocess.check_call(cmd) |
1592 | - |
1593 | |
1594 | def _load_builder(path): |
1595 | # lifted straight from /usr/bin/swift-ring-builder |
1596 | - from swift.common.ring import RingBuilder, Ring |
1597 | + from swift.common.ring import RingBuilder |
1598 | import cPickle as pickle |
1599 | try: |
1600 | builder = pickle.load(open(path, 'rb')) |
1601 | @@ -218,10 +226,8 @@ |
1602 | builder = RingBuilder(1, 1, 1) |
1603 | builder.copy_from(builder_dict) |
1604 | except ImportError: # Happens with really old builder pickles |
1605 | - modules['swift.ring_builder'] = \ |
1606 | - modules['swift.common.ring.builder'] |
1607 | builder = RingBuilder(1, 1, 1) |
1608 | - builder.copy_from(pickle.load(open(argv[1], 'rb'))) |
1609 | + builder.copy_from(pickle.load(open(path, 'rb'))) |
1610 | for dev in builder.devs: |
1611 | if dev and 'meta' not in dev: |
1612 | dev['meta'] = '' |
1613 | @@ -233,8 +239,6 @@ |
1614 | pickle.dump(ring.to_dict(), open(ring_path, 'wb'), protocol=2) |
1615 | |
1616 | |
1617 | - |
1618 | - |
1619 | def ring_port(ring_path, node): |
1620 | '''determine correct port from relation settings for a given ring file.''' |
1621 | for name in ['account', 'object', 'container']: |
1622 | @@ -248,8 +252,8 @@ |
1623 | ring = RingBuilder(part_power, replicas, min_hours) |
1624 | _write_ring(ring, path) |
1625 | |
1626 | + |
1627 | def exists_in_ring(ring_path, node): |
1628 | - from swift.common.ring import RingBuilder, Ring |
1629 | ring = _load_builder(ring_path).to_dict() |
1630 | node['port'] = ring_port(ring_path, node) |
1631 | |
1632 | @@ -266,7 +270,6 @@ |
1633 | |
1634 | |
1635 | def add_to_ring(ring_path, node): |
1636 | - from swift.common.ring import RingBuilder, Ring |
1637 | ring = _load_builder(ring_path) |
1638 | port = ring_port(ring_path, node) |
1639 | |
1640 | @@ -286,8 +289,9 @@ |
1641 | } |
1642 | ring.add_dev(new_dev) |
1643 | _write_ring(ring, ring_path) |
1644 | - msg = 'Added new device to ring %s: %s' % (ring_path, |
1645 | - [k for k in new_dev.iteritems()]) |
1646 | + msg = 'Added new device to ring %s: %s' %\ |
1647 | + (ring_path, |
1648 | + [k for k in new_dev.iteritems()]) |
1649 | utils.juju_log('INFO', msg) |
1650 | |
1651 | |
1652 | @@ -332,8 +336,8 @@ |
1653 | potential_zones.append(_get_zone(builder)) |
1654 | return set(potential_zones).pop() |
1655 | else: |
1656 | - utils.juju_log('Invalid zone assignment policy: %s' %\ |
1657 | - assignemnt_policy) |
1658 | + utils.juju_log('ERROR', 'Invalid zone assignment policy: %s' %\ |
1659 | + assignment_policy) |
1660 | sys.exit(1) |
1661 | |
1662 | |
1663 | @@ -351,9 +355,10 @@ |
1664 | # swift-ring-builder returns 1 on WARNING (ring didn't require balance) |
1665 | return False |
1666 | else: |
1667 | - utils.juju_log('balance_ring: %s returned %s' % (cmd, rc)) |
1668 | + utils.juju_log('ERROR', 'balance_ring: %s returned %s' % (cmd, rc)) |
1669 | sys.exit(1) |
1670 | |
1671 | + |
1672 | def should_balance(rings): |
1673 | '''Based on zones vs min. replicas, determine whether or not the rings |
1674 | should be balanaced during initial configuration.''' |
1675 | @@ -379,8 +384,77 @@ |
1676 | host = utils.relation_get('private-address', unit, relid) |
1677 | allowed_hosts.append(utils.get_host_ip(host)) |
1678 | |
1679 | - ctxt = { 'www_dir': WWW_DIR, 'allowed_hosts': allowed_hosts } |
1680 | + ctxt = { |
1681 | + 'www_dir': WWW_DIR, |
1682 | + 'allowed_hosts': allowed_hosts |
1683 | + } |
1684 | with open(APACHE_CONF, 'w') as conf: |
1685 | conf.write(render_config(APACHE_CONF, ctxt)) |
1686 | - subprocess.check_call(['service', 'apache2', 'reload']) |
1687 | - |
1688 | + utils.reload('apache2') |
1689 | + |
1690 | + |
1691 | +def generate_cert(): |
1692 | + ''' |
1693 | + Generates a self signed certificate and key using the |
1694 | + provided charm configuration data. |
1695 | + |
1696 | + returns: tuple of (cert, key) |
1697 | + ''' |
1698 | + CERT = '/etc/swift/ssl.cert' |
1699 | + KEY = '/etc/swift/ssl.key' |
1700 | + if (not os.path.exists(CERT) and |
1701 | + not os.path.exists(KEY)): |
1702 | + subj = '/C=%s/ST=%s/L=%s/CN=%s' %\ |
1703 | + (utils.config_get('country'), utils.config_get('state'), |
1704 | + utils.config_get('locale'), utils.config_get('common-name')) |
1705 | + cmd = ['openssl', 'req', '-new', '-x509', '-nodes', |
1706 | + '-out', CERT, '-keyout', KEY, |
1707 | + '-subj', subj] |
1708 | + subprocess.check_call(cmd) |
1709 | + os.chmod(KEY, 0600) |
1710 | + # Slurp as base64 encoded - makes handling easier up the stack |
1711 | + with open(CERT, 'r') as cfile: |
1712 | + ssl_cert = b64encode(cfile.read()) |
1713 | + with open(KEY, 'r') as kfile: |
1714 | + ssl_key = b64encode(kfile.read()) |
1715 | + return (ssl_cert, ssl_key) |
1716 | + |
1717 | + |
1718 | +def configure_haproxy(): |
1719 | + api_port = utils.config_get('bind-port') |
1720 | + service_ports = { |
1721 | + "swift": [ |
1722 | + cluster.determine_haproxy_port(api_port), |
1723 | + cluster.determine_api_port(api_port) |
1724 | + ] |
1725 | + } |
1726 | + write_proxy_config() |
1727 | + haproxy.configure_haproxy(service_ports) |
1728 | + |
1729 | + |
1730 | +def configure_https(): |
1731 | + if cluster.https(): |
1732 | + api_port = utils.config_get('bind-port') |
1733 | + if (len(cluster.peer_units()) > 0 or |
1734 | + cluster.is_clustered()): |
1735 | + target_port = cluster.determine_haproxy_port(api_port) |
1736 | + configure_haproxy() |
1737 | + else: |
1738 | + target_port = cluster.determine_api_port(api_port) |
1739 | + write_proxy_config() |
1740 | + cert, key = apache.get_cert() |
1741 | + if None in (cert, key): |
1742 | + cert, key = generate_cert() |
1743 | + ca_cert = apache.get_ca_cert() |
1744 | + apache.setup_https(namespace="swift", |
1745 | + port_maps={api_port: target_port}, |
1746 | + cert=cert, key=key, ca_cert=ca_cert) |
1747 | + |
1748 | + |
1749 | +def do_openstack_upgrade(source, packages): |
1750 | + openstack.configure_installation_source(source) |
1751 | + os.environ['DEBIAN_FRONTEND'] = 'noninteractive' |
1752 | + subprocess.check_call(['apt-get', 'update']) |
1753 | + cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confnew', '-y', |
1754 | + 'install'] + packages |
1755 | + subprocess.check_call(cmd) |
1756 | |
1757 | === removed file 'hooks/utils.py' |
1758 | --- hooks/utils.py 2012-12-14 23:07:01 +0000 |
1759 | +++ hooks/utils.py 1970-01-01 00:00:00 +0000 |
1760 | @@ -1,237 +0,0 @@ |
1761 | - |
1762 | -# |
1763 | -# Copyright 2012 Canonical Ltd. |
1764 | -# |
1765 | -# Authors: |
1766 | -# James Page <james.page@ubuntu.com> |
1767 | -# Paul Collins <paul.collins@canonical.com> |
1768 | -# |
1769 | - |
1770 | -import json |
1771 | -import os |
1772 | -import subprocess |
1773 | -import socket |
1774 | -import sys |
1775 | - |
1776 | - |
1777 | -def do_hooks(hooks): |
1778 | - hook = os.path.basename(sys.argv[0]) |
1779 | - |
1780 | - try: |
1781 | - hooks[hook]() |
1782 | - except KeyError: |
1783 | - juju_log('INFO', |
1784 | - "This charm doesn't know how to handle '{}'.".format(hook)) |
1785 | - |
1786 | - |
1787 | -def install(*pkgs): |
1788 | - cmd = [ |
1789 | - 'apt-get', |
1790 | - '-y', |
1791 | - 'install' |
1792 | - ] |
1793 | - for pkg in pkgs: |
1794 | - cmd.append(pkg) |
1795 | - subprocess.check_call(cmd) |
1796 | - |
1797 | -TEMPLATES_DIR = 'hooks/templates' |
1798 | - |
1799 | -try: |
1800 | - import jinja2 |
1801 | -except ImportError: |
1802 | - install('python-jinja2') |
1803 | - import jinja2 |
1804 | - |
1805 | -try: |
1806 | - import dns.resolver |
1807 | - import dns.ipv4 |
1808 | -except ImportError: |
1809 | - install('python-dnspython') |
1810 | - import dns.resolver |
1811 | - import dns.ipv4 |
1812 | - |
1813 | - |
1814 | -def render_template(template_name, context, template_dir=TEMPLATES_DIR): |
1815 | - templates = jinja2.Environment( |
1816 | - loader=jinja2.FileSystemLoader(template_dir) |
1817 | - ) |
1818 | - template = templates.get_template(template_name) |
1819 | - return template.render(context) |
1820 | - |
1821 | -CLOUD_ARCHIVE = \ |
1822 | -""" # Ubuntu Cloud Archive |
1823 | -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main |
1824 | -""" |
1825 | - |
1826 | -CLOUD_ARCHIVE_POCKETS = { |
1827 | - 'folsom': 'precise-updates/folsom', |
1828 | - 'folsom/updates': 'precise-updates/folsom', |
1829 | - 'folsom/proposed': 'precise-proposed/folsom' |
1830 | - } |
1831 | - |
1832 | - |
1833 | -def configure_source(): |
1834 | - source = str(config_get('openstack-origin')) |
1835 | - if not source: |
1836 | - return |
1837 | - if source.startswith('ppa:'): |
1838 | - cmd = [ |
1839 | - 'add-apt-repository', |
1840 | - source |
1841 | - ] |
1842 | - subprocess.check_call(cmd) |
1843 | - if source.startswith('cloud:'): |
1844 | - install('ubuntu-cloud-keyring') |
1845 | - pocket = source.split(':')[1] |
1846 | - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: |
1847 | - apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket])) |
1848 | - if source.startswith('deb'): |
1849 | - l = len(source.split('|')) |
1850 | - if l == 2: |
1851 | - (apt_line, key) = source.split('|') |
1852 | - cmd = [ |
1853 | - 'apt-key', |
1854 | - 'adv', '--keyserver keyserver.ubuntu.com', |
1855 | - '--recv-keys', key |
1856 | - ] |
1857 | - subprocess.check_call(cmd) |
1858 | - elif l == 1: |
1859 | - apt_line = source |
1860 | - |
1861 | - with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt: |
1862 | - apt.write(apt_line + "\n") |
1863 | - cmd = [ |
1864 | - 'apt-get', |
1865 | - 'update' |
1866 | - ] |
1867 | - subprocess.check_call(cmd) |
1868 | - |
1869 | -# Protocols |
1870 | -TCP = 'TCP' |
1871 | -UDP = 'UDP' |
1872 | - |
1873 | - |
1874 | -def expose(port, protocol='TCP'): |
1875 | - cmd = [ |
1876 | - 'open-port', |
1877 | - '{}/{}'.format(port, protocol) |
1878 | - ] |
1879 | - subprocess.check_call(cmd) |
1880 | - |
1881 | - |
1882 | -def juju_log(severity, message): |
1883 | - cmd = [ |
1884 | - 'juju-log', |
1885 | - '--log-level', severity, |
1886 | - message |
1887 | - ] |
1888 | - subprocess.check_call(cmd) |
1889 | - |
1890 | - |
1891 | -def relation_ids(relation): |
1892 | - cmd = [ |
1893 | - 'relation-ids', |
1894 | - relation |
1895 | - ] |
1896 | - return subprocess.check_output(cmd).split() # IGNORE:E1103 |
1897 | - |
1898 | - |
1899 | -def relation_list(rid): |
1900 | - cmd = [ |
1901 | - 'relation-list', |
1902 | - '-r', rid, |
1903 | - ] |
1904 | - return subprocess.check_output(cmd).split() # IGNORE:E1103 |
1905 | - |
1906 | - |
1907 | -def relation_get(attribute, unit=None, rid=None): |
1908 | - cmd = [ |
1909 | - 'relation-get', |
1910 | - ] |
1911 | - if rid: |
1912 | - cmd.append('-r') |
1913 | - cmd.append(rid) |
1914 | - cmd.append(attribute) |
1915 | - if unit: |
1916 | - cmd.append(unit) |
1917 | - value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
1918 | - if value == "": |
1919 | - return None |
1920 | - else: |
1921 | - return value |
1922 | - |
1923 | - |
1924 | -def relation_set(**kwargs): |
1925 | - cmd = [ |
1926 | - 'relation-set' |
1927 | - ] |
1928 | - args = [] |
1929 | - for k, v in kwargs.items(): |
1930 | - if k == 'rid': |
1931 | - if v: |
1932 | - cmd.append('-r') |
1933 | - cmd.append(v) |
1934 | - else: |
1935 | - args.append('{}={}'.format(k, v)) |
1936 | - cmd += args |
1937 | - subprocess.check_call(cmd) |
1938 | - |
1939 | - |
1940 | -def unit_get(attribute): |
1941 | - cmd = [ |
1942 | - 'unit-get', |
1943 | - attribute |
1944 | - ] |
1945 | - value = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
1946 | - if value == "": |
1947 | - return None |
1948 | - else: |
1949 | - return value |
1950 | - |
1951 | - |
1952 | -def config_get(attribute): |
1953 | - cmd = [ |
1954 | - 'config-get', |
1955 | - '--format', |
1956 | - 'json', |
1957 | - ] |
1958 | - out = subprocess.check_output(cmd).strip() # IGNORE:E1103 |
1959 | - cfg = json.loads(out) |
1960 | - |
1961 | - try: |
1962 | - return cfg[attribute] |
1963 | - except KeyError: |
1964 | - return None |
1965 | - |
1966 | -def get_unit_hostname(): |
1967 | - return socket.gethostname() |
1968 | - |
1969 | - |
1970 | -def get_host_ip(hostname=unit_get('private-address')): |
1971 | - try: |
1972 | - # Test to see if already an IPv4 address |
1973 | - socket.inet_aton(hostname) |
1974 | - return hostname |
1975 | - except socket.error: |
1976 | - try: |
1977 | - answers = dns.resolver.query(hostname, 'A') |
1978 | - if answers: |
1979 | - return answers[0].address |
1980 | - except dns.resolver.NXDOMAIN: |
1981 | - pass |
1982 | - return None |
1983 | - |
1984 | - |
1985 | -def restart(*services): |
1986 | - for service in services: |
1987 | - subprocess.check_call(['service', service, 'restart']) |
1988 | - |
1989 | - |
1990 | -def stop(*services): |
1991 | - for service in services: |
1992 | - subprocess.check_call(['service', service, 'stop']) |
1993 | - |
1994 | - |
1995 | -def start(*services): |
1996 | - for service in services: |
1997 | - subprocess.check_call(['service', service, 'start']) |
1998 | |
1999 | === modified file 'metadata.yaml' |
2000 | --- metadata.yaml 2012-12-18 19:59:19 +0000 |
2001 | +++ metadata.yaml 2013-05-29 18:03:27 +0000 |
2002 | @@ -12,3 +12,9 @@ |
2003 | interface: swift |
2004 | identity-service: |
2005 | interface: keystone |
2006 | + ha: |
2007 | + interface: hacluster |
2008 | + scope: container |
2009 | +peers: |
2010 | + cluster: |
2011 | + interface: swift-ha |
2012 | |
2013 | === modified file 'revision' |
2014 | --- revision 2013-04-26 10:34:27 +0000 |
2015 | +++ revision 2013-05-29 18:03:27 +0000 |
2016 | @@ -1,1 +1,1 @@ |
2017 | -112 |
2018 | +132 |
2019 | |
2020 | === added directory 'scripts' |
2021 | === added file 'scripts/add_to_cluster' |
2022 | --- scripts/add_to_cluster 1970-01-01 00:00:00 +0000 |
2023 | +++ scripts/add_to_cluster 2013-05-29 18:03:27 +0000 |
2024 | @@ -0,0 +1,13 @@ |
2025 | +#!/bin/bash |
2026 | +service corosync start || /bin/true |
2027 | +sleep 2 |
2028 | +while ! service pacemaker start; do |
2029 | + echo "Attempting to start pacemaker" |
2030 | + sleep 1; |
2031 | +done; |
2032 | +crm node online |
2033 | +sleep 2 |
2034 | +while crm status | egrep -q 'Stopped$'; do |
2035 | + echo "Waiting for nodes to come online" |
2036 | + sleep 1 |
2037 | +done |
2038 | |
2039 | === added file 'scripts/remove_from_cluster' |
2040 | --- scripts/remove_from_cluster 1970-01-01 00:00:00 +0000 |
2041 | +++ scripts/remove_from_cluster 2013-05-29 18:03:27 +0000 |
2042 | @@ -0,0 +1,4 @@ |
2043 | +#!/bin/bash |
2044 | +crm node standby |
2045 | +service pacemaker stop |
2046 | +service corosync stop |
2047 | |
2048 | === renamed directory 'hooks/templates' => 'templates' |
2049 | === added file 'templates/apache2_site.tmpl' |
2050 | --- templates/apache2_site.tmpl 1970-01-01 00:00:00 +0000 |
2051 | +++ templates/apache2_site.tmpl 2013-05-29 18:03:27 +0000 |
2052 | @@ -0,0 +1,19 @@ |
2053 | +Listen {{ ext }} |
2054 | +NameVirtualHost *:{{ ext }} |
2055 | +<VirtualHost *:{{ ext }}> |
2056 | + ServerName {{ private_address }} |
2057 | + SSLEngine on |
2058 | + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert |
2059 | + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key |
2060 | + ProxyPass / http://localhost:{{ int }}/ |
2061 | + ProxyPassReverse / http://localhost:{{ int }}/ |
2062 | + ProxyPreserveHost on |
2063 | +</VirtualHost> |
2064 | +<Proxy *> |
2065 | + Order deny,allow |
2066 | + Allow from all |
2067 | +</Proxy> |
2068 | +<Location /> |
2069 | + Order allow,deny |
2070 | + Allow from all |
2071 | +</Location> |
2072 | |
2073 | === modified file 'templates/essex/proxy-server.conf' |
2074 | --- hooks/templates/essex/proxy-server.conf 2012-12-14 23:07:01 +0000 |
2075 | +++ templates/essex/proxy-server.conf 2013-05-29 18:03:27 +0000 |
2076 | @@ -45,6 +45,15 @@ |
2077 | admin_tenant_name = {{ service_tenant }} |
2078 | admin_user = {{ service_user }} |
2079 | admin_password = {{ service_password }} |
2080 | +{% if os_release == 'essex' %} |
2081 | +{% if delay_auth_decision|lower == 'true' %} |
2082 | +delay_auth_decision = 1 |
2083 | +{% else %} |
2084 | +delay_auth_decision = 0 |
2085 | +{% endif %} |
2086 | +{% else %} |
2087 | +delay_auth_decision = {{ delay_auth_decision|lower }} |
2088 | +{% endif %} |
2089 | {% if os_release != 'essex' %}signing_dir = /etc/swift{% endif %} |
2090 | |
2091 | |
2092 | |
2093 | === modified file 'templates/grizzly/proxy-server.conf' |
2094 | --- hooks/templates/grizzly/proxy-server.conf 2013-01-29 00:41:52 +0000 |
2095 | +++ templates/grizzly/proxy-server.conf 2013-05-29 18:03:27 +0000 |
2096 | @@ -45,6 +45,7 @@ |
2097 | admin_tenant_name = {{ service_tenant }} |
2098 | admin_user = {{ service_user }} |
2099 | admin_password = {{ service_password }} |
2100 | +delay_auth_decision = {{ delay_auth_decision|lower }} |
2101 | {% if os_release != 'essex' %}signing_dir = /etc/swift{% endif %} |
2102 | |
2103 | |
2104 | |
2105 | === added file 'templates/haproxy.cfg' |
2106 | --- templates/haproxy.cfg 1970-01-01 00:00:00 +0000 |
2107 | +++ templates/haproxy.cfg 2013-05-29 18:03:27 +0000 |
2108 | @@ -0,0 +1,35 @@ |
2109 | +global |
2110 | + log 127.0.0.1 local0 |
2111 | + log 127.0.0.1 local1 notice |
2112 | + maxconn 20000 |
2113 | + user haproxy |
2114 | + group haproxy |
2115 | + spread-checks 0 |
2116 | + |
2117 | +defaults |
2118 | + log global |
2119 | + mode http |
2120 | + option httplog |
2121 | + option dontlognull |
2122 | + retries 3 |
2123 | + timeout queue 1000 |
2124 | + timeout connect 1000 |
2125 | + timeout client 30000 |
2126 | + timeout server 30000 |
2127 | + |
2128 | +listen stats :8888 |
2129 | + mode http |
2130 | + stats enable |
2131 | + stats hide-version |
2132 | + stats realm Haproxy\ Statistics |
2133 | + stats uri / |
2134 | + stats auth admin:password |
2135 | + |
2136 | +{% for service, ports in service_ports.iteritems() -%} |
2137 | +listen {{ service }} 0.0.0.0:{{ ports[0] }} |
2138 | + balance roundrobin |
2139 | + option tcplog |
2140 | + {% for unit, address in units.iteritems() -%} |
2141 | + server {{ unit }} {{ address }}:{{ ports[1] }} check |
2142 | + {% endfor %} |
2143 | +{% endfor %} |