Merge lp:~openstack-charmers/charms/precise/swift-proxy/python-redux into lp:~charmers/charms/precise/swift-proxy/trunk

Proposed by Adam Gandelman
Status: Merged
Merged at revision: 45
Proposed branch: lp:~openstack-charmers/charms/precise/swift-proxy/python-redux
Merge into: lp:~charmers/charms/precise/swift-proxy/trunk
Diff against target: 4996 lines (+3213/-1404)
33 files modified
Makefile (+14/-0)
charm-helpers.yaml (+10/-0)
hooks/charmhelpers/contrib/hahelpers/apache.py (+58/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+183/-0)
hooks/charmhelpers/contrib/openstack/context.py (+522/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+117/-0)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+2/-0)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+11/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+37/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+23/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+23/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+280/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+365/-0)
hooks/charmhelpers/core/hookenv.py (+340/-0)
hooks/charmhelpers/core/host.py (+241/-0)
hooks/charmhelpers/fetch/__init__.py (+209/-0)
hooks/charmhelpers/fetch/archiveurl.py (+48/-0)
hooks/charmhelpers/fetch/bzrurl.py (+49/-0)
hooks/charmhelpers/payload/__init__.py (+1/-0)
hooks/charmhelpers/payload/execd.py (+50/-0)
hooks/lib/apache_utils.py (+0/-193)
hooks/lib/cluster_utils.py (+0/-130)
hooks/lib/haproxy_utils.py (+0/-52)
hooks/lib/openstack_common.py (+0/-231)
hooks/lib/utils.py (+0/-332)
hooks/swift_context.py (+223/-0)
hooks/swift_hooks.py (+208/-164)
hooks/swift_utils.py (+133/-246)
revision (+1/-1)
templates/apache2_site.tmpl (+0/-19)
templates/grizzly/proxy-server.conf (+0/-1)
templates/haproxy.cfg (+0/-35)
templates/havana/proxy-server.conf (+65/-0)
To merge this branch: bzr merge lp:~openstack-charmers/charms/precise/swift-proxy/python-redux
Reviewer Review Type Date Requested Status
charmers Pending
Review via email: mp+191087@code.launchpad.net

Description of the change

Update of all Havana / Saucy / python-redux work:

* Full python rewrite using new OpenStack charm-helpers.

* Test coverage

* Havana support

To post a comment you must log in.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== added file 'Makefile'
2--- Makefile 1970-01-01 00:00:00 +0000
3+++ Makefile 2013-10-15 01:36:24 +0000
4@@ -0,0 +1,14 @@
5+#!/usr/bin/make
6+PYTHON := /usr/bin/env python
7+
8+lint:
9+ @flake8 --exclude hooks/charmhelpers --ignore=E125 hooks
10+ #@flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests
11+ @charm proof
12+
13+test:
14+ @echo Starting tests...
15+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
16+
17+sync:
18+ @charm-helper-sync -c charm-helpers.yaml
19
20=== added file 'charm-helpers.yaml'
21--- charm-helpers.yaml 1970-01-01 00:00:00 +0000
22+++ charm-helpers.yaml 2013-10-15 01:36:24 +0000
23@@ -0,0 +1,10 @@
24+branch: lp:charm-helpers
25+destination: hooks/charmhelpers
26+include:
27+ - core
28+ - fetch
29+ - contrib.openstack|inc=*
30+ - contrib.hahelpers:
31+ - apache
32+ - cluster
33+ - payload.execd
34
35=== added directory 'hooks/charmhelpers'
36=== added file 'hooks/charmhelpers/__init__.py'
37=== added directory 'hooks/charmhelpers/contrib'
38=== added file 'hooks/charmhelpers/contrib/__init__.py'
39=== added directory 'hooks/charmhelpers/contrib/hahelpers'
40=== added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
41=== added file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
42--- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
43+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2013-10-15 01:36:24 +0000
44@@ -0,0 +1,58 @@
45+#
46+# Copyright 2012 Canonical Ltd.
47+#
48+# This file is sourced from lp:openstack-charm-helpers
49+#
50+# Authors:
51+# James Page <james.page@ubuntu.com>
52+# Adam Gandelman <adamg@ubuntu.com>
53+#
54+
55+import subprocess
56+
57+from charmhelpers.core.hookenv import (
58+ config as config_get,
59+ relation_get,
60+ relation_ids,
61+ related_units as relation_list,
62+ log,
63+ INFO,
64+)
65+
66+
67+def get_cert():
68+ cert = config_get('ssl_cert')
69+ key = config_get('ssl_key')
70+ if not (cert and key):
71+ log("Inspecting identity-service relations for SSL certificate.",
72+ level=INFO)
73+ cert = key = None
74+ for r_id in relation_ids('identity-service'):
75+ for unit in relation_list(r_id):
76+ if not cert:
77+ cert = relation_get('ssl_cert',
78+ rid=r_id, unit=unit)
79+ if not key:
80+ key = relation_get('ssl_key',
81+ rid=r_id, unit=unit)
82+ return (cert, key)
83+
84+
85+def get_ca_cert():
86+ ca_cert = None
87+ log("Inspecting identity-service relations for CA SSL certificate.",
88+ level=INFO)
89+ for r_id in relation_ids('identity-service'):
90+ for unit in relation_list(r_id):
91+ if not ca_cert:
92+ ca_cert = relation_get('ca_cert',
93+ rid=r_id, unit=unit)
94+ return ca_cert
95+
96+
97+def install_ca_cert(ca_cert):
98+ if ca_cert:
99+ with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
100+ 'w') as crt:
101+ crt.write(ca_cert)
102+ subprocess.check_call(['update-ca-certificates', '--fresh'])
103
104=== added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
105--- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
106+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2013-10-15 01:36:24 +0000
107@@ -0,0 +1,183 @@
108+#
109+# Copyright 2012 Canonical Ltd.
110+#
111+# Authors:
112+# James Page <james.page@ubuntu.com>
113+# Adam Gandelman <adamg@ubuntu.com>
114+#
115+
116+import subprocess
117+import os
118+
119+from socket import gethostname as get_unit_hostname
120+
121+from charmhelpers.core.hookenv import (
122+ log,
123+ relation_ids,
124+ related_units as relation_list,
125+ relation_get,
126+ config as config_get,
127+ INFO,
128+ ERROR,
129+ unit_get,
130+)
131+
132+
133+class HAIncompleteConfig(Exception):
134+ pass
135+
136+
137+def is_clustered():
138+ for r_id in (relation_ids('ha') or []):
139+ for unit in (relation_list(r_id) or []):
140+ clustered = relation_get('clustered',
141+ rid=r_id,
142+ unit=unit)
143+ if clustered:
144+ return True
145+ return False
146+
147+
148+def is_leader(resource):
149+ cmd = [
150+ "crm", "resource",
151+ "show", resource
152+ ]
153+ try:
154+ status = subprocess.check_output(cmd)
155+ except subprocess.CalledProcessError:
156+ return False
157+ else:
158+ if get_unit_hostname() in status:
159+ return True
160+ else:
161+ return False
162+
163+
164+def peer_units():
165+ peers = []
166+ for r_id in (relation_ids('cluster') or []):
167+ for unit in (relation_list(r_id) or []):
168+ peers.append(unit)
169+ return peers
170+
171+
172+def oldest_peer(peers):
173+ local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
174+ for peer in peers:
175+ remote_unit_no = int(peer.split('/')[1])
176+ if remote_unit_no < local_unit_no:
177+ return False
178+ return True
179+
180+
181+def eligible_leader(resource):
182+ if is_clustered():
183+ if not is_leader(resource):
184+ log('Deferring action to CRM leader.', level=INFO)
185+ return False
186+ else:
187+ peers = peer_units()
188+ if peers and not oldest_peer(peers):
189+ log('Deferring action to oldest service unit.', level=INFO)
190+ return False
191+ return True
192+
193+
194+def https():
195+ '''
196+ Determines whether enough data has been provided in configuration
197+ or relation data to configure HTTPS
198+ .
199+ returns: boolean
200+ '''
201+ if config_get('use-https') == "yes":
202+ return True
203+ if config_get('ssl_cert') and config_get('ssl_key'):
204+ return True
205+ for r_id in relation_ids('identity-service'):
206+ for unit in relation_list(r_id):
207+ rel_state = [
208+ relation_get('https_keystone', rid=r_id, unit=unit),
209+ relation_get('ssl_cert', rid=r_id, unit=unit),
210+ relation_get('ssl_key', rid=r_id, unit=unit),
211+ relation_get('ca_cert', rid=r_id, unit=unit),
212+ ]
213+ # NOTE: works around (LP: #1203241)
214+ if (None not in rel_state) and ('' not in rel_state):
215+ return True
216+ return False
217+
218+
219+def determine_api_port(public_port):
220+ '''
221+ Determine correct API server listening port based on
222+ existence of HTTPS reverse proxy and/or haproxy.
223+
224+ public_port: int: standard public port for given service
225+
226+ returns: int: the correct listening port for the API service
227+ '''
228+ i = 0
229+ if len(peer_units()) > 0 or is_clustered():
230+ i += 1
231+ if https():
232+ i += 1
233+ return public_port - (i * 10)
234+
235+
236+def determine_haproxy_port(public_port):
237+ '''
238+ Description: Determine correct proxy listening port based on public IP +
239+ existence of HTTPS reverse proxy.
240+
241+ public_port: int: standard public port for given service
242+
243+ returns: int: the correct listening port for the HAProxy service
244+ '''
245+ i = 0
246+ if https():
247+ i += 1
248+ return public_port - (i * 10)
249+
250+
251+def get_hacluster_config():
252+ '''
253+ Obtains all relevant configuration from charm configuration required
254+ for initiating a relation to hacluster:
255+
256+ ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
257+
258+ returns: dict: A dict containing settings keyed by setting name.
259+ raises: HAIncompleteConfig if settings are missing.
260+ '''
261+ settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
262+ conf = {}
263+ for setting in settings:
264+ conf[setting] = config_get(setting)
265+ missing = []
266+ [missing.append(s) for s, v in conf.iteritems() if v is None]
267+ if missing:
268+ log('Insufficient config data to configure hacluster.', level=ERROR)
269+ raise HAIncompleteConfig
270+ return conf
271+
272+
273+def canonical_url(configs, vip_setting='vip'):
274+ '''
275+ Returns the correct HTTP URL to this host given the state of HTTPS
276+ configuration and hacluster.
277+
278+ :configs : OSTemplateRenderer: A config tempating object to inspect for
279+ a complete https context.
280+ :vip_setting: str: Setting in charm config that specifies
281+ VIP address.
282+ '''
283+ scheme = 'http'
284+ if 'https' in configs.complete_contexts():
285+ scheme = 'https'
286+ if is_clustered():
287+ addr = config_get(vip_setting)
288+ else:
289+ addr = unit_get('private-address')
290+ return '%s://%s' % (scheme, addr)
291
292=== added directory 'hooks/charmhelpers/contrib/openstack'
293=== added file 'hooks/charmhelpers/contrib/openstack/__init__.py'
294=== added file 'hooks/charmhelpers/contrib/openstack/context.py'
295--- hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
296+++ hooks/charmhelpers/contrib/openstack/context.py 2013-10-15 01:36:24 +0000
297@@ -0,0 +1,522 @@
298+import json
299+import os
300+
301+from base64 import b64decode
302+
303+from subprocess import (
304+ check_call
305+)
306+
307+
308+from charmhelpers.fetch import (
309+ apt_install,
310+ filter_installed_packages,
311+)
312+
313+from charmhelpers.core.hookenv import (
314+ config,
315+ local_unit,
316+ log,
317+ relation_get,
318+ relation_ids,
319+ related_units,
320+ unit_get,
321+ unit_private_ip,
322+ ERROR,
323+ WARNING,
324+)
325+
326+from charmhelpers.contrib.hahelpers.cluster import (
327+ determine_api_port,
328+ determine_haproxy_port,
329+ https,
330+ is_clustered,
331+ peer_units,
332+)
333+
334+from charmhelpers.contrib.hahelpers.apache import (
335+ get_cert,
336+ get_ca_cert,
337+)
338+
339+from charmhelpers.contrib.openstack.neutron import (
340+ neutron_plugin_attribute,
341+)
342+
343+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
344+
345+
346+class OSContextError(Exception):
347+ pass
348+
349+
350+def ensure_packages(packages):
351+ '''Install but do not upgrade required plugin packages'''
352+ required = filter_installed_packages(packages)
353+ if required:
354+ apt_install(required, fatal=True)
355+
356+
357+def context_complete(ctxt):
358+ _missing = []
359+ for k, v in ctxt.iteritems():
360+ if v is None or v == '':
361+ _missing.append(k)
362+ if _missing:
363+ log('Missing required data: %s' % ' '.join(_missing), level='INFO')
364+ return False
365+ return True
366+
367+
368+class OSContextGenerator(object):
369+ interfaces = []
370+
371+ def __call__(self):
372+ raise NotImplementedError
373+
374+
375+class SharedDBContext(OSContextGenerator):
376+ interfaces = ['shared-db']
377+
378+ def __init__(self, database=None, user=None, relation_prefix=None):
379+ '''
380+ Allows inspecting relation for settings prefixed with relation_prefix.
381+ This is useful for parsing access for multiple databases returned via
382+ the shared-db interface (eg, nova_password, quantum_password)
383+ '''
384+ self.relation_prefix = relation_prefix
385+ self.database = database
386+ self.user = user
387+
388+ def __call__(self):
389+ self.database = self.database or config('database')
390+ self.user = self.user or config('database-user')
391+ if None in [self.database, self.user]:
392+ log('Could not generate shared_db context. '
393+ 'Missing required charm config options. '
394+ '(database name and user)')
395+ raise OSContextError
396+ ctxt = {}
397+
398+ password_setting = 'password'
399+ if self.relation_prefix:
400+ password_setting = self.relation_prefix + '_password'
401+
402+ for rid in relation_ids('shared-db'):
403+ for unit in related_units(rid):
404+ passwd = relation_get(password_setting, rid=rid, unit=unit)
405+ ctxt = {
406+ 'database_host': relation_get('db_host', rid=rid,
407+ unit=unit),
408+ 'database': self.database,
409+ 'database_user': self.user,
410+ 'database_password': passwd,
411+ }
412+ if context_complete(ctxt):
413+ return ctxt
414+ return {}
415+
416+
417+class IdentityServiceContext(OSContextGenerator):
418+ interfaces = ['identity-service']
419+
420+ def __call__(self):
421+ log('Generating template context for identity-service')
422+ ctxt = {}
423+
424+ for rid in relation_ids('identity-service'):
425+ for unit in related_units(rid):
426+ ctxt = {
427+ 'service_port': relation_get('service_port', rid=rid,
428+ unit=unit),
429+ 'service_host': relation_get('service_host', rid=rid,
430+ unit=unit),
431+ 'auth_host': relation_get('auth_host', rid=rid, unit=unit),
432+ 'auth_port': relation_get('auth_port', rid=rid, unit=unit),
433+ 'admin_tenant_name': relation_get('service_tenant',
434+ rid=rid, unit=unit),
435+ 'admin_user': relation_get('service_username', rid=rid,
436+ unit=unit),
437+ 'admin_password': relation_get('service_password', rid=rid,
438+ unit=unit),
439+ # XXX: Hard-coded http.
440+ 'service_protocol': 'http',
441+ 'auth_protocol': 'http',
442+ }
443+ if context_complete(ctxt):
444+ return ctxt
445+ return {}
446+
447+
448+class AMQPContext(OSContextGenerator):
449+ interfaces = ['amqp']
450+
451+ def __call__(self):
452+ log('Generating template context for amqp')
453+ conf = config()
454+ try:
455+ username = conf['rabbit-user']
456+ vhost = conf['rabbit-vhost']
457+ except KeyError as e:
458+ log('Could not generate shared_db context. '
459+ 'Missing required charm config options: %s.' % e)
460+ raise OSContextError
461+
462+ ctxt = {}
463+ for rid in relation_ids('amqp'):
464+ for unit in related_units(rid):
465+ if relation_get('clustered', rid=rid, unit=unit):
466+ ctxt['clustered'] = True
467+ ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
468+ unit=unit)
469+ else:
470+ ctxt['rabbitmq_host'] = relation_get('private-address',
471+ rid=rid, unit=unit)
472+ ctxt.update({
473+ 'rabbitmq_user': username,
474+ 'rabbitmq_password': relation_get('password', rid=rid,
475+ unit=unit),
476+ 'rabbitmq_virtual_host': vhost,
477+ })
478+ if context_complete(ctxt):
479+ # Sufficient information found = break out!
480+ break
481+ # Used for active/active rabbitmq >= grizzly
482+ ctxt['rabbitmq_hosts'] = []
483+ for unit in related_units(rid):
484+ ctxt['rabbitmq_hosts'].append(relation_get('private-address',
485+ rid=rid, unit=unit))
486+ if not context_complete(ctxt):
487+ return {}
488+ else:
489+ return ctxt
490+
491+
492+class CephContext(OSContextGenerator):
493+ interfaces = ['ceph']
494+
495+ def __call__(self):
496+ '''This generates context for /etc/ceph/ceph.conf templates'''
497+ if not relation_ids('ceph'):
498+ return {}
499+ log('Generating template context for ceph')
500+ mon_hosts = []
501+ auth = None
502+ key = None
503+ for rid in relation_ids('ceph'):
504+ for unit in related_units(rid):
505+ mon_hosts.append(relation_get('private-address', rid=rid,
506+ unit=unit))
507+ auth = relation_get('auth', rid=rid, unit=unit)
508+ key = relation_get('key', rid=rid, unit=unit)
509+
510+ ctxt = {
511+ 'mon_hosts': ' '.join(mon_hosts),
512+ 'auth': auth,
513+ 'key': key,
514+ }
515+
516+ if not os.path.isdir('/etc/ceph'):
517+ os.mkdir('/etc/ceph')
518+
519+ if not context_complete(ctxt):
520+ return {}
521+
522+ ensure_packages(['ceph-common'])
523+
524+ return ctxt
525+
526+
527+class HAProxyContext(OSContextGenerator):
528+ interfaces = ['cluster']
529+
530+ def __call__(self):
531+ '''
532+ Builds half a context for the haproxy template, which describes
533+ all peers to be included in the cluster. Each charm needs to include
534+ its own context generator that describes the port mapping.
535+ '''
536+ if not relation_ids('cluster'):
537+ return {}
538+
539+ cluster_hosts = {}
540+ l_unit = local_unit().replace('/', '-')
541+ cluster_hosts[l_unit] = unit_get('private-address')
542+
543+ for rid in relation_ids('cluster'):
544+ for unit in related_units(rid):
545+ _unit = unit.replace('/', '-')
546+ addr = relation_get('private-address', rid=rid, unit=unit)
547+ cluster_hosts[_unit] = addr
548+
549+ ctxt = {
550+ 'units': cluster_hosts,
551+ }
552+ if len(cluster_hosts.keys()) > 1:
553+ # Enable haproxy when we have enough peers.
554+ log('Ensuring haproxy enabled in /etc/default/haproxy.')
555+ with open('/etc/default/haproxy', 'w') as out:
556+ out.write('ENABLED=1\n')
557+ return ctxt
558+ log('HAProxy context is incomplete, this unit has no peers.')
559+ return {}
560+
561+
562+class ImageServiceContext(OSContextGenerator):
563+ interfaces = ['image-service']
564+
565+ def __call__(self):
566+ '''
567+ Obtains the glance API server from the image-service relation. Useful
568+ in nova and cinder (currently).
569+ '''
570+ log('Generating template context for image-service.')
571+ rids = relation_ids('image-service')
572+ if not rids:
573+ return {}
574+ for rid in rids:
575+ for unit in related_units(rid):
576+ api_server = relation_get('glance-api-server',
577+ rid=rid, unit=unit)
578+ if api_server:
579+ return {'glance_api_servers': api_server}
580+ log('ImageService context is incomplete. '
581+ 'Missing required relation data.')
582+ return {}
583+
584+
585+class ApacheSSLContext(OSContextGenerator):
586+ """
587+ Generates a context for an apache vhost configuration that configures
588+ HTTPS reverse proxying for one or many endpoints. Generated context
589+ looks something like:
590+ {
591+ 'namespace': 'cinder',
592+ 'private_address': 'iscsi.mycinderhost.com',
593+ 'endpoints': [(8776, 8766), (8777, 8767)]
594+ }
595+
596+ The endpoints list consists of a tuples mapping external ports
597+ to internal ports.
598+ """
599+ interfaces = ['https']
600+
601+ # charms should inherit this context and set external ports
602+ # and service namespace accordingly.
603+ external_ports = []
604+ service_namespace = None
605+
606+ def enable_modules(self):
607+ cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
608+ check_call(cmd)
609+
610+ def configure_cert(self):
611+ if not os.path.isdir('/etc/apache2/ssl'):
612+ os.mkdir('/etc/apache2/ssl')
613+ ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
614+ if not os.path.isdir(ssl_dir):
615+ os.mkdir(ssl_dir)
616+ cert, key = get_cert()
617+ with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
618+ cert_out.write(b64decode(cert))
619+ with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
620+ key_out.write(b64decode(key))
621+ ca_cert = get_ca_cert()
622+ if ca_cert:
623+ with open(CA_CERT_PATH, 'w') as ca_out:
624+ ca_out.write(b64decode(ca_cert))
625+ check_call(['update-ca-certificates'])
626+
627+ def __call__(self):
628+ if isinstance(self.external_ports, basestring):
629+ self.external_ports = [self.external_ports]
630+ if (not self.external_ports or not https()):
631+ return {}
632+
633+ self.configure_cert()
634+ self.enable_modules()
635+
636+ ctxt = {
637+ 'namespace': self.service_namespace,
638+ 'private_address': unit_get('private-address'),
639+ 'endpoints': []
640+ }
641+ for ext_port in self.external_ports:
642+ if peer_units() or is_clustered():
643+ int_port = determine_haproxy_port(ext_port)
644+ else:
645+ int_port = determine_api_port(ext_port)
646+ portmap = (int(ext_port), int(int_port))
647+ ctxt['endpoints'].append(portmap)
648+ return ctxt
649+
650+
651+class NeutronContext(object):
652+ interfaces = []
653+
654+ @property
655+ def plugin(self):
656+ return None
657+
658+ @property
659+ def network_manager(self):
660+ return None
661+
662+ @property
663+ def packages(self):
664+ return neutron_plugin_attribute(
665+ self.plugin, 'packages', self.network_manager)
666+
667+ @property
668+ def neutron_security_groups(self):
669+ return None
670+
671+ def _ensure_packages(self):
672+ [ensure_packages(pkgs) for pkgs in self.packages]
673+
674+ def _save_flag_file(self):
675+ if self.network_manager == 'quantum':
676+ _file = '/etc/nova/quantum_plugin.conf'
677+ else:
678+ _file = '/etc/nova/neutron_plugin.conf'
679+ with open(_file, 'wb') as out:
680+ out.write(self.plugin + '\n')
681+
682+ def ovs_ctxt(self):
683+ driver = neutron_plugin_attribute(self.plugin, 'driver',
684+ self.network_manager)
685+
686+ ovs_ctxt = {
687+ 'core_plugin': driver,
688+ 'neutron_plugin': 'ovs',
689+ 'neutron_security_groups': self.neutron_security_groups,
690+ 'local_ip': unit_private_ip(),
691+ }
692+
693+ return ovs_ctxt
694+
695+ def __call__(self):
696+ self._ensure_packages()
697+
698+ if self.network_manager not in ['quantum', 'neutron']:
699+ return {}
700+
701+ if not self.plugin:
702+ return {}
703+
704+ ctxt = {'network_manager': self.network_manager}
705+
706+ if self.plugin == 'ovs':
707+ ctxt.update(self.ovs_ctxt())
708+
709+ self._save_flag_file()
710+ return ctxt
711+
712+
713+class OSConfigFlagContext(OSContextGenerator):
714+ '''
715+ Responsible for adding user-defined config-flags in charm config
716+ to a template context.
717+ '''
718+ def __call__(self):
719+ config_flags = config('config-flags')
720+ if not config_flags or config_flags in ['None', '']:
721+ return {}
722+ config_flags = config_flags.split(',')
723+ flags = {}
724+ for flag in config_flags:
725+ if '=' not in flag:
726+ log('Improperly formatted config-flag, expected k=v '
727+ 'got %s' % flag, level=WARNING)
728+ continue
729+ k, v = flag.split('=')
730+ flags[k.strip()] = v
731+ ctxt = {'user_config_flags': flags}
732+ return ctxt
733+
734+
735+class SubordinateConfigContext(OSContextGenerator):
736+ """
737+ Responsible for inspecting relations to subordinates that
738+ may be exporting required config via a json blob.
739+
740+ The subordinate interface allows subordinates to export their
741+ configuration requirements to the principal for multiple config
742+ files and multiple services. I.e., a subordinate that has interfaces
743+ to both glance and nova may export to following yaml blob as json:
744+
745+ glance:
746+ /etc/glance/glance-api.conf:
747+ sections:
748+ DEFAULT:
749+ - [key1, value1]
750+ /etc/glance/glance-registry.conf:
751+ MYSECTION:
752+ - [key2, value2]
753+ nova:
754+ /etc/nova/nova.conf:
755+ sections:
756+ DEFAULT:
757+ - [key3, value3]
758+
759+
760+ It is then up to the principal charms to subscribe this context to
761+ the service+config file it is interested in. Configuration data will
762+ be available in the template context, in glance's case, as:
763+ ctxt = {
764+ ... other context ...
765+ 'subordinate_config': {
766+ 'DEFAULT': {
767+ 'key1': 'value1',
768+ },
769+ 'MYSECTION': {
770+ 'key2': 'value2',
771+ },
772+ }
773+ }
774+
775+ """
776+ def __init__(self, service, config_file, interface):
777+ """
778+ :param service : Service name key to query in any subordinate
779+ data found
780+ :param config_file : Service's config file to query sections
781+ :param interface : Subordinate interface to inspect
782+ """
783+ self.service = service
784+ self.config_file = config_file
785+ self.interface = interface
786+
787+ def __call__(self):
788+ ctxt = {}
789+ for rid in relation_ids(self.interface):
790+ for unit in related_units(rid):
791+ sub_config = relation_get('subordinate_configuration',
792+ rid=rid, unit=unit)
793+ if sub_config and sub_config != '':
794+ try:
795+ sub_config = json.loads(sub_config)
796+ except:
797+ log('Could not parse JSON from subordinate_config '
798+ 'setting from %s' % rid, level=ERROR)
799+ continue
800+
801+ if self.service not in sub_config:
802+ log('Found subordinate_config on %s but it contained'
803+ 'nothing for %s service' % (rid, self.service))
804+ continue
805+
806+ sub_config = sub_config[self.service]
807+ if self.config_file not in sub_config:
808+ log('Found subordinate_config on %s but it contained'
809+ 'nothing for %s' % (rid, self.config_file))
810+ continue
811+
812+ sub_config = sub_config[self.config_file]
813+ for k, v in sub_config.iteritems():
814+ ctxt[k] = v
815+
816+ if not ctxt:
817+ ctxt['sections'] = {}
818+
819+ return ctxt
820
821=== added file 'hooks/charmhelpers/contrib/openstack/neutron.py'
822--- hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
823+++ hooks/charmhelpers/contrib/openstack/neutron.py 2013-10-15 01:36:24 +0000
824@@ -0,0 +1,117 @@
825+# Various utilities for dealing with Neutron and the renaming from Quantum.
826+
827+from subprocess import check_output
828+
829+from charmhelpers.core.hookenv import (
830+ config,
831+ log,
832+ ERROR,
833+)
834+
835+from charmhelpers.contrib.openstack.utils import os_release
836+
837+
838+def headers_package():
839+ """Ensures correct linux-headers for running kernel are installed,
840+ for building DKMS package"""
841+ kver = check_output(['uname', '-r']).strip()
842+ return 'linux-headers-%s' % kver
843+
844+
845+# legacy
846+def quantum_plugins():
847+ from charmhelpers.contrib.openstack import context
848+ return {
849+ 'ovs': {
850+ 'config': '/etc/quantum/plugins/openvswitch/'
851+ 'ovs_quantum_plugin.ini',
852+ 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
853+ 'OVSQuantumPluginV2',
854+ 'contexts': [
855+ context.SharedDBContext(user=config('neutron-database-user'),
856+ database=config('neutron-database'),
857+ relation_prefix='neutron')],
858+ 'services': ['quantum-plugin-openvswitch-agent'],
859+ 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
860+ ['quantum-plugin-openvswitch-agent']],
861+ },
862+ 'nvp': {
863+ 'config': '/etc/quantum/plugins/nicira/nvp.ini',
864+ 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
865+ 'QuantumPlugin.NvpPluginV2',
866+ 'services': [],
867+ 'packages': [],
868+ }
869+ }
870+
871+
872+def neutron_plugins():
873+ from charmhelpers.contrib.openstack import context
874+ return {
875+ 'ovs': {
876+ 'config': '/etc/neutron/plugins/openvswitch/'
877+ 'ovs_neutron_plugin.ini',
878+ 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
879+ 'OVSNeutronPluginV2',
880+ 'contexts': [
881+ context.SharedDBContext(user=config('neutron-database-user'),
882+ database=config('neutron-database'),
883+ relation_prefix='neutron')],
884+ 'services': ['neutron-plugin-openvswitch-agent'],
885+ 'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
886+ ['quantum-plugin-openvswitch-agent']],
887+ },
888+ 'nvp': {
889+ 'config': '/etc/neutron/plugins/nicira/nvp.ini',
890+ 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
891+ 'NeutronPlugin.NvpPluginV2',
892+ 'services': [],
893+ 'packages': [],
894+ }
895+ }
896+
897+
898+def neutron_plugin_attribute(plugin, attr, net_manager=None):
899+ manager = net_manager or network_manager()
900+ if manager == 'quantum':
901+ plugins = quantum_plugins()
902+ elif manager == 'neutron':
903+ plugins = neutron_plugins()
904+ else:
905+ log('Error: Network manager does not support plugins.')
906+ raise Exception
907+
908+ try:
909+ _plugin = plugins[plugin]
910+ except KeyError:
911+ log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
912+ raise Exception
913+
914+ try:
915+ return _plugin[attr]
916+ except KeyError:
917+ return None
918+
919+
920+def network_manager():
921+ '''
922+ Deals with the renaming of Quantum to Neutron in H and any situations
923+ that require compatibility (e.g., deploying H with network-manager=quantum,
924+ upgrading from G).
925+ '''
926+ release = os_release('nova-common')
927+ manager = config('network-manager').lower()
928+
929+ if manager not in ['quantum', 'neutron']:
930+ return manager
931+
932+ if release in ['essex']:
933+ # E does not support neutron
934+ log('Neutron networking not supported in Essex.', level=ERROR)
935+ raise Exception
936+ elif release in ['folsom', 'grizzly']:
937+ # neutron is named quantum in F and G
938+ return 'quantum'
939+ else:
940+ # ensure accurate naming for all releases post-H
941+ return 'neutron'
942
943=== added directory 'hooks/charmhelpers/contrib/openstack/templates'
944=== added file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py'
945--- hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
946+++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 2013-10-15 01:36:24 +0000
947@@ -0,0 +1,2 @@
948+# dummy __init__.py to fool syncer into thinking this is a syncable python
949+# module
950
951=== added file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
952--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000
953+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2013-10-15 01:36:24 +0000
954@@ -0,0 +1,11 @@
955+###############################################################################
956+# [ WARNING ]
957+# cinder configuration file maintained by Juju
958+# local changes may be overwritten.
959+###############################################################################
960+{% if auth -%}
961+[global]
962+ auth_supported = {{ auth }}
963+ keyring = /etc/ceph/$cluster.$name.keyring
964+ mon host = {{ mon_hosts }}
965+{% endif -%}
966
967=== added file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
968--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000
969+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2013-10-15 01:36:24 +0000
970@@ -0,0 +1,37 @@
971+global
972+ log 127.0.0.1 local0
973+ log 127.0.0.1 local1 notice
974+ maxconn 20000
975+ user haproxy
976+ group haproxy
977+ spread-checks 0
978+
979+defaults
980+ log global
981+ mode http
982+ option httplog
983+ option dontlognull
984+ retries 3
985+ timeout queue 1000
986+ timeout connect 1000
987+ timeout client 30000
988+ timeout server 30000
989+
990+listen stats :8888
991+ mode http
992+ stats enable
993+ stats hide-version
994+ stats realm Haproxy\ Statistics
995+ stats uri /
996+ stats auth admin:password
997+
998+{% if units -%}
999+{% for service, ports in service_ports.iteritems() -%}
1000+listen {{ service }} 0.0.0.0:{{ ports[0] }}
1001+ balance roundrobin
1002+ option tcplog
1003+ {% for unit, address in units.iteritems() -%}
1004+ server {{ unit }} {{ address }}:{{ ports[1] }} check
1005+ {% endfor %}
1006+{% endfor -%}
1007+{% endif -%}
1008
1009=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend'
1010--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 1970-01-01 00:00:00 +0000
1011+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2013-10-15 01:36:24 +0000
1012@@ -0,0 +1,23 @@
1013+{% if endpoints -%}
1014+{% for ext, int in endpoints -%}
1015+Listen {{ ext }}
1016+NameVirtualHost *:{{ ext }}
1017+<VirtualHost *:{{ ext }}>
1018+ ServerName {{ private_address }}
1019+ SSLEngine on
1020+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1021+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1022+ ProxyPass / http://localhost:{{ int }}/
1023+ ProxyPassReverse / http://localhost:{{ int }}/
1024+ ProxyPreserveHost on
1025+</VirtualHost>
1026+<Proxy *>
1027+ Order deny,allow
1028+ Allow from all
1029+</Proxy>
1030+<Location />
1031+ Order allow,deny
1032+ Allow from all
1033+</Location>
1034+{% endfor -%}
1035+{% endif -%}
1036
1037=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf'
1038--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 1970-01-01 00:00:00 +0000
1039+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2013-10-15 01:36:24 +0000
1040@@ -0,0 +1,23 @@
1041+{% if endpoints -%}
1042+{% for ext, int in endpoints -%}
1043+Listen {{ ext }}
1044+NameVirtualHost *:{{ ext }}
1045+<VirtualHost *:{{ ext }}>
1046+ ServerName {{ private_address }}
1047+ SSLEngine on
1048+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1049+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1050+ ProxyPass / http://localhost:{{ int }}/
1051+ ProxyPassReverse / http://localhost:{{ int }}/
1052+ ProxyPreserveHost on
1053+</VirtualHost>
1054+<Proxy *>
1055+ Order deny,allow
1056+ Allow from all
1057+</Proxy>
1058+<Location />
1059+ Order allow,deny
1060+ Allow from all
1061+</Location>
1062+{% endfor -%}
1063+{% endif -%}
1064
1065=== added file 'hooks/charmhelpers/contrib/openstack/templating.py'
1066--- hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000
1067+++ hooks/charmhelpers/contrib/openstack/templating.py 2013-10-15 01:36:24 +0000
1068@@ -0,0 +1,280 @@
1069+import os
1070+
1071+from charmhelpers.fetch import apt_install
1072+
1073+from charmhelpers.core.hookenv import (
1074+ log,
1075+ ERROR,
1076+ INFO
1077+)
1078+
1079+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
1080+
1081+try:
1082+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
1083+except ImportError:
1084+ # python-jinja2 may not be installed yet, or we're running unittests.
1085+ FileSystemLoader = ChoiceLoader = Environment = exceptions = None
1086+
1087+
1088+class OSConfigException(Exception):
1089+ pass
1090+
1091+
1092+def get_loader(templates_dir, os_release):
1093+ """
1094+ Create a jinja2.ChoiceLoader containing template dirs up to
1095+ and including os_release. If a release-specific template directory
1096+ is missing at templates_dir, it will be omitted from the loader.
1097+ templates_dir is added to the bottom of the search list as a base
1098+ loading dir.
1099+
1100+ A charm may also ship a templates dir with this module
1101+ and it will be appended to the bottom of the search list, eg:
1102+ hooks/charmhelpers/contrib/openstack/templates.
1103+
1104+ :param templates_dir: str: Base template directory containing release
1105+ sub-directories.
1106+ :param os_release : str: OpenStack release codename to construct template
1107+ loader.
1108+
1109+ :returns : jinja2.ChoiceLoader constructed with a list of
1110+ jinja2.FilesystemLoaders, ordered in descending
1111+ order by OpenStack release.
1112+ """
1113+ tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1114+ for rel in OPENSTACK_CODENAMES.itervalues()]
1115+
1116+ if not os.path.isdir(templates_dir):
1117+ log('Templates directory not found @ %s.' % templates_dir,
1118+ level=ERROR)
1119+ raise OSConfigException
1120+
1121+ # the bottom contains templates_dir and possibly a common templates dir
1122+ # shipped with the helper.
1123+ loaders = [FileSystemLoader(templates_dir)]
1124+ helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
1125+ if os.path.isdir(helper_templates):
1126+ loaders.append(FileSystemLoader(helper_templates))
1127+
1128+ for rel, tmpl_dir in tmpl_dirs:
1129+ if os.path.isdir(tmpl_dir):
1130+ loaders.insert(0, FileSystemLoader(tmpl_dir))
1131+ if rel == os_release:
1132+ break
1133+ log('Creating choice loader with dirs: %s' %
1134+ [l.searchpath for l in loaders], level=INFO)
1135+ return ChoiceLoader(loaders)
1136+
1137+
1138+class OSConfigTemplate(object):
1139+ """
1140+ Associates a config file template with a list of context generators.
1141+ Responsible for constructing a template context based on those generators.
1142+ """
1143+ def __init__(self, config_file, contexts):
1144+ self.config_file = config_file
1145+
1146+ if hasattr(contexts, '__call__'):
1147+ self.contexts = [contexts]
1148+ else:
1149+ self.contexts = contexts
1150+
1151+ self._complete_contexts = []
1152+
1153+ def context(self):
1154+ ctxt = {}
1155+ for context in self.contexts:
1156+ _ctxt = context()
1157+ if _ctxt:
1158+ ctxt.update(_ctxt)
1159+ # track interfaces for every complete context.
1160+ [self._complete_contexts.append(interface)
1161+ for interface in context.interfaces
1162+ if interface not in self._complete_contexts]
1163+ return ctxt
1164+
1165+ def complete_contexts(self):
1166+ '''
1167+ Return a list of interfaces that have satisfied contexts.
1168+ '''
1169+ if self._complete_contexts:
1170+ return self._complete_contexts
1171+ self.context()
1172+ return self._complete_contexts
1173+
1174+
1175+class OSConfigRenderer(object):
1176+ """
1177+ This class provides a common templating system to be used by OpenStack
1178+ charms. It is intended to help charms share common code and templates,
1179+ and ease the burden of managing config templates across multiple OpenStack
1180+ releases.
1181+
1182+ Basic usage:
1183+ # import some common context generators from charmhelpers
1184+ from charmhelpers.contrib.openstack import context
1185+
1186+ # Create a renderer object for a specific OS release.
1187+ configs = OSConfigRenderer(templates_dir='/tmp/templates',
1188+ openstack_release='folsom')
1189+ # register some config files with context generators.
1190+ configs.register(config_file='/etc/nova/nova.conf',
1191+ contexts=[context.SharedDBContext(),
1192+ context.AMQPContext()])
1193+ configs.register(config_file='/etc/nova/api-paste.ini',
1194+ contexts=[context.IdentityServiceContext()])
1195+ configs.register(config_file='/etc/haproxy/haproxy.conf',
1196+ contexts=[context.HAProxyContext()])
1197+ # write out a single config
1198+ configs.write('/etc/nova/nova.conf')
1199+ # write out all registered configs
1200+ configs.write_all()
1201+
1202+ Details:
1203+
1204+ OpenStack Releases and template loading
1205+ ---------------------------------------
1206+ When the object is instantiated, it is associated with a specific OS
1207+ release. This dictates how the template loader will be constructed.
1208+
1209+ The constructed loader attempts to load the template from several places
1210+ in the following order:
1211+ - from the most recent OS release-specific template dir (if one exists)
1212+ - the base templates_dir
1213+ - a template directory shipped in the charm with this helper file.
1214+
1215+
1216+ For the example above, '/tmp/templates' contains the following structure:
1217+ /tmp/templates/nova.conf
1218+ /tmp/templates/api-paste.ini
1219+ /tmp/templates/grizzly/api-paste.ini
1220+ /tmp/templates/havana/api-paste.ini
1221+
1222+ Since it was registered with the grizzly release, it first searches
1223+ the grizzly directory for nova.conf, then the templates dir.
1224+
1225+ When writing api-paste.ini, it will find the template in the grizzly
1226+ directory.
1227+
1228+ If the object were created with folsom, it would fall back to the
1229+ base templates dir for its api-paste.ini template.
1230+
1231+ This system should help manage changes in config files through
1232+ openstack releases, allowing charms to fall back to the most recently
1233+ updated config template for a given release
1234+
1235+ The haproxy.conf, since it is not shipped in the templates dir, will
1236+ be loaded from the module directory's template directory, eg
1237+ $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
1238+ us to ship common templates (haproxy, apache) with the helpers.
1239+
1240+ Context generators
1241+ ---------------------------------------
1242+ Context generators are used to generate template contexts during hook
1243+ execution. Doing so may require inspecting service relations, charm
1244+ config, etc. When registered, a config file is associated with a list
1245+ of generators. When a template is rendered and written, all context
1246+ generators are called in a chain to generate the context dictionary
1247+ passed to the jinja2 template. See context.py for more info.
1248+ """
1249+ def __init__(self, templates_dir, openstack_release):
1250+ if not os.path.isdir(templates_dir):
1251+ log('Could not locate templates dir %s' % templates_dir,
1252+ level=ERROR)
1253+ raise OSConfigException
1254+
1255+ self.templates_dir = templates_dir
1256+ self.openstack_release = openstack_release
1257+ self.templates = {}
1258+ self._tmpl_env = None
1259+
1260+ if None in [Environment, ChoiceLoader, FileSystemLoader]:
1261+ # if this code is running, the object is created pre-install hook.
1262+ # jinja2 shouldn't get touched until the module is reloaded on next
1263+ # hook execution, with proper jinja2 bits successfully imported.
1264+ apt_install('python-jinja2')
1265+
1266+ def register(self, config_file, contexts):
1267+ """
1268+ Register a config file with a list of context generators to be called
1269+ during rendering.
1270+ """
1271+ self.templates[config_file] = OSConfigTemplate(config_file=config_file,
1272+ contexts=contexts)
1273+ log('Registered config file: %s' % config_file, level=INFO)
1274+
1275+ def _get_tmpl_env(self):
1276+ if not self._tmpl_env:
1277+ loader = get_loader(self.templates_dir, self.openstack_release)
1278+ self._tmpl_env = Environment(loader=loader)
1279+
1280+ def _get_template(self, template):
1281+ self._get_tmpl_env()
1282+ template = self._tmpl_env.get_template(template)
1283+ log('Loaded template from %s' % template.filename, level=INFO)
1284+ return template
1285+
1286+ def render(self, config_file):
1287+ if config_file not in self.templates:
1288+ log('Config not registered: %s' % config_file, level=ERROR)
1289+ raise OSConfigException
1290+ ctxt = self.templates[config_file].context()
1291+
1292+ _tmpl = os.path.basename(config_file)
1293+ try:
1294+ template = self._get_template(_tmpl)
1295+ except exceptions.TemplateNotFound:
1296+ # if no template is found with basename, try looking for it
1297+ # using a munged full path, eg:
1298+ # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
1299+ _tmpl = '_'.join(config_file.split('/')[1:])
1300+ try:
1301+ template = self._get_template(_tmpl)
1302+ except exceptions.TemplateNotFound as e:
1303+ log('Could not load template from %s by %s or %s.' %
1304+ (self.templates_dir, os.path.basename(config_file), _tmpl),
1305+ level=ERROR)
1306+ raise e
1307+
1308+ log('Rendering from template: %s' % _tmpl, level=INFO)
1309+ return template.render(ctxt)
1310+
1311+ def write(self, config_file):
1312+ """
1313+ Write a single config file, raises if config file is not registered.
1314+ """
1315+ if config_file not in self.templates:
1316+ log('Config not registered: %s' % config_file, level=ERROR)
1317+ raise OSConfigException
1318+
1319+ _out = self.render(config_file)
1320+
1321+ with open(config_file, 'wb') as out:
1322+ out.write(_out)
1323+
1324+ log('Wrote template %s.' % config_file, level=INFO)
1325+
1326+ def write_all(self):
1327+ """
1328+ Write out all registered config files.
1329+ """
1330+ [self.write(k) for k in self.templates.iterkeys()]
1331+
1332+ def set_release(self, openstack_release):
1333+ """
1334+ Resets the template environment and generates a new template loader
1335+ based on the new openstack release.
1336+ """
1337+ self._tmpl_env = None
1338+ self.openstack_release = openstack_release
1339+ self._get_tmpl_env()
1340+
1341+ def complete_contexts(self):
1342+ '''
1343+ Returns a list of context interfaces that yield a complete context.
1344+ '''
1345+ interfaces = []
1346+ [interfaces.extend(i.complete_contexts())
1347+ for i in self.templates.itervalues()]
1348+ return interfaces
1349
1350=== added file 'hooks/charmhelpers/contrib/openstack/utils.py'
1351--- hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000
1352+++ hooks/charmhelpers/contrib/openstack/utils.py 2013-10-15 01:36:24 +0000
1353@@ -0,0 +1,365 @@
1354+#!/usr/bin/python
1355+
1356+# Common python helper functions used for OpenStack charms.
1357+from collections import OrderedDict
1358+
1359+import apt_pkg as apt
1360+import subprocess
1361+import os
1362+import socket
1363+import sys
1364+
1365+from charmhelpers.core.hookenv import (
1366+ config,
1367+ log as juju_log,
1368+ charm_dir,
1369+)
1370+
1371+from charmhelpers.core.host import (
1372+ lsb_release,
1373+)
1374+
1375+from charmhelpers.fetch import (
1376+ apt_install,
1377+)
1378+
1379+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
1380+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
1381+
1382+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
1383+ ('oneiric', 'diablo'),
1384+ ('precise', 'essex'),
1385+ ('quantal', 'folsom'),
1386+ ('raring', 'grizzly'),
1387+ ('saucy', 'havana'),
1388+])
1389+
1390+
1391+OPENSTACK_CODENAMES = OrderedDict([
1392+ ('2011.2', 'diablo'),
1393+ ('2012.1', 'essex'),
1394+ ('2012.2', 'folsom'),
1395+ ('2013.1', 'grizzly'),
1396+ ('2013.2', 'havana'),
1397+ ('2014.1', 'icehouse'),
1398+])
1399+
1400+# The ugly duckling
1401+SWIFT_CODENAMES = OrderedDict([
1402+ ('1.4.3', 'diablo'),
1403+ ('1.4.8', 'essex'),
1404+ ('1.7.4', 'folsom'),
1405+ ('1.8.0', 'grizzly'),
1406+ ('1.7.7', 'grizzly'),
1407+ ('1.7.6', 'grizzly'),
1408+ ('1.10.0', 'havana'),
1409+ ('1.9.1', 'havana'),
1410+ ('1.9.0', 'havana'),
1411+])
1412+
1413+
1414+def error_out(msg):
1415+ juju_log("FATAL ERROR: %s" % msg, level='ERROR')
1416+ sys.exit(1)
1417+
1418+
1419+def get_os_codename_install_source(src):
1420+ '''Derive OpenStack release codename from a given installation source.'''
1421+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1422+ rel = ''
1423+ if src == 'distro':
1424+ try:
1425+ rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
1426+ except KeyError:
1427+ e = 'Could not derive openstack release for '\
1428+ 'this Ubuntu release: %s' % ubuntu_rel
1429+ error_out(e)
1430+ return rel
1431+
1432+ if src.startswith('cloud:'):
1433+ ca_rel = src.split(':')[1]
1434+ ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
1435+ return ca_rel
1436+
1437+ # Best guess match based on deb string provided
1438+ if src.startswith('deb') or src.startswith('ppa'):
1439+ for k, v in OPENSTACK_CODENAMES.iteritems():
1440+ if v in src:
1441+ return v
1442+
1443+
1444+def get_os_version_install_source(src):
1445+ codename = get_os_codename_install_source(src)
1446+ return get_os_version_codename(codename)
1447+
1448+
1449+def get_os_codename_version(vers):
1450+ '''Determine OpenStack codename from version number.'''
1451+ try:
1452+ return OPENSTACK_CODENAMES[vers]
1453+ except KeyError:
1454+ e = 'Could not determine OpenStack codename for version %s' % vers
1455+ error_out(e)
1456+
1457+
1458+def get_os_version_codename(codename):
1459+ '''Determine OpenStack version number from codename.'''
1460+ for k, v in OPENSTACK_CODENAMES.iteritems():
1461+ if v == codename:
1462+ return k
1463+ e = 'Could not derive OpenStack version for '\
1464+ 'codename: %s' % codename
1465+ error_out(e)
1466+
1467+
1468+def get_os_codename_package(package, fatal=True):
1469+ '''Derive OpenStack release codename from an installed package.'''
1470+ apt.init()
1471+ cache = apt.Cache()
1472+
1473+ try:
1474+ pkg = cache[package]
1475+ except:
1476+ if not fatal:
1477+ return None
1478+ # the package is unknown to the current apt cache.
1479+ e = 'Could not determine version of package with no installation '\
1480+ 'candidate: %s' % package
1481+ error_out(e)
1482+
1483+ if not pkg.current_ver:
1484+ if not fatal:
1485+ return None
1486+ # package is known, but no version is currently installed.
1487+ e = 'Could not determine version of uninstalled package: %s' % package
1488+ error_out(e)
1489+
1490+ vers = apt.upstream_version(pkg.current_ver.ver_str)
1491+
1492+ try:
1493+ if 'swift' in pkg.name:
1494+ swift_vers = vers[:5]
1495+ if swift_vers not in SWIFT_CODENAMES:
1496+ # Deal with 1.10.0 upward
1497+ swift_vers = vers[:6]
1498+ return SWIFT_CODENAMES[swift_vers]
1499+ else:
1500+ vers = vers[:6]
1501+ return OPENSTACK_CODENAMES[vers]
1502+ except KeyError:
1503+ e = 'Could not determine OpenStack codename for version %s' % vers
1504+ error_out(e)
1505+
1506+
1507+def get_os_version_package(pkg, fatal=True):
1508+ '''Derive OpenStack version number from an installed package.'''
1509+ codename = get_os_codename_package(pkg, fatal=fatal)
1510+
1511+ if not codename:
1512+ return None
1513+
1514+ if 'swift' in pkg:
1515+ vers_map = SWIFT_CODENAMES
1516+ else:
1517+ vers_map = OPENSTACK_CODENAMES
1518+
1519+ for version, cname in vers_map.iteritems():
1520+ if cname == codename:
1521+ return version
1522+ #e = "Could not determine OpenStack version for package: %s" % pkg
1523+ #error_out(e)
1524+
1525+
1526+os_rel = None
1527+
1528+
1529+def os_release(package, base='essex'):
1530+ '''
1531+ Returns OpenStack release codename from a cached global.
1532+ If the codename can not be determined from either an installed package or
1533+ the installation source, the earliest release supported by the charm should
1534+ be returned.
1535+ '''
1536+ global os_rel
1537+ if os_rel:
1538+ return os_rel
1539+ os_rel = (get_os_codename_package(package, fatal=False) or
1540+ get_os_codename_install_source(config('openstack-origin')) or
1541+ base)
1542+ return os_rel
1543+
1544+
1545+def import_key(keyid):
1546+ cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
1547+ "--recv-keys %s" % keyid
1548+ try:
1549+ subprocess.check_call(cmd.split(' '))
1550+ except subprocess.CalledProcessError:
1551+ error_out("Error importing repo key %s" % keyid)
1552+
1553+
1554+def configure_installation_source(rel):
1555+ '''Configure apt installation source.'''
1556+ if rel == 'distro':
1557+ return
1558+ elif rel[:4] == "ppa:":
1559+ src = rel
1560+ subprocess.check_call(["add-apt-repository", "-y", src])
1561+ elif rel[:3] == "deb":
1562+ l = len(rel.split('|'))
1563+ if l == 2:
1564+ src, key = rel.split('|')
1565+ juju_log("Importing PPA key from keyserver for %s" % src)
1566+ import_key(key)
1567+ elif l == 1:
1568+ src = rel
1569+ with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
1570+ f.write(src)
1571+ elif rel[:6] == 'cloud:':
1572+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1573+ rel = rel.split(':')[1]
1574+ u_rel = rel.split('-')[0]
1575+ ca_rel = rel.split('-')[1]
1576+
1577+ if u_rel != ubuntu_rel:
1578+ e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
1579+ 'version (%s)' % (ca_rel, ubuntu_rel)
1580+ error_out(e)
1581+
1582+ if 'staging' in ca_rel:
1583+ # staging is just a regular PPA.
1584+ os_rel = ca_rel.split('/')[0]
1585+ ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
1586+ cmd = 'add-apt-repository -y %s' % ppa
1587+ subprocess.check_call(cmd.split(' '))
1588+ return
1589+
1590+ # map charm config options to actual archive pockets.
1591+ pockets = {
1592+ 'folsom': 'precise-updates/folsom',
1593+ 'folsom/updates': 'precise-updates/folsom',
1594+ 'folsom/proposed': 'precise-proposed/folsom',
1595+ 'grizzly': 'precise-updates/grizzly',
1596+ 'grizzly/updates': 'precise-updates/grizzly',
1597+ 'grizzly/proposed': 'precise-proposed/grizzly',
1598+ 'havana': 'precise-updates/havana',
1599+ 'havana/updates': 'precise-updates/havana',
1600+ 'havana/proposed': 'precise-proposed/havana',
1601+ }
1602+
1603+ try:
1604+ pocket = pockets[ca_rel]
1605+ except KeyError:
1606+ e = 'Invalid Cloud Archive release specified: %s' % rel
1607+ error_out(e)
1608+
1609+ src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
1610+ apt_install('ubuntu-cloud-keyring', fatal=True)
1611+
1612+ with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
1613+ f.write(src)
1614+ else:
1615+ error_out("Invalid openstack-release specified: %s" % rel)
1616+
1617+
1618+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
1619+ """
1620+ Write an rc file in the charm-delivered directory containing
1621+ exported environment variables provided by env_vars. Any charm scripts run
1622+ outside the juju hook environment can source this scriptrc to obtain
1623+ updated config information necessary to perform health checks or
1624+ service changes.
1625+ """
1626+ juju_rc_path = "%s/%s" % (charm_dir(), script_path)
1627+ if not os.path.exists(os.path.dirname(juju_rc_path)):
1628+ os.mkdir(os.path.dirname(juju_rc_path))
1629+ with open(juju_rc_path, 'wb') as rc_script:
1630+ rc_script.write(
1631+ "#!/bin/bash\n")
1632+ [rc_script.write('export %s=%s\n' % (u, p))
1633+ for u, p in env_vars.iteritems() if u != "script_path"]
1634+
1635+
1636+def openstack_upgrade_available(package):
1637+ """
1638+ Determines if an OpenStack upgrade is available from installation
1639+ source, based on version of installed package.
1640+
1641+ :param package: str: Name of installed package.
1642+
1643+ :returns: bool: : Returns True if configured installation source offers
1644+ a newer version of package.
1645+
1646+ """
1647+
1648+ src = config('openstack-origin')
1649+ cur_vers = get_os_version_package(package)
1650+ available_vers = get_os_version_install_source(src)
1651+ apt.init()
1652+ return apt.version_compare(available_vers, cur_vers) == 1
1653+
1654+
1655+def is_ip(address):
1656+ """
1657+ Returns True if address is a valid IP address.
1658+ """
1659+ try:
1660+ # Test to see if already an IPv4 address
1661+ socket.inet_aton(address)
1662+ return True
1663+ except socket.error:
1664+ return False
1665+
1666+
1667+def ns_query(address):
1668+ try:
1669+ import dns.resolver
1670+ except ImportError:
1671+ apt_install('python-dnspython')
1672+ import dns.resolver
1673+
1674+ if isinstance(address, dns.name.Name):
1675+ rtype = 'PTR'
1676+ elif isinstance(address, basestring):
1677+ rtype = 'A'
1678+
1679+ answers = dns.resolver.query(address, rtype)
1680+ if answers:
1681+ return str(answers[0])
1682+ return None
1683+
1684+
1685+def get_host_ip(hostname):
1686+ """
1687+ Resolves the IP for a given hostname, or returns
1688+ the input if it is already an IP.
1689+ """
1690+ if is_ip(hostname):
1691+ return hostname
1692+
1693+ return ns_query(hostname)
1694+
1695+
1696+def get_hostname(address):
1697+ """
1698+ Resolves hostname for given IP, or returns the input
1699+ if it is already a hostname.
1700+ """
1701+ if not is_ip(address):
1702+ return address
1703+
1704+ try:
1705+ import dns.reversename
1706+ except ImportError:
1707+ apt_install('python-dnspython')
1708+ import dns.reversename
1709+
1710+ rev = dns.reversename.from_address(address)
1711+ result = ns_query(rev)
1712+ if not result:
1713+ return None
1714+
1715+ # strip trailing .
1716+ if result.endswith('.'):
1717+ return result[:-1]
1718+ return result
1719
1720=== added directory 'hooks/charmhelpers/core'
1721=== added file 'hooks/charmhelpers/core/__init__.py'
1722=== added file 'hooks/charmhelpers/core/hookenv.py'
1723--- hooks/charmhelpers/core/hookenv.py 1970-01-01 00:00:00 +0000
1724+++ hooks/charmhelpers/core/hookenv.py 2013-10-15 01:36:24 +0000
1725@@ -0,0 +1,340 @@
1726+"Interactions with the Juju environment"
1727+# Copyright 2013 Canonical Ltd.
1728+#
1729+# Authors:
1730+# Charm Helpers Developers <juju@lists.ubuntu.com>
1731+
1732+import os
1733+import json
1734+import yaml
1735+import subprocess
1736+import UserDict
1737+
1738+CRITICAL = "CRITICAL"
1739+ERROR = "ERROR"
1740+WARNING = "WARNING"
1741+INFO = "INFO"
1742+DEBUG = "DEBUG"
1743+MARKER = object()
1744+
1745+cache = {}
1746+
1747+
1748+def cached(func):
1749+ ''' Cache return values for multiple executions of func + args
1750+
1751+ For example:
1752+
1753+ @cached
1754+ def unit_get(attribute):
1755+ pass
1756+
1757+ unit_get('test')
1758+
1759+ will cache the result of unit_get + 'test' for future calls.
1760+ '''
1761+ def wrapper(*args, **kwargs):
1762+ global cache
1763+ key = str((func, args, kwargs))
1764+ try:
1765+ return cache[key]
1766+ except KeyError:
1767+ res = func(*args, **kwargs)
1768+ cache[key] = res
1769+ return res
1770+ return wrapper
1771+
1772+
1773+def flush(key):
1774+ ''' Flushes any entries from function cache where the
1775+ key is found in the function+args '''
1776+ flush_list = []
1777+ for item in cache:
1778+ if key in item:
1779+ flush_list.append(item)
1780+ for item in flush_list:
1781+ del cache[item]
1782+
1783+
1784+def log(message, level=None):
1785+ "Write a message to the juju log"
1786+ command = ['juju-log']
1787+ if level:
1788+ command += ['-l', level]
1789+ command += [message]
1790+ subprocess.call(command)
1791+
1792+
1793+class Serializable(UserDict.IterableUserDict):
1794+ "Wrapper, an object that can be serialized to yaml or json"
1795+
1796+ def __init__(self, obj):
1797+ # wrap the object
1798+ UserDict.IterableUserDict.__init__(self)
1799+ self.data = obj
1800+
1801+ def __getattr__(self, attr):
1802+ # See if this object has attribute.
1803+ if attr in ("json", "yaml", "data"):
1804+ return self.__dict__[attr]
1805+ # Check for attribute in wrapped object.
1806+ got = getattr(self.data, attr, MARKER)
1807+ if got is not MARKER:
1808+ return got
1809+ # Proxy to the wrapped object via dict interface.
1810+ try:
1811+ return self.data[attr]
1812+ except KeyError:
1813+ raise AttributeError(attr)
1814+
1815+ def __getstate__(self):
1816+ # Pickle as a standard dictionary.
1817+ return self.data
1818+
1819+ def __setstate__(self, state):
1820+ # Unpickle into our wrapper.
1821+ self.data = state
1822+
1823+ def json(self):
1824+ "Serialize the object to json"
1825+ return json.dumps(self.data)
1826+
1827+ def yaml(self):
1828+ "Serialize the object to yaml"
1829+ return yaml.dump(self.data)
1830+
1831+
1832+def execution_environment():
1833+ """A convenient bundling of the current execution context"""
1834+ context = {}
1835+ context['conf'] = config()
1836+ if relation_id():
1837+ context['reltype'] = relation_type()
1838+ context['relid'] = relation_id()
1839+ context['rel'] = relation_get()
1840+ context['unit'] = local_unit()
1841+ context['rels'] = relations()
1842+ context['env'] = os.environ
1843+ return context
1844+
1845+
1846+def in_relation_hook():
1847+ "Determine whether we're running in a relation hook"
1848+ return 'JUJU_RELATION' in os.environ
1849+
1850+
1851+def relation_type():
1852+ "The scope for the current relation hook"
1853+ return os.environ.get('JUJU_RELATION', None)
1854+
1855+
1856+def relation_id():
1857+ "The relation ID for the current relation hook"
1858+ return os.environ.get('JUJU_RELATION_ID', None)
1859+
1860+
1861+def local_unit():
1862+ "Local unit ID"
1863+ return os.environ['JUJU_UNIT_NAME']
1864+
1865+
1866+def remote_unit():
1867+ "The remote unit for the current relation hook"
1868+ return os.environ['JUJU_REMOTE_UNIT']
1869+
1870+
1871+def service_name():
1872+ "The name service group this unit belongs to"
1873+ return local_unit().split('/')[0]
1874+
1875+
1876+@cached
1877+def config(scope=None):
1878+ "Juju charm configuration"
1879+ config_cmd_line = ['config-get']
1880+ if scope is not None:
1881+ config_cmd_line.append(scope)
1882+ config_cmd_line.append('--format=json')
1883+ try:
1884+ return json.loads(subprocess.check_output(config_cmd_line))
1885+ except ValueError:
1886+ return None
1887+
1888+
1889+@cached
1890+def relation_get(attribute=None, unit=None, rid=None):
1891+ _args = ['relation-get', '--format=json']
1892+ if rid:
1893+ _args.append('-r')
1894+ _args.append(rid)
1895+ _args.append(attribute or '-')
1896+ if unit:
1897+ _args.append(unit)
1898+ try:
1899+ return json.loads(subprocess.check_output(_args))
1900+ except ValueError:
1901+ return None
1902+
1903+
1904+def relation_set(relation_id=None, relation_settings={}, **kwargs):
1905+ relation_cmd_line = ['relation-set']
1906+ if relation_id is not None:
1907+ relation_cmd_line.extend(('-r', relation_id))
1908+ for k, v in (relation_settings.items() + kwargs.items()):
1909+ if v is None:
1910+ relation_cmd_line.append('{}='.format(k))
1911+ else:
1912+ relation_cmd_line.append('{}={}'.format(k, v))
1913+ subprocess.check_call(relation_cmd_line)
1914+ # Flush cache of any relation-gets for local unit
1915+ flush(local_unit())
1916+
1917+
1918+@cached
1919+def relation_ids(reltype=None):
1920+ "A list of relation_ids"
1921+ reltype = reltype or relation_type()
1922+ relid_cmd_line = ['relation-ids', '--format=json']
1923+ if reltype is not None:
1924+ relid_cmd_line.append(reltype)
1925+ return json.loads(subprocess.check_output(relid_cmd_line)) or []
1926+ return []
1927+
1928+
1929+@cached
1930+def related_units(relid=None):
1931+ "A list of related units"
1932+ relid = relid or relation_id()
1933+ units_cmd_line = ['relation-list', '--format=json']
1934+ if relid is not None:
1935+ units_cmd_line.extend(('-r', relid))
1936+ return json.loads(subprocess.check_output(units_cmd_line)) or []
1937+
1938+
1939+@cached
1940+def relation_for_unit(unit=None, rid=None):
1941+ "Get the json represenation of a unit's relation"
1942+ unit = unit or remote_unit()
1943+ relation = relation_get(unit=unit, rid=rid)
1944+ for key in relation:
1945+ if key.endswith('-list'):
1946+ relation[key] = relation[key].split()
1947+ relation['__unit__'] = unit
1948+ return relation
1949+
1950+
1951+@cached
1952+def relations_for_id(relid=None):
1953+ "Get relations of a specific relation ID"
1954+ relation_data = []
1955+ relid = relid or relation_ids()
1956+ for unit in related_units(relid):
1957+ unit_data = relation_for_unit(unit, relid)
1958+ unit_data['__relid__'] = relid
1959+ relation_data.append(unit_data)
1960+ return relation_data
1961+
1962+
1963+@cached
1964+def relations_of_type(reltype=None):
1965+ "Get relations of a specific type"
1966+ relation_data = []
1967+ reltype = reltype or relation_type()
1968+ for relid in relation_ids(reltype):
1969+ for relation in relations_for_id(relid):
1970+ relation['__relid__'] = relid
1971+ relation_data.append(relation)
1972+ return relation_data
1973+
1974+
1975+@cached
1976+def relation_types():
1977+ "Get a list of relation types supported by this charm"
1978+ charmdir = os.environ.get('CHARM_DIR', '')
1979+ mdf = open(os.path.join(charmdir, 'metadata.yaml'))
1980+ md = yaml.safe_load(mdf)
1981+ rel_types = []
1982+ for key in ('provides', 'requires', 'peers'):
1983+ section = md.get(key)
1984+ if section:
1985+ rel_types.extend(section.keys())
1986+ mdf.close()
1987+ return rel_types
1988+
1989+
1990+@cached
1991+def relations():
1992+ rels = {}
1993+ for reltype in relation_types():
1994+ relids = {}
1995+ for relid in relation_ids(reltype):
1996+ units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
1997+ for unit in related_units(relid):
1998+ reldata = relation_get(unit=unit, rid=relid)
1999+ units[unit] = reldata
2000+ relids[relid] = units
2001+ rels[reltype] = relids
2002+ return rels
2003+
2004+
2005+def open_port(port, protocol="TCP"):
2006+ "Open a service network port"
2007+ _args = ['open-port']
2008+ _args.append('{}/{}'.format(port, protocol))
2009+ subprocess.check_call(_args)
2010+
2011+
2012+def close_port(port, protocol="TCP"):
2013+ "Close a service network port"
2014+ _args = ['close-port']
2015+ _args.append('{}/{}'.format(port, protocol))
2016+ subprocess.check_call(_args)
2017+
2018+
2019+@cached
2020+def unit_get(attribute):
2021+ _args = ['unit-get', '--format=json', attribute]
2022+ try:
2023+ return json.loads(subprocess.check_output(_args))
2024+ except ValueError:
2025+ return None
2026+
2027+
2028+def unit_private_ip():
2029+ return unit_get('private-address')
2030+
2031+
2032+class UnregisteredHookError(Exception):
2033+ pass
2034+
2035+
2036+class Hooks(object):  # registry mapping Juju hook names to handler callables
2037+    def __init__(self):
2038+        super(Hooks, self).__init__()
2039+        self._hooks = {}  # hook name -> handler function
2040+
2041+    def register(self, name, function):  # register `function` as handler for `name`
2042+        self._hooks[name] = function
2043+
2044+    def execute(self, args):  # dispatch on basename(args[0]), i.e. the hook script name
2045+        hook_name = os.path.basename(args[0])
2046+        if hook_name in self._hooks:
2047+            self._hooks[hook_name]()
2048+        else:
2049+            raise UnregisteredHookError(hook_name)
2050+
2051+    def hook(self, *hook_names):  # decorator: register under each given hook name
2052+        def wrapper(decorated):
2053+            for hook_name in hook_names:
2054+                self.register(hook_name, decorated)
2055+            else:  # NOTE(review): loop has no break, so this else ALWAYS runs
2056+                self.register(decorated.__name__, decorated)
2057+                if '_' in decorated.__name__:
2058+                    self.register(
2059+                        decorated.__name__.replace('_', '-'), decorated)
2060+            return decorated
2061+        return wrapper
2062+
2063+
2064+def charm_dir():
2065+ return os.environ.get('CHARM_DIR')
2066
2067=== added file 'hooks/charmhelpers/core/host.py'
2068--- hooks/charmhelpers/core/host.py 1970-01-01 00:00:00 +0000
2069+++ hooks/charmhelpers/core/host.py 2013-10-15 01:36:24 +0000
2070@@ -0,0 +1,241 @@
2071+"""Tools for working with the host system"""
2072+# Copyright 2012 Canonical Ltd.
2073+#
2074+# Authors:
2075+# Nick Moffitt <nick.moffitt@canonical.com>
2076+# Matthew Wedgwood <matthew.wedgwood@canonical.com>
2077+
2078+import os
2079+import pwd
2080+import grp
2081+import random
2082+import string
2083+import subprocess
2084+import hashlib
2085+
2086+from collections import OrderedDict
2087+
2088+from hookenv import log
2089+
2090+
2091+def service_start(service_name):
2092+ return service('start', service_name)
2093+
2094+
2095+def service_stop(service_name):
2096+ return service('stop', service_name)
2097+
2098+
2099+def service_restart(service_name):
2100+ return service('restart', service_name)
2101+
2102+
2103+def service_reload(service_name, restart_on_failure=False):
2104+ service_result = service('reload', service_name)
2105+ if not service_result and restart_on_failure:
2106+ service_result = service('restart', service_name)
2107+ return service_result
2108+
2109+
2110+def service(action, service_name):
2111+ cmd = ['service', service_name, action]
2112+ return subprocess.call(cmd) == 0
2113+
2114+
2115+def service_running(service):  # NOTE(review): arg name shadows the service() helper above
2116+    try:
2117+        output = subprocess.check_output(['service', service, 'status'])
2118+    except subprocess.CalledProcessError:
2119+        return False  # non-zero exit from 'service status' => treat as not running
2120+    else:
2121+        if ("start/running" in output or "is running" in output):  # upstart / sysv phrasing
2122+            return True
2123+        else:
2124+            return False
2125+
2126+
2127+def adduser(username, password=None, shell='/bin/bash', system_user=False):
2128+ """Add a user"""
2129+ try:
2130+ user_info = pwd.getpwnam(username)
2131+ log('user {0} already exists!'.format(username))
2132+ except KeyError:
2133+ log('creating user {0}'.format(username))
2134+ cmd = ['useradd']
2135+ if system_user or password is None:
2136+ cmd.append('--system')
2137+ else:
2138+ cmd.extend([
2139+ '--create-home',
2140+ '--shell', shell,
2141+ '--password', password,
2142+ ])
2143+ cmd.append(username)
2144+ subprocess.check_call(cmd)
2145+ user_info = pwd.getpwnam(username)
2146+ return user_info
2147+
2148+
2149+def add_user_to_group(username, group):
2150+ """Add a user to a group"""
2151+ cmd = [
2152+ 'gpasswd', '-a',
2153+ username,
2154+ group
2155+ ]
2156+ log("Adding user {} to group {}".format(username, group))
2157+ subprocess.check_call(cmd)
2158+
2159+
2160+def rsync(from_path, to_path, flags='-r', options=None):
2161+ """Replicate the contents of a path"""
2162+ options = options or ['--delete', '--executability']
2163+ cmd = ['/usr/bin/rsync', flags]
2164+ cmd.extend(options)
2165+ cmd.append(from_path)
2166+ cmd.append(to_path)
2167+ log(" ".join(cmd))
2168+ return subprocess.check_output(cmd).strip()
2169+
2170+
2171+def symlink(source, destination):
2172+ """Create a symbolic link"""
2173+ log("Symlinking {} as {}".format(source, destination))
2174+ cmd = [
2175+ 'ln',
2176+ '-sf',
2177+ source,
2178+ destination,
2179+ ]
2180+ subprocess.check_call(cmd)
2181+
2182+
2183+def mkdir(path, owner='root', group='root', perms=0555, force=False):
2184+    """Create a directory owned by owner:group with mode perms."""
2185+    log("Making dir {} {}:{} {:o}".format(path, owner, group,
2186+                                          perms))
2187+    uid = pwd.getpwnam(owner).pw_uid
2188+    gid = grp.getgrnam(group).gr_gid
2189+    realpath = os.path.abspath(path)
2190+    # Bug fix: force=True used to unlink the blocker but never mkdir afterwards
2191+    if force and os.path.exists(realpath) and not os.path.isdir(realpath):
2192+        log("Removing non-directory file {} prior to mkdir()".format(path))
2193+        os.unlink(realpath)
2194+    if not os.path.exists(realpath):
2195+        os.makedirs(realpath, perms)
2196+    os.chown(realpath, uid, gid)
2197+
2198+
2199+def write_file(path, content, owner='root', group='root', perms=0444):
2200+ """Create or overwrite a file with the contents of a string"""
2201+ log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
2202+ uid = pwd.getpwnam(owner).pw_uid
2203+ gid = grp.getgrnam(group).gr_gid
2204+ with open(path, 'w') as target:
2205+ os.fchown(target.fileno(), uid, gid)
2206+ os.fchmod(target.fileno(), perms)
2207+ target.write(content)
2208+
2209+
2210+def mount(device, mountpoint, options=None, persist=False):
2211+    '''Mount a filesystem; returns True on success, False on failure.'''
2212+    cmd_args = ['mount']
2213+    if options is not None:
2214+        cmd_args.extend(['-o', options])
2215+    cmd_args.extend([device, mountpoint])
2216+    try:
2217+        subprocess.check_output(cmd_args)
2218+    except subprocess.CalledProcessError as e:  # 'as' form, consistent with rest of tree
2219+        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
2220+        return False
2221+    if persist:
2222+        # TODO: update fstab
2223+        pass
2224+    return True
2225+
2226+
2227+def umount(mountpoint, persist=False):
2228+    '''Unmount a filesystem; returns True on success, False on failure.'''
2229+    cmd_args = ['umount', mountpoint]
2230+    try:
2231+        subprocess.check_output(cmd_args)
2232+    except subprocess.CalledProcessError as e:  # 'as' form, consistent with rest of tree
2233+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
2234+        return False
2235+    if persist:
2236+        # TODO: update fstab
2237+        pass
2238+    return True
2239+
2240+
2241+def mounts():
2242+ '''List of all mounted volumes as [[mountpoint,device],[...]]'''
2243+ with open('/proc/mounts') as f:
2244+ # [['/mount/point','/dev/path'],[...]]
2245+ system_mounts = [m[1::-1] for m in [l.strip().split()
2246+ for l in f.readlines()]]
2247+ return system_mounts
2248+
2249+
2250+def file_hash(path):
2251+ ''' Generate a md5 hash of the contents of 'path' or None if not found '''
2252+ if os.path.exists(path):
2253+ h = hashlib.md5()
2254+ with open(path, 'r') as source:
2255+ h.update(source.read()) # IGNORE:E1101 - it does have update
2256+ return h.hexdigest()
2257+ else:
2258+ return None
2259+
2260+
2261+def restart_on_change(restart_map):
2262+ ''' Restart services based on configuration files changing
2263+
2264+ This function is used a decorator, for example
2265+
2266+ @restart_on_change({
2267+ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
2268+ })
2269+ def ceph_client_changed():
2270+ ...
2271+
2272+ In this example, the cinder-api and cinder-volume services
2273+ would be restarted if /etc/ceph/ceph.conf is changed by the
2274+ ceph_client_changed function.
2275+ '''
2276+ def wrap(f):
2277+ def wrapped_f(*args):
2278+ checksums = {}
2279+ for path in restart_map:
2280+ checksums[path] = file_hash(path)
2281+ f(*args)
2282+ restarts = []
2283+ for path in restart_map:
2284+ if checksums[path] != file_hash(path):
2285+ restarts += restart_map[path]
2286+ for service_name in list(OrderedDict.fromkeys(restarts)):
2287+ service('restart', service_name)
2288+ return wrapped_f
2289+ return wrap
2290+
2291+
2292+def lsb_release():
2293+    '''Return /etc/lsb-release in a dict'''
2294+    d = {}
2295+    with open('/etc/lsb-release', 'r') as lsb:
2296+        for l in lsb:
2297+            k, v = l.split('=', 1)  # split once: a value may itself contain '='
2298+            d[k.strip()] = v.strip()
2299+    return d
2300+
2301+
2302+def pwgen(length=None):
2303+    '''Generate a random password of `length` chars (default: random 35-44).'''
2304+    if length is None:
2305+        length = random.choice(range(35, 45))
2306+    alphanumeric_chars = [
2307+        l for l in (string.letters + string.digits)
2308+        if l not in 'l0QD1vAEIOUaeiou']  # drops look-alike glyphs and vowels, presumably to avoid accidental words
2309+    random_chars = [
2310+        random.choice(alphanumeric_chars) for _ in range(length)]
2311+    return(''.join(random_chars))
2312
2313=== added directory 'hooks/charmhelpers/fetch'
2314=== added file 'hooks/charmhelpers/fetch/__init__.py'
2315--- hooks/charmhelpers/fetch/__init__.py 1970-01-01 00:00:00 +0000
2316+++ hooks/charmhelpers/fetch/__init__.py 2013-10-15 01:36:24 +0000
2317@@ -0,0 +1,209 @@
2318+import importlib
2319+from yaml import safe_load
2320+from charmhelpers.core.host import (
2321+ lsb_release
2322+)
2323+from urlparse import (
2324+ urlparse,
2325+ urlunparse,
2326+)
2327+import subprocess
2328+from charmhelpers.core.hookenv import (
2329+ config,
2330+ log,
2331+)
2332+import apt_pkg
2333+
2334+CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
2335+deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
2336+"""
2337+PROPOSED_POCKET = """# Proposed
2338+deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
2339+"""
2340+
2341+
2342+def filter_installed_packages(packages):
2343+ """Returns a list of packages that require installation"""
2344+ apt_pkg.init()
2345+ cache = apt_pkg.Cache()
2346+ _pkgs = []
2347+ for package in packages:
2348+ try:
2349+ p = cache[package]
2350+ p.current_ver or _pkgs.append(package)
2351+ except KeyError:
2352+ log('Package {} has no installation candidate.'.format(package),
2353+ level='WARNING')
2354+ _pkgs.append(package)
2355+ return _pkgs
2356+
2357+
2358+def apt_install(packages, options=None, fatal=False):
2359+ """Install one or more packages"""
2360+ options = options or []
2361+ cmd = ['apt-get', '-y']
2362+ cmd.extend(options)
2363+ cmd.append('install')
2364+ if isinstance(packages, basestring):
2365+ cmd.append(packages)
2366+ else:
2367+ cmd.extend(packages)
2368+ log("Installing {} with options: {}".format(packages,
2369+ options))
2370+ if fatal:
2371+ subprocess.check_call(cmd)
2372+ else:
2373+ subprocess.call(cmd)
2374+
2375+
2376+def apt_update(fatal=False):
2377+ """Update local apt cache"""
2378+ cmd = ['apt-get', 'update']
2379+ if fatal:
2380+ subprocess.check_call(cmd)
2381+ else:
2382+ subprocess.call(cmd)
2383+
2384+
2385+def apt_purge(packages, fatal=False):
2386+ """Purge one or more packages"""
2387+ cmd = ['apt-get', '-y', 'purge']
2388+ if isinstance(packages, basestring):
2389+ cmd.append(packages)
2390+ else:
2391+ cmd.extend(packages)
2392+ log("Purging {}".format(packages))
2393+ if fatal:
2394+ subprocess.check_call(cmd)
2395+ else:
2396+ subprocess.call(cmd)
2397+
2398+
2399+def add_source(source, key=None):
2400+    if source.startswith('ppa:') or source.startswith('http:'):
2401+        subprocess.check_call(['add-apt-repository', '--yes', source])
2402+    elif source.startswith('cloud:'):
2403+        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
2404+                    fatal=True)
2405+        pocket = source.split(':')[-1]
2406+        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
2407+            apt.write(CLOUD_ARCHIVE.format(pocket))
2408+    elif source == 'proposed':
2409+        release = lsb_release()['DISTRIB_CODENAME']
2410+        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
2411+            apt.write(PROPOSED_POCKET.format(release))
2412+    if key:
2413+        # Bug fix: 'import' is not an apt-key subcommand; fetch the key by id
2414+        subprocess.check_call(['apt-key', 'adv', '--keyserver', 'keyserver.ubuntu.com', '--recv-keys', key])
2415+
2416+
2417+class SourceConfigError(Exception):
2418+ pass
2419+
2420+
2421+def configure_sources(update=False,
2422+                      sources_var='install_sources',
2423+                      keys_var='install_keys'):
2424+    """
2425+    Configure multiple sources from charm configuration
2426+
2427+    Example config:
2428+        install_sources:
2429+          - "ppa:foo"
2430+          - "http://example.com/repo precise main"
2431+        install_keys:
2432+          - null
2433+          - "a1b2c3d4"
2434+
2435+    Note that 'null' (a.k.a. None) should not be quoted.
2436+    """
2437+    sources = safe_load(config(sources_var))
2438+    keys = safe_load(config(keys_var))
2439+    if isinstance(sources, basestring) and (
2440+            keys is None or isinstance(keys, basestring)):
2441+        add_source(sources, keys)  # single source; key may legitimately be None
2442+    elif len(sources) != len(keys):
2443+        raise SourceConfigError('Install sources and keys lists are different lengths')
2444+    else:
2445+        for src_num in range(len(sources)):
2446+            add_source(sources[src_num], keys[src_num])
2447+    if update:
2448+        apt_update(fatal=True)
2449+
2450+# The order of this list is very important. Handlers should be listed in from
2451+# least- to most-specific URL matching.
2452+FETCH_HANDLERS = (
2453+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
2454+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
2455+)
2456+
2457+
2458+class UnhandledSource(Exception):
2459+ pass
2460+
2461+
2462+def install_remote(source):
2463+ """
2464+ Install a file tree from a remote source
2465+
2466+ The specified source should be a url of the form:
2467+ scheme://[host]/path[#[option=value][&...]]
2468+
2469+ Schemes supported are based on this modules submodules
2470+ Options supported are submodule-specific"""
2471+ # We ONLY check for True here because can_handle may return a string
2472+ # explaining why it can't handle a given source.
2473+ handlers = [h for h in plugins() if h.can_handle(source) is True]
2474+ installed_to = None
2475+ for handler in handlers:
2476+ try:
2477+ installed_to = handler.install(source)
2478+ except UnhandledSource:
2479+ pass
2480+ if not installed_to:
2481+ raise UnhandledSource("No handler found for source {}".format(source))
2482+ return installed_to
2483+
2484+
2485+def install_from_config(config_var_name):
2486+ charm_config = config()
2487+ source = charm_config[config_var_name]
2488+ return install_remote(source)
2489+
2490+
2491+class BaseFetchHandler(object):
2492+ """Base class for FetchHandler implementations in fetch plugins"""
2493+ def can_handle(self, source):
2494+ """Returns True if the source can be handled. Otherwise returns
2495+ a string explaining why it cannot"""
2496+ return "Wrong source type"
2497+
2498+ def install(self, source):
2499+ """Try to download and unpack the source. Return the path to the
2500+ unpacked files or raise UnhandledSource."""
2501+ raise UnhandledSource("Wrong source type {}".format(source))
2502+
2503+ def parse_url(self, url):
2504+ return urlparse(url)
2505+
2506+ def base_url(self, url):
2507+ """Return url without querystring or fragment"""
2508+ parts = list(self.parse_url(url))
2509+ parts[4:] = ['' for i in parts[4:]]
2510+ return urlunparse(parts)
2511+
2512+
2513+def plugins(fetch_handlers=None):  # instantiate one handler per importable FETCH_HANDLERS entry
2514+    if not fetch_handlers:
2515+        fetch_handlers = FETCH_HANDLERS
2516+    plugin_list = []
2517+    for handler_name in fetch_handlers:
2518+        package, classname = handler_name.rsplit('.', 1)
2519+        try:
2520+            handler_class = getattr(importlib.import_module(package), classname)
2521+            plugin_list.append(handler_class())
2522+        except (ImportError, AttributeError):
2523+            # Skip missing plugins so that they can be omitted from
2524+            # installation if desired
2525+            log("FetchHandler {} not found, skipping plugin".format(handler_name))
2526+    return plugin_list
2527
2528=== added file 'hooks/charmhelpers/fetch/archiveurl.py'
2529--- hooks/charmhelpers/fetch/archiveurl.py 1970-01-01 00:00:00 +0000
2530+++ hooks/charmhelpers/fetch/archiveurl.py 2013-10-15 01:36:24 +0000
2531@@ -0,0 +1,48 @@
2532+import os
2533+import urllib2
2534+from charmhelpers.fetch import (
2535+ BaseFetchHandler,
2536+ UnhandledSource
2537+)
2538+from charmhelpers.payload.archive import (
2539+ get_archive_handler,
2540+ extract,
2541+)
2542+from charmhelpers.core.host import mkdir
2543+
2544+
2545+class ArchiveUrlFetchHandler(BaseFetchHandler):
2546+ """Handler for archives via generic URLs"""
2547+ def can_handle(self, source):
2548+ url_parts = self.parse_url(source)
2549+ if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
2550+ return "Wrong source type"
2551+ if get_archive_handler(self.base_url(source)):
2552+ return True
2553+ return False
2554+
2555+ def download(self, source, dest):
2556+ # propogate all exceptions
2557+ # URLError, OSError, etc
2558+ response = urllib2.urlopen(source)
2559+ try:
2560+ with open(dest, 'w') as dest_file:
2561+ dest_file.write(response.read())
2562+ except Exception as e:
2563+ if os.path.isfile(dest):
2564+ os.unlink(dest)
2565+ raise e
2566+
2567+ def install(self, source):
2568+ url_parts = self.parse_url(source)
2569+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
2570+ if not os.path.exists(dest_dir):
2571+ mkdir(dest_dir, perms=0755)
2572+ dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
2573+ try:
2574+ self.download(source, dld_file)
2575+ except urllib2.URLError as e:
2576+ raise UnhandledSource(e.reason)
2577+ except OSError as e:
2578+ raise UnhandledSource(e.strerror)
2579+ return extract(dld_file)
2580
2581=== added file 'hooks/charmhelpers/fetch/bzrurl.py'
2582--- hooks/charmhelpers/fetch/bzrurl.py 1970-01-01 00:00:00 +0000
2583+++ hooks/charmhelpers/fetch/bzrurl.py 2013-10-15 01:36:24 +0000
2584@@ -0,0 +1,49 @@
2585+import os
2586+from charmhelpers.fetch import (
2587+ BaseFetchHandler,
2588+ UnhandledSource
2589+)
2590+from charmhelpers.core.host import mkdir
2591+
2592+try:
2593+ from bzrlib.branch import Branch
2594+except ImportError:
2595+ from charmhelpers.fetch import apt_install
2596+ apt_install("python-bzrlib")
2597+ from bzrlib.branch import Branch
2598+
2599+class BzrUrlFetchHandler(BaseFetchHandler):
2600+ """Handler for bazaar branches via generic and lp URLs"""
2601+ def can_handle(self, source):
2602+ url_parts = self.parse_url(source)
2603+ if url_parts.scheme not in ('bzr+ssh', 'lp'):
2604+ return False
2605+ else:
2606+ return True
2607+
2608+ def branch(self, source, dest):
2609+ url_parts = self.parse_url(source)
2610+ # If we use lp:branchname scheme we need to load plugins
2611+ if not self.can_handle(source):
2612+ raise UnhandledSource("Cannot handle {}".format(source))
2613+ if url_parts.scheme == "lp":
2614+ from bzrlib.plugin import load_plugins
2615+ load_plugins()
2616+ try:
2617+ remote_branch = Branch.open(source)
2618+ remote_branch.bzrdir.sprout(dest).open_branch()
2619+ except Exception as e:
2620+ raise e
2621+
2622+ def install(self, source):
2623+ url_parts = self.parse_url(source)
2624+ branch_name = url_parts.path.strip("/").split("/")[-1]
2625+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
2626+ if not os.path.exists(dest_dir):
2627+ mkdir(dest_dir, perms=0755)
2628+ try:
2629+ self.branch(source, dest_dir)
2630+ except OSError as e:
2631+ raise UnhandledSource(e.strerror)
2632+ return dest_dir
2633+
2634
2635=== added directory 'hooks/charmhelpers/payload'
2636=== added file 'hooks/charmhelpers/payload/__init__.py'
2637--- hooks/charmhelpers/payload/__init__.py 1970-01-01 00:00:00 +0000
2638+++ hooks/charmhelpers/payload/__init__.py 2013-10-15 01:36:24 +0000
2639@@ -0,0 +1,1 @@
2640+"Tools for working with files injected into a charm just before deployment."
2641
2642=== added file 'hooks/charmhelpers/payload/execd.py'
2643--- hooks/charmhelpers/payload/execd.py 1970-01-01 00:00:00 +0000
2644+++ hooks/charmhelpers/payload/execd.py 2013-10-15 01:36:24 +0000
2645@@ -0,0 +1,50 @@
2646+#!/usr/bin/env python
2647+
2648+import os
2649+import sys
2650+import subprocess
2651+from charmhelpers.core import hookenv
2652+
2653+
2654+def default_execd_dir():
2655+ return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
2656+
2657+
2658+def execd_module_paths(execd_dir=None):
2659+ """Generate a list of full paths to modules within execd_dir."""
2660+ if not execd_dir:
2661+ execd_dir = default_execd_dir()
2662+
2663+ if not os.path.exists(execd_dir):
2664+ return
2665+
2666+ for subpath in os.listdir(execd_dir):
2667+ module = os.path.join(execd_dir, subpath)
2668+ if os.path.isdir(module):
2669+ yield module
2670+
2671+
2672+def execd_submodule_paths(command, execd_dir=None):
2673+ """Generate a list of full paths to the specified command within exec_dir.
2674+ """
2675+ for module_path in execd_module_paths(execd_dir):
2676+ path = os.path.join(module_path, command)
2677+ if os.access(path, os.X_OK) and os.path.isfile(path):
2678+ yield path
2679+
2680+
2681+def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
2682+    """Run command for each module within execd_dir which defines it."""
2683+    for submodule_path in execd_submodule_paths(command, execd_dir):
2684+        try:
2685+            subprocess.check_call(submodule_path, shell=True, stderr=stderr)
2686+        except subprocess.CalledProcessError as e:
2687+            # Bug fix: check_call never captures output, so e.output is always None
2688+            hookenv.log("Error ({}) running {}.".format(e.returncode, e.cmd))
2689+            if die_on_error:
2690+                sys.exit(e.returncode)
2691+
2692+
2693+def execd_preinstall(execd_dir=None):
2694+ """Run charm-pre-install for each module within execd_dir."""
2695+ execd_run('charm-pre-install', execd_dir=execd_dir)
2696
2697=== removed directory 'hooks/lib'
2698=== removed file 'hooks/lib/__init__.py'
2699=== removed file 'hooks/lib/apache_utils.py'
2700--- hooks/lib/apache_utils.py 2013-03-12 21:52:12 +0000
2701+++ hooks/lib/apache_utils.py 1970-01-01 00:00:00 +0000
2702@@ -1,193 +0,0 @@
2703-#
2704-# Copyright 2012 Canonical Ltd.
2705-#
2706-# Authors:
2707-# James Page <james.page@ubuntu.com>
2708-#
2709-
2710-from lib.utils import (
2711- relation_ids,
2712- relation_list,
2713- relation_get,
2714- render_template,
2715- juju_log,
2716- config_get,
2717- install,
2718- get_host_ip,
2719- restart
2720- )
2721-from lib.cluster_utils import https
2722-
2723-import os
2724-import subprocess
2725-from base64 import b64decode
2726-
2727-APACHE_SITE_DIR = "/etc/apache2/sites-available"
2728-SITE_TEMPLATE = "apache2_site.tmpl"
2729-RELOAD_CHECK = "To activate the new configuration"
2730-
2731-
2732-def get_cert():
2733- cert = config_get('ssl_cert')
2734- key = config_get('ssl_key')
2735- if not (cert and key):
2736- juju_log('INFO',
2737- "Inspecting identity-service relations for SSL certificate.")
2738- cert = key = None
2739- for r_id in relation_ids('identity-service'):
2740- for unit in relation_list(r_id):
2741- if not cert:
2742- cert = relation_get('ssl_cert',
2743- rid=r_id, unit=unit)
2744- if not key:
2745- key = relation_get('ssl_key',
2746- rid=r_id, unit=unit)
2747- return (cert, key)
2748-
2749-
2750-def get_ca_cert():
2751- ca_cert = None
2752- juju_log('INFO',
2753- "Inspecting identity-service relations for CA SSL certificate.")
2754- for r_id in relation_ids('identity-service'):
2755- for unit in relation_list(r_id):
2756- if not ca_cert:
2757- ca_cert = relation_get('ca_cert',
2758- rid=r_id, unit=unit)
2759- return ca_cert
2760-
2761-
2762-def install_ca_cert(ca_cert):
2763- if ca_cert:
2764- with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
2765- 'w') as crt:
2766- crt.write(ca_cert)
2767- subprocess.check_call(['update-ca-certificates', '--fresh'])
2768-
2769-
2770-def enable_https(port_maps, namespace, cert, key, ca_cert=None):
2771- '''
2772- For a given number of port mappings, configures apache2
2773- HTTPs local reverse proxying using certficates and keys provided in
2774- either configuration data (preferred) or relation data. Assumes ports
2775- are not in use (calling charm should ensure that).
2776-
2777- port_maps: dict: external to internal port mappings
2778- namespace: str: name of charm
2779- '''
2780- def _write_if_changed(path, new_content):
2781- content = None
2782- if os.path.exists(path):
2783- with open(path, 'r') as f:
2784- content = f.read().strip()
2785- if content != new_content:
2786- with open(path, 'w') as f:
2787- f.write(new_content)
2788- return True
2789- else:
2790- return False
2791-
2792- juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
2793- http_restart = False
2794-
2795- if cert:
2796- cert = b64decode(cert)
2797- if key:
2798- key = b64decode(key)
2799- if ca_cert:
2800- ca_cert = b64decode(ca_cert)
2801-
2802- if not cert and not key:
2803- juju_log('ERROR',
2804- "Expected but could not find SSL certificate data, not "
2805- "configuring HTTPS!")
2806- return False
2807-
2808- install('apache2')
2809- if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
2810- 'proxy', 'proxy_http']):
2811- http_restart = True
2812-
2813- ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
2814- if not os.path.exists(ssl_dir):
2815- os.makedirs(ssl_dir)
2816-
2817- if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
2818- http_restart = True
2819- if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
2820- http_restart = True
2821- os.chmod(os.path.join(ssl_dir, 'key'), 0600)
2822-
2823- install_ca_cert(ca_cert)
2824-
2825- sites_dir = '/etc/apache2/sites-available'
2826- for ext_port, int_port in port_maps.items():
2827- juju_log('INFO',
2828- 'Creating apache2 reverse proxy vhost'
2829- ' for {}:{}'.format(ext_port,
2830- int_port))
2831- site = "{}_{}".format(namespace, ext_port)
2832- site_path = os.path.join(sites_dir, site)
2833- with open(site_path, 'w') as fsite:
2834- context = {
2835- "ext": ext_port,
2836- "int": int_port,
2837- "namespace": namespace,
2838- "private_address": get_host_ip()
2839- }
2840- fsite.write(render_template(SITE_TEMPLATE,
2841- context))
2842-
2843- if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
2844- http_restart = True
2845-
2846- if http_restart:
2847- restart('apache2')
2848-
2849- return True
2850-
2851-
2852-def disable_https(port_maps, namespace):
2853- '''
2854- Ensure HTTPS reverse proxying is disables for given port mappings
2855-
2856- port_maps: dict: of ext -> int port mappings
2857- namespace: str: name of chamr
2858- '''
2859- juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))
2860-
2861- if (not os.path.exists('/etc/apache2') or
2862- not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
2863- return
2864-
2865- http_restart = False
2866- for ext_port in port_maps.keys():
2867- if os.path.exists(os.path.join(APACHE_SITE_DIR,
2868- "{}_{}".format(namespace,
2869- ext_port))):
2870- juju_log('INFO',
2871- "Disabling HTTPS reverse proxy"
2872- " for {} {}.".format(namespace,
2873- ext_port))
2874- if (RELOAD_CHECK in
2875- subprocess.check_output(['a2dissite',
2876- '{}_{}'.format(namespace,
2877- ext_port)])):
2878- http_restart = True
2879-
2880- if http_restart:
2881- restart(['apache2'])
2882-
2883-
2884-def setup_https(port_maps, namespace, cert, key, ca_cert=None):
2885- '''
2886- Ensures HTTPS is either enabled or disabled for given port
2887- mapping.
2888-
2889- port_maps: dict: of ext -> int port mappings
2890- namespace: str: name of charm
2891- '''
2892- if not https:
2893- disable_https(port_maps, namespace)
2894- else:
2895- enable_https(port_maps, namespace, cert, key, ca_cert)
2896
2897=== removed file 'hooks/lib/cluster_utils.py'
2898--- hooks/lib/cluster_utils.py 2013-03-19 14:04:54 +0000
2899+++ hooks/lib/cluster_utils.py 1970-01-01 00:00:00 +0000
2900@@ -1,130 +0,0 @@
2901-#
2902-# Copyright 2012 Canonical Ltd.
2903-#
2904-# This file is sourced from lp:openstack-charm-helpers
2905-#
2906-# Authors:
2907-# James Page <james.page@ubuntu.com>
2908-# Adam Gandelman <adamg@ubuntu.com>
2909-#
2910-
2911-from lib.utils import (
2912- juju_log,
2913- relation_ids,
2914- relation_list,
2915- relation_get,
2916- get_unit_hostname,
2917- config_get
2918- )
2919-import subprocess
2920-import os
2921-
2922-
2923-def is_clustered():
2924- for r_id in (relation_ids('ha') or []):
2925- for unit in (relation_list(r_id) or []):
2926- clustered = relation_get('clustered',
2927- rid=r_id,
2928- unit=unit)
2929- if clustered:
2930- return True
2931- return False
2932-
2933-
2934-def is_leader(resource):
2935- cmd = [
2936- "crm", "resource",
2937- "show", resource
2938- ]
2939- try:
2940- status = subprocess.check_output(cmd)
2941- except subprocess.CalledProcessError:
2942- return False
2943- else:
2944- if get_unit_hostname() in status:
2945- return True
2946- else:
2947- return False
2948-
2949-
2950-def peer_units():
2951- peers = []
2952- for r_id in (relation_ids('cluster') or []):
2953- for unit in (relation_list(r_id) or []):
2954- peers.append(unit)
2955- return peers
2956-
2957-
2958-def oldest_peer(peers):
2959- local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
2960- for peer in peers:
2961- remote_unit_no = int(peer.split('/')[1])
2962- if remote_unit_no < local_unit_no:
2963- return False
2964- return True
2965-
2966-
2967-def eligible_leader(resource):
2968- if is_clustered():
2969- if not is_leader(resource):
2970- juju_log('INFO', 'Deferring action to CRM leader.')
2971- return False
2972- else:
2973- peers = peer_units()
2974- if peers and not oldest_peer(peers):
2975- juju_log('INFO', 'Deferring action to oldest service unit.')
2976- return False
2977- return True
2978-
2979-
2980-def https():
2981- '''
2982- Determines whether enough data has been provided in configuration
2983- or relation data to configure HTTPS
2984- .
2985- returns: boolean
2986- '''
2987- if config_get('use-https') == "yes":
2988- return True
2989- if config_get('ssl_cert') and config_get('ssl_key'):
2990- return True
2991- for r_id in relation_ids('identity-service'):
2992- for unit in relation_list(r_id):
2993- if (relation_get('https_keystone', rid=r_id, unit=unit) and
2994- relation_get('ssl_cert', rid=r_id, unit=unit) and
2995- relation_get('ssl_key', rid=r_id, unit=unit) and
2996- relation_get('ca_cert', rid=r_id, unit=unit)):
2997- return True
2998- return False
2999-
3000-
3001-def determine_api_port(public_port):
3002- '''
3003- Determine correct API server listening port based on
3004- existence of HTTPS reverse proxy and/or haproxy.
3005-
3006- public_port: int: standard public port for given service
3007-
3008- returns: int: the correct listening port for the API service
3009- '''
3010- i = 0
3011- if len(peer_units()) > 0 or is_clustered():
3012- i += 1
3013- if https():
3014- i += 1
3015- return public_port - (i * 10)
3016-
3017-
3018-def determine_haproxy_port(public_port):
3019- '''
3020- Description: Determine correct proxy listening port based on public IP +
3021- existence of HTTPS reverse proxy.
3022-
3023- public_port: int: standard public port for given service
3024-
3025- returns: int: the correct listening port for the HAProxy service
3026- '''
3027- i = 0
3028- if https():
3029- i += 1
3030- return public_port - (i * 10)
3031
3032=== removed file 'hooks/lib/haproxy_utils.py'
3033--- hooks/lib/haproxy_utils.py 2013-03-12 11:17:32 +0000
3034+++ hooks/lib/haproxy_utils.py 1970-01-01 00:00:00 +0000
3035@@ -1,52 +0,0 @@
3036-#
3037-# Copyright 2012 Canonical Ltd.
3038-#
3039-# Authors:
3040-# James Page <james.page@ubuntu.com>
3041-#
3042-
3043-from lib.utils import (
3044- relation_ids,
3045- relation_list,
3046- relation_get,
3047- unit_get,
3048- reload,
3049- render_template
3050- )
3051-import os
3052-
3053-HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
3054-HAPROXY_DEFAULT = '/etc/default/haproxy'
3055-
3056-
3057-def configure_haproxy(service_ports):
3058- '''
3059- Configure HAProxy based on the current peers in the service
3060- cluster using the provided port map:
3061-
3062- "swift": [ 8080, 8070 ]
3063-
3064- HAproxy will also be reloaded/started if required
3065-
3066- service_ports: dict: dict of lists of [ frontend, backend ]
3067- '''
3068- cluster_hosts = {}
3069- cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
3070- unit_get('private-address')
3071- for r_id in relation_ids('cluster'):
3072- for unit in relation_list(r_id):
3073- cluster_hosts[unit.replace('/', '-')] = \
3074- relation_get(attribute='private-address',
3075- rid=r_id,
3076- unit=unit)
3077- context = {
3078- 'units': cluster_hosts,
3079- 'service_ports': service_ports
3080- }
3081- with open(HAPROXY_CONF, 'w') as f:
3082- f.write(render_template(os.path.basename(HAPROXY_CONF),
3083- context))
3084- with open(HAPROXY_DEFAULT, 'w') as f:
3085- f.write('ENABLED=1')
3086-
3087- reload('haproxy')
3088
3089=== removed file 'hooks/lib/openstack_common.py'
3090--- hooks/lib/openstack_common.py 2013-07-19 19:35:01 +0000
3091+++ hooks/lib/openstack_common.py 1970-01-01 00:00:00 +0000
3092@@ -1,231 +0,0 @@
3093-#!/usr/bin/python
3094-
3095-# Common python helper functions used for OpenStack charms.
3096-
3097-import apt_pkg as apt
3098-import subprocess
3099-import os
3100-
3101-CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
3102-CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
3103-
3104-ubuntu_openstack_release = {
3105- 'oneiric': 'diablo',
3106- 'precise': 'essex',
3107- 'quantal': 'folsom',
3108- 'raring': 'grizzly',
3109-}
3110-
3111-
3112-openstack_codenames = {
3113- '2011.2': 'diablo',
3114- '2012.1': 'essex',
3115- '2012.2': 'folsom',
3116- '2013.1': 'grizzly',
3117- '2013.2': 'havana',
3118-}
3119-
3120-# The ugly duckling
3121-swift_codenames = {
3122- '1.4.3': 'diablo',
3123- '1.4.8': 'essex',
3124- '1.7.4': 'folsom',
3125- '1.7.6': 'grizzly',
3126- '1.7.7': 'grizzly',
3127- '1.8.0': 'grizzly',
3128-}
3129-
3130-
3131-def juju_log(msg):
3132- subprocess.check_call(['juju-log', msg])
3133-
3134-
3135-def error_out(msg):
3136- juju_log("FATAL ERROR: %s" % msg)
3137- exit(1)
3138-
3139-
3140-def lsb_release():
3141- '''Return /etc/lsb-release in a dict'''
3142- lsb = open('/etc/lsb-release', 'r')
3143- d = {}
3144- for l in lsb:
3145- k, v = l.split('=')
3146- d[k.strip()] = v.strip()
3147- return d
3148-
3149-
3150-def get_os_codename_install_source(src):
3151- '''Derive OpenStack release codename from a given installation source.'''
3152- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3153-
3154- rel = ''
3155- if src == 'distro':
3156- try:
3157- rel = ubuntu_openstack_release[ubuntu_rel]
3158- except KeyError:
3159- e = 'Code not derive openstack release for '\
3160- 'this Ubuntu release: %s' % rel
3161- error_out(e)
3162- return rel
3163-
3164- if src.startswith('cloud:'):
3165- ca_rel = src.split(':')[1]
3166- ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
3167- return ca_rel
3168-
3169- # Best guess match based on deb string provided
3170- if src.startswith('deb') or src.startswith('ppa'):
3171- for k, v in openstack_codenames.iteritems():
3172- if v in src:
3173- return v
3174-
3175-
3176-def get_os_codename_version(vers):
3177- '''Determine OpenStack codename from version number.'''
3178- try:
3179- return openstack_codenames[vers]
3180- except KeyError:
3181- e = 'Could not determine OpenStack codename for version %s' % vers
3182- error_out(e)
3183-
3184-
3185-def get_os_version_codename(codename):
3186- '''Determine OpenStack version number from codename.'''
3187- for k, v in openstack_codenames.iteritems():
3188- if v == codename:
3189- return k
3190- e = 'Code not derive OpenStack version for '\
3191- 'codename: %s' % codename
3192- error_out(e)
3193-
3194-
3195-def get_os_codename_package(pkg):
3196- '''Derive OpenStack release codename from an installed package.'''
3197- apt.init()
3198- cache = apt.Cache()
3199- try:
3200- pkg = cache[pkg]
3201- except:
3202- e = 'Could not determine version of installed package: %s' % pkg
3203- error_out(e)
3204-
3205- vers = apt.UpstreamVersion(pkg.current_ver.ver_str)
3206-
3207- try:
3208- if 'swift' in pkg.name:
3209- vers = vers[:5]
3210- return swift_codenames[vers]
3211- else:
3212- vers = vers[:6]
3213- return openstack_codenames[vers]
3214- except KeyError:
3215- e = 'Could not determine OpenStack codename for version %s' % vers
3216- error_out(e)
3217-
3218-
3219-def get_os_version_package(pkg):
3220- '''Derive OpenStack version number from an installed package.'''
3221- codename = get_os_codename_package(pkg)
3222-
3223- if 'swift' in pkg:
3224- vers_map = swift_codenames
3225- else:
3226- vers_map = openstack_codenames
3227-
3228- for version, cname in vers_map.iteritems():
3229- if cname == codename:
3230- return version
3231- e = "Could not determine OpenStack version for package: %s" % pkg
3232- error_out(e)
3233-
3234-
3235-def configure_installation_source(rel):
3236- '''Configure apt installation source.'''
3237-
3238- def _import_key(keyid):
3239- cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
3240- "--recv-keys %s" % keyid
3241- try:
3242- subprocess.check_call(cmd.split(' '))
3243- except subprocess.CalledProcessError:
3244- error_out("Error importing repo key %s" % keyid)
3245-
3246- if rel == 'distro':
3247- return
3248- elif rel[:4] == "ppa:":
3249- src = rel
3250- subprocess.check_call(["add-apt-repository", "-y", src])
3251- elif rel[:3] == "deb":
3252- l = len(rel.split('|'))
3253- if l == 2:
3254- src, key = rel.split('|')
3255- juju_log("Importing PPA key from keyserver for %s" % src)
3256- _import_key(key)
3257- elif l == 1:
3258- src = rel
3259- else:
3260- error_out("Invalid openstack-release: %s" % rel)
3261-
3262- with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
3263- f.write(src)
3264- elif rel[:6] == 'cloud:':
3265- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3266- rel = rel.split(':')[1]
3267- u_rel = rel.split('-')[0]
3268- ca_rel = rel.split('-')[1]
3269-
3270- if u_rel != ubuntu_rel:
3271- e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
3272- 'version (%s)' % (ca_rel, ubuntu_rel)
3273- error_out(e)
3274-
3275- if 'staging' in ca_rel:
3276- # staging is just a regular PPA.
3277- os_rel = ca_rel.split('/')[0]
3278- ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
3279- cmd = 'add-apt-repository -y %s' % ppa
3280- subprocess.check_call(cmd.split(' '))
3281- return
3282-
3283- # map charm config options to actual archive pockets.
3284- pockets = {
3285- 'folsom': 'precise-updates/folsom',
3286- 'folsom/updates': 'precise-updates/folsom',
3287- 'folsom/proposed': 'precise-proposed/folsom',
3288- 'grizzly': 'precise-updates/grizzly',
3289- 'grizzly/updates': 'precise-updates/grizzly',
3290- 'grizzly/proposed': 'precise-proposed/grizzly'
3291- }
3292-
3293- try:
3294- pocket = pockets[ca_rel]
3295- except KeyError:
3296- e = 'Invalid Cloud Archive release specified: %s' % rel
3297- error_out(e)
3298-
3299- src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
3300- cmd = ['apt-get', '-y', 'install', 'ubuntu-cloud-keyring']
3301- subprocess.check_call(cmd)
3302-
3303- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
3304- f.write(src)
3305- else:
3306- error_out("Invalid openstack-release specified: %s" % rel)
3307-
3308-
3309-def save_script_rc(script_path="scripts/scriptrc", **env_vars):
3310- """
3311- Write an rc file in the charm-delivered directory containing
3312- exported environment variables provided by env_vars. Any charm scripts run
3313- outside the juju hook environment can source this scriptrc to obtain
3314- updated config information necessary to perform health checks or
3315- service changes.
3316- """
3317- charm_dir = os.getenv('CHARM_DIR')
3318- juju_rc_path = "%s/%s" % (charm_dir, script_path)
3319- with open(juju_rc_path, 'wb') as rc_script:
3320- rc_script.write(
3321- "#!/bin/bash\n")
3322- [rc_script.write('export %s=%s\n' % (u, p))
3323- for u, p in env_vars.iteritems() if u != "script_path"]
3324
3325=== removed file 'hooks/lib/utils.py'
3326--- hooks/lib/utils.py 2013-03-21 18:29:07 +0000
3327+++ hooks/lib/utils.py 1970-01-01 00:00:00 +0000
3328@@ -1,332 +0,0 @@
3329-#
3330-# Copyright 2012 Canonical Ltd.
3331-#
3332-# This file is sourced from lp:openstack-charm-helpers
3333-#
3334-# Authors:
3335-# James Page <james.page@ubuntu.com>
3336-# Paul Collins <paul.collins@canonical.com>
3337-# Adam Gandelman <adamg@ubuntu.com>
3338-#
3339-
3340-import json
3341-import os
3342-import subprocess
3343-import socket
3344-import sys
3345-
3346-
3347-def do_hooks(hooks):
3348- hook = os.path.basename(sys.argv[0])
3349-
3350- try:
3351- hook_func = hooks[hook]
3352- except KeyError:
3353- juju_log('INFO',
3354- "This charm doesn't know how to handle '{}'.".format(hook))
3355- else:
3356- hook_func()
3357-
3358-
3359-def install(*pkgs):
3360- cmd = [
3361- 'apt-get',
3362- '-y',
3363- 'install'
3364- ]
3365- for pkg in pkgs:
3366- cmd.append(pkg)
3367- subprocess.check_call(cmd)
3368-
3369-TEMPLATES_DIR = 'templates'
3370-
3371-try:
3372- import jinja2
3373-except ImportError:
3374- install('python-jinja2')
3375- import jinja2
3376-
3377-try:
3378- import dns.resolver
3379-except ImportError:
3380- install('python-dnspython')
3381- import dns.resolver
3382-
3383-
3384-def render_template(template_name, context, template_dir=TEMPLATES_DIR):
3385- templates = jinja2.Environment(
3386- loader=jinja2.FileSystemLoader(template_dir)
3387- )
3388- template = templates.get_template(template_name)
3389- return template.render(context)
3390-
3391-CLOUD_ARCHIVE = \
3392-""" # Ubuntu Cloud Archive
3393-deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
3394-"""
3395-
3396-CLOUD_ARCHIVE_POCKETS = {
3397- 'folsom': 'precise-updates/folsom',
3398- 'folsom/updates': 'precise-updates/folsom',
3399- 'folsom/proposed': 'precise-proposed/folsom',
3400- 'grizzly': 'precise-updates/grizzly',
3401- 'grizzly/updates': 'precise-updates/grizzly',
3402- 'grizzly/proposed': 'precise-proposed/grizzly'
3403- }
3404-
3405-
3406-def configure_source():
3407- source = str(config_get('openstack-origin'))
3408- if not source:
3409- return
3410- if source.startswith('ppa:'):
3411- cmd = [
3412- 'add-apt-repository',
3413- source
3414- ]
3415- subprocess.check_call(cmd)
3416- if source.startswith('cloud:'):
3417- # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
3418- # cloud:precise-folsom/updates or cloud:precise-folsom/proposed
3419- install('ubuntu-cloud-keyring')
3420- pocket = source.split(':')[1]
3421- pocket = pocket.split('-')[1]
3422- with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
3423- apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
3424- if source.startswith('deb'):
3425- l = len(source.split('|'))
3426- if l == 2:
3427- (apt_line, key) = source.split('|')
3428- cmd = [
3429- 'apt-key',
3430- 'adv', '--keyserver keyserver.ubuntu.com',
3431- '--recv-keys', key
3432- ]
3433- subprocess.check_call(cmd)
3434- elif l == 1:
3435- apt_line = source
3436-
3437- with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
3438- apt.write(apt_line + "\n")
3439- cmd = [
3440- 'apt-get',
3441- 'update'
3442- ]
3443- subprocess.check_call(cmd)
3444-
3445-# Protocols
3446-TCP = 'TCP'
3447-UDP = 'UDP'
3448-
3449-
3450-def expose(port, protocol='TCP'):
3451- cmd = [
3452- 'open-port',
3453- '{}/{}'.format(port, protocol)
3454- ]
3455- subprocess.check_call(cmd)
3456-
3457-
3458-def juju_log(severity, message):
3459- cmd = [
3460- 'juju-log',
3461- '--log-level', severity,
3462- message
3463- ]
3464- subprocess.check_call(cmd)
3465-
3466-
3467-cache = {}
3468-
3469-
3470-def cached(func):
3471- def wrapper(*args, **kwargs):
3472- global cache
3473- key = str((func, args, kwargs))
3474- try:
3475- return cache[key]
3476- except KeyError:
3477- res = func(*args, **kwargs)
3478- cache[key] = res
3479- return res
3480- return wrapper
3481-
3482-
3483-@cached
3484-def relation_ids(relation):
3485- cmd = [
3486- 'relation-ids',
3487- relation
3488- ]
3489- result = str(subprocess.check_output(cmd)).split()
3490- if result == "":
3491- return None
3492- else:
3493- return result
3494-
3495-
3496-@cached
3497-def relation_list(rid):
3498- cmd = [
3499- 'relation-list',
3500- '-r', rid,
3501- ]
3502- result = str(subprocess.check_output(cmd)).split()
3503- if result == "":
3504- return None
3505- else:
3506- return result
3507-
3508-
3509-@cached
3510-def relation_get(attribute, unit=None, rid=None):
3511- cmd = [
3512- 'relation-get',
3513- ]
3514- if rid:
3515- cmd.append('-r')
3516- cmd.append(rid)
3517- cmd.append(attribute)
3518- if unit:
3519- cmd.append(unit)
3520- value = subprocess.check_output(cmd).strip() # IGNORE:E1103
3521- if value == "":
3522- return None
3523- else:
3524- return value
3525-
3526-
3527-@cached
3528-def relation_get_dict(relation_id=None, remote_unit=None):
3529- """Obtain all relation data as dict by way of JSON"""
3530- cmd = [
3531- 'relation-get', '--format=json'
3532- ]
3533- if relation_id:
3534- cmd.append('-r')
3535- cmd.append(relation_id)
3536- if remote_unit:
3537- remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
3538- os.environ['JUJU_REMOTE_UNIT'] = remote_unit
3539- j = subprocess.check_output(cmd)
3540- if remote_unit and remote_unit_orig:
3541- os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
3542- d = json.loads(j)
3543- settings = {}
3544- # convert unicode to strings
3545- for k, v in d.iteritems():
3546- settings[str(k)] = str(v)
3547- return settings
3548-
3549-
3550-def relation_set(**kwargs):
3551- cmd = [
3552- 'relation-set'
3553- ]
3554- args = []
3555- for k, v in kwargs.items():
3556- if k == 'rid':
3557- if v:
3558- cmd.append('-r')
3559- cmd.append(v)
3560- else:
3561- args.append('{}={}'.format(k, v))
3562- cmd += args
3563- subprocess.check_call(cmd)
3564-
3565-
3566-@cached
3567-def unit_get(attribute):
3568- cmd = [
3569- 'unit-get',
3570- attribute
3571- ]
3572- value = subprocess.check_output(cmd).strip() # IGNORE:E1103
3573- if value == "":
3574- return None
3575- else:
3576- return value
3577-
3578-
3579-@cached
3580-def config_get(attribute):
3581- cmd = [
3582- 'config-get',
3583- '--format',
3584- 'json',
3585- ]
3586- out = subprocess.check_output(cmd).strip() # IGNORE:E1103
3587- cfg = json.loads(out)
3588-
3589- try:
3590- return cfg[attribute]
3591- except KeyError:
3592- return None
3593-
3594-
3595-@cached
3596-def get_unit_hostname():
3597- return socket.gethostname()
3598-
3599-
3600-@cached
3601-def get_host_ip(hostname=unit_get('private-address')):
3602- try:
3603- # Test to see if already an IPv4 address
3604- socket.inet_aton(hostname)
3605- return hostname
3606- except socket.error:
3607- answers = dns.resolver.query(hostname, 'A')
3608- if answers:
3609- return answers[0].address
3610- return None
3611-
3612-
3613-def _svc_control(service, action):
3614- subprocess.check_call(['service', service, action])
3615-
3616-
3617-def restart(*services):
3618- for service in services:
3619- _svc_control(service, 'restart')
3620-
3621-
3622-def stop(*services):
3623- for service in services:
3624- _svc_control(service, 'stop')
3625-
3626-
3627-def start(*services):
3628- for service in services:
3629- _svc_control(service, 'start')
3630-
3631-
3632-def reload(*services):
3633- for service in services:
3634- try:
3635- _svc_control(service, 'reload')
3636- except subprocess.CalledProcessError:
3637- # Reload failed - either service does not support reload
3638- # or it was not running - restart will fixup most things
3639- _svc_control(service, 'restart')
3640-
3641-
3642-def running(service):
3643- try:
3644- output = subprocess.check_output(['service', service, 'status'])
3645- except subprocess.CalledProcessError:
3646- return False
3647- else:
3648- if ("start/running" in output or
3649- "is running" in output):
3650- return True
3651- else:
3652- return False
3653-
3654-
3655-def is_relation_made(relation, key='private-address'):
3656- for r_id in (relation_ids(relation) or []):
3657- for unit in (relation_list(r_id) or []):
3658- if relation_get(key, rid=r_id, unit=unit):
3659- return True
3660- return False
3661
3662=== added symlink 'hooks/start'
3663=== target is u'swift_hooks.py'
3664=== added symlink 'hooks/stop'
3665=== target is u'swift_hooks.py'
3666=== added file 'hooks/swift_context.py'
3667--- hooks/swift_context.py 1970-01-01 00:00:00 +0000
3668+++ hooks/swift_context.py 2013-10-15 01:36:24 +0000
3669@@ -0,0 +1,223 @@
3670+from charmhelpers.core.hookenv import (
3671+ config,
3672+ log,
3673+ relation_ids,
3674+ related_units,
3675+ relation_get,
3676+ unit_get
3677+)
3678+
3679+from charmhelpers.contrib.openstack.context import (
3680+ OSContextGenerator,
3681+ ApacheSSLContext as SSLContext,
3682+ context_complete,
3683+ CA_CERT_PATH
3684+)
3685+
3686+from charmhelpers.contrib.hahelpers.cluster import (
3687+ determine_api_port,
3688+ determine_haproxy_port,
3689+)
3690+
3691+from charmhelpers.contrib.openstack.utils import get_host_ip
3692+import subprocess
3693+import os
3694+
3695+
3696+from charmhelpers.contrib.hahelpers.apache import (
3697+ get_cert,
3698+ get_ca_cert,
3699+)
3700+
3701+from base64 import b64decode, b64encode
3702+
3703+
3704+class HAProxyContext(OSContextGenerator):
3705+ interfaces = ['cluster']
3706+
3707+ def __call__(self):
3708+ '''
3709+ Extends the main charmhelpers HAProxyContext with a port mapping
3710+ specific to this charm.
3711+ Also used to extend cinder.conf context with correct api_listening_port
3712+ '''
3713+ haproxy_port = determine_haproxy_port(config('bind-port'))
3714+ api_port = determine_api_port(config('bind-port'))
3715+
3716+ ctxt = {
3717+ 'service_ports': {'swift_api': [haproxy_port, api_port]},
3718+ }
3719+ return ctxt
3720+
3721+
3722+WWW_DIR = '/var/www/swift-rings'
3723+
3724+
3725+def generate_cert():
3726+ '''
3727+ Generates a self signed certificate and key using the
3728+ provided charm configuration data.
3729+
3730+ returns: tuple of (cert, key)
3731+ '''
3732+ CERT = '/etc/swift/ssl.cert'
3733+ KEY = '/etc/swift/ssl.key'
3734+ if not os.path.exists(CERT) and not os.path.exists(KEY):
3735+ subj = '/C=%s/ST=%s/L=%s/CN=%s' %\
3736+ (config('country'), config('state'),
3737+ config('locale'), config('common-name'))
3738+ cmd = ['openssl', 'req', '-new', '-x509', '-nodes',
3739+ '-out', CERT, '-keyout', KEY,
3740+ '-subj', subj]
3741+ subprocess.check_call(cmd)
3742+ os.chmod(KEY, 0600)
3743+ # Slurp as base64 encoded - makes handling easier up the stack
3744+ with open(CERT, 'r') as cfile:
3745+ ssl_cert = b64encode(cfile.read())
3746+ with open(KEY, 'r') as kfile:
3747+ ssl_key = b64encode(kfile.read())
3748+ return (ssl_cert, ssl_key)
3749+
3750+
3751+class ApacheSSLContext(SSLContext):
3752+ interfaces = ['https']
3753+ external_ports = [config('bind-port')]
3754+ service_namespace = 'swift'
3755+
3756+ def configure_cert(self):
3757+ if not os.path.isdir('/etc/apache2/ssl'):
3758+ os.mkdir('/etc/apache2/ssl')
3759+ ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
3760+ if not os.path.isdir(ssl_dir):
3761+ os.mkdir(ssl_dir)
3762+ cert, key = get_cert()
3763+ # Swift specific - generate a cert by default if not using
3764+ # a) user supplied cert or b) keystone signed cert
3765+ if None in [cert, key]:
3766+ cert, key = generate_cert()
3767+ with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
3768+ cert_out.write(b64decode(cert))
3769+ with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
3770+ key_out.write(b64decode(key))
3771+ ca_cert = get_ca_cert()
3772+ if ca_cert:
3773+ with open(CA_CERT_PATH, 'w') as ca_out:
3774+ ca_out.write(b64decode(ca_cert))
3775+ subprocess.check_call(['update-ca-certificates'])
3776+
3777+ def __call__(self):
3778+ return super(ApacheSSLContext, self).__call__()
3779+
3780+
3781+class SwiftRingContext(OSContextGenerator):
3782+ def __call__(self):
3783+ allowed_hosts = []
3784+ for relid in relation_ids('swift-storage'):
3785+ for unit in related_units(relid):
3786+ host = relation_get('private-address', unit, relid)
3787+ allowed_hosts.append(get_host_ip(host))
3788+
3789+ ctxt = {
3790+ 'www_dir': WWW_DIR,
3791+ 'allowed_hosts': allowed_hosts
3792+ }
3793+ return ctxt
3794+
3795+
3796+class SwiftIdentityContext(OSContextGenerator):
3797+ interfaces = ['identity-service']
3798+
3799+ def __call__(self):
3800+ bind_port = config('bind-port')
3801+ workers = config('workers')
3802+ if workers == '0':
3803+ import multiprocessing
3804+ workers = multiprocessing.cpu_count()
3805+ ctxt = {
3806+ 'proxy_ip': get_host_ip(unit_get('private-address')),
3807+ 'bind_port': determine_api_port(bind_port),
3808+ 'workers': workers,
3809+ 'operator_roles': config('operator-roles'),
3810+ 'delay_auth_decision': config('delay-auth-decision')
3811+ }
3812+
3813+ ctxt['ssl'] = False
3814+
3815+ auth_type = config('auth-type')
3816+ auth_host = config('keystone-auth-host')
3817+ admin_user = config('keystone-admin-user')
3818+ admin_password = config('keystone-admin-password')
3819+ if (auth_type == 'keystone' and auth_host
3820+ and admin_user and admin_password):
3821+ log('Using user-specified Keystone configuration.')
3822+ ks_auth = {
3823+ 'auth_type': 'keystone',
3824+ 'auth_protocol': config('keystone-auth-protocol'),
3825+ 'keystone_host': auth_host,
3826+ 'auth_port': config('keystone-auth-port'),
3827+ 'service_user': admin_user,
3828+ 'service_password': admin_password,
3829+ 'service_tenant': config('keystone-admin-tenant-name')
3830+ }
3831+ ctxt.update(ks_auth)
3832+
3833+ for relid in relation_ids('identity-service'):
3834+ log('Using Keystone configuration from identity-service.')
3835+ for unit in related_units(relid):
3836+ ks_auth = {
3837+ 'auth_type': 'keystone',
3838+ 'auth_protocol': 'http', # TODO: http hardcode
3839+ 'keystone_host': relation_get('auth_host',
3840+ unit, relid),
3841+ 'auth_port': relation_get('auth_port',
3842+ unit, relid),
3843+ 'service_user': relation_get('service_username',
3844+ unit, relid),
3845+ 'service_password': relation_get('service_password',
3846+ unit, relid),
3847+ 'service_tenant': relation_get('service_tenant',
3848+ unit, relid),
3849+ 'service_port': relation_get('service_port',
3850+ unit, relid),
3851+ 'admin_token': relation_get('admin_token',
3852+ unit, relid),
3853+ }
3854+ if context_complete(ks_auth):
3855+ ctxt.update(ks_auth)
3856+ return ctxt
3857+
3858+
3859+class MemcachedContext(OSContextGenerator):
3860+ def __call__(self):
3861+ ctxt = {
3862+ 'proxy_ip': get_host_ip(unit_get('private-address'))
3863+ }
3864+ return ctxt
3865+
3866+SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
3867+
3868+
3869+def get_swift_hash():
3870+ if os.path.isfile(SWIFT_HASH_FILE):
3871+ with open(SWIFT_HASH_FILE, 'r') as hashfile:
3872+ swift_hash = hashfile.read().strip()
3873+ elif config('swift-hash'):
3874+ swift_hash = config('swift-hash')
3875+ with open(SWIFT_HASH_FILE, 'w') as hashfile:
3876+ hashfile.write(swift_hash)
3877+ else:
3878+ cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n']
3879+ rand = open('/dev/random', 'r')
3880+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=rand)
3881+ swift_hash = p.communicate()[0].strip()
3882+ with open(SWIFT_HASH_FILE, 'w') as hashfile:
3883+ hashfile.write(swift_hash)
3884+ return swift_hash
3885+
3886+
3887+class SwiftHashContext(OSContextGenerator):
3888+ def __call__(self):
3889+ ctxt = {
3890+ 'swift_hash': get_swift_hash()
3891+ }
3892+ return ctxt
3893
3894=== modified file 'hooks/swift_hooks.py'
3895--- hooks/swift_hooks.py 2013-05-22 21:30:39 +0000
3896+++ hooks/swift_hooks.py 2013-10-15 01:36:24 +0000
3897@@ -4,254 +4,298 @@
3898 import sys
3899 import shutil
3900 import uuid
3901-from subprocess import check_call
3902-
3903-import lib.openstack_common as openstack
3904-import lib.utils as utils
3905-import lib.cluster_utils as cluster
3906-import swift_utils as swift
3907+import subprocess
3908+
3909+import charmhelpers.contrib.openstack.utils as openstack
3910+import charmhelpers.contrib.hahelpers.cluster as cluster
3911+from swift_utils import (
3912+ register_configs,
3913+ restart_map,
3914+ determine_packages,
3915+ ensure_swift_dir,
3916+ SWIFT_RINGS, WWW_DIR,
3917+ initialize_ring,
3918+ swift_user,
3919+ SWIFT_HA_RES,
3920+ balance_ring,
3921+ SWIFT_CONF_DIR,
3922+ get_zone,
3923+ exists_in_ring,
3924+ add_to_ring,
3925+ should_balance,
3926+ do_openstack_upgrade,
3927+ write_rc_script
3928+)
3929+from swift_context import get_swift_hash
3930+
3931+from charmhelpers.core.hookenv import (
3932+ config,
3933+ unit_get,
3934+ relation_set,
3935+ relation_ids,
3936+ relation_get,
3937+ log, ERROR,
3938+ Hooks, UnregisteredHookError,
3939+ open_port
3940+)
3941+from charmhelpers.core.host import (
3942+ service_restart,
3943+ restart_on_change
3944+)
3945+from charmhelpers.fetch import (
3946+ apt_install,
3947+ apt_update
3948+)
3949+from charmhelpers.payload.execd import execd_preinstall
3950
3951 extra_pkgs = [
3952 "haproxy",
3953 "python-jinja2"
3954- ]
3955-
3956-
3957+]
3958+
3959+
3960+hooks = Hooks()
3961+
3962+CONFIGS = register_configs()
3963+
3964+
3965+@hooks.hook('install')
3966 def install():
3967- src = utils.config_get('openstack-origin')
3968+ execd_preinstall()
3969+ src = config('openstack-origin')
3970 if src != 'distro':
3971 openstack.configure_installation_source(src)
3972- check_call(['apt-get', 'update'])
3973+ apt_update(fatal=True)
3974 rel = openstack.get_os_codename_install_source(src)
3975
3976- pkgs = swift.determine_packages(rel)
3977- utils.install(*pkgs)
3978- utils.install(*extra_pkgs)
3979-
3980- swift.ensure_swift_dir()
3981-
3982- # initialize swift configs.
3983- # swift.conf hash
3984- ctxt = {
3985- 'swift_hash': swift.get_swift_hash()
3986- }
3987- with open(swift.SWIFT_CONF, 'w') as conf:
3988- conf.write(swift.render_config(swift.SWIFT_CONF, ctxt))
3989-
3990- # swift-proxy.conf
3991- swift.write_proxy_config()
3992-
3993- # memcached.conf
3994- ctxt = {'proxy_ip': utils.get_host_ip()}
3995- with open(swift.MEMCACHED_CONF, 'w') as conf:
3996- conf.write(swift.render_config(swift.MEMCACHED_CONF, ctxt))
3997- check_call(['service', 'memcached', 'restart'])
3998-
3999+ pkgs = determine_packages(rel)
4000+ apt_install(pkgs, fatal=True)
4001+ apt_install(extra_pkgs, fatal=True)
4002+
4003+ ensure_swift_dir()
4004 # initialize new storage rings.
4005- for ring in swift.SWIFT_RINGS.iteritems():
4006- swift.initialize_ring(ring[1],
4007- utils.config_get('partition-power'),
4008- utils.config_get('replicas'),
4009- utils.config_get('min-hours'))
4010+ for ring in SWIFT_RINGS.iteritems():
4011+ initialize_ring(ring[1],
4012+ config('partition-power'),
4013+ config('replicas'),
4014+ config('min-hours'))
4015
4016 # configure a directory on webserver for distributing rings.
4017- if not os.path.isdir(swift.WWW_DIR):
4018- os.mkdir(swift.WWW_DIR, 0755)
4019- uid, gid = swift.swift_user()
4020- os.chown(swift.WWW_DIR, uid, gid)
4021- swift.write_apache_config()
4022- swift.configure_https()
4023-
4024-
4025+ if not os.path.isdir(WWW_DIR):
4026+ os.mkdir(WWW_DIR, 0755)
4027+ uid, gid = swift_user()
4028+ os.chown(WWW_DIR, uid, gid)
4029+
4030+
4031+@hooks.hook('identity-service-relation-joined')
4032 def keystone_joined(relid=None):
4033- if not cluster.eligible_leader(swift.SWIFT_HA_RES):
4034+ if not cluster.eligible_leader(SWIFT_HA_RES):
4035 return
4036 if cluster.is_clustered():
4037- hostname = utils.config_get('vip')
4038+ hostname = config('vip')
4039 else:
4040- hostname = utils.unit_get('private-address')
4041- port = utils.config_get('bind-port')
4042+ hostname = unit_get('private-address')
4043+ port = config('bind-port')
4044 if cluster.https():
4045 proto = 'https'
4046 else:
4047 proto = 'http'
4048 admin_url = '%s://%s:%s' % (proto, hostname, port)
4049 internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url
4050- utils.relation_set(service='swift',
4051- region=utils.config_get('region'),
4052- public_url=public_url, internal_url=internal_url,
4053- admin_url=admin_url,
4054- requested_roles=utils.config_get('operator-roles'),
4055- rid=relid)
4056-
4057-
4058+ relation_set(service='swift',
4059+ region=config('region'),
4060+ public_url=public_url, internal_url=internal_url,
4061+ admin_url=admin_url,
4062+ requested_roles=config('operator-roles'),
4063+ relation_id=relid)
4064+
4065+
4066+@hooks.hook('identity-service-relation-changed')
4067+@restart_on_change(restart_map())
4068 def keystone_changed():
4069- swift.write_proxy_config()
4070- swift.configure_https()
4071- # Re-fire keystone hooks to ripple back the HTTPS service entry
4072- for relid in utils.relation_ids('identity-service'):
4073- keystone_joined(relid=relid)
4074+ configure_https()
4075
4076
4077 def balance_rings():
4078 '''handle doing ring balancing and distribution.'''
4079 new_ring = False
4080- for ring in swift.SWIFT_RINGS.itervalues():
4081- if swift.balance_ring(ring):
4082- utils.juju_log('INFO', 'Balanced ring %s' % ring)
4083+ for ring in SWIFT_RINGS.itervalues():
4084+ if balance_ring(ring):
4085+ log('Balanced ring %s' % ring)
4086 new_ring = True
4087 if not new_ring:
4088 return
4089
4090- for ring in swift.SWIFT_RINGS.keys():
4091+ for ring in SWIFT_RINGS.keys():
4092 f = '%s.ring.gz' % ring
4093- shutil.copyfile(os.path.join(swift.SWIFT_CONF_DIR, f),
4094- os.path.join(swift.WWW_DIR, f))
4095+ shutil.copyfile(os.path.join(SWIFT_CONF_DIR, f),
4096+ os.path.join(WWW_DIR, f))
4097
4098- if cluster.eligible_leader(swift.SWIFT_HA_RES):
4099+ if cluster.eligible_leader(SWIFT_HA_RES):
4100 msg = 'Broadcasting notification to all storage nodes that new '\
4101 'ring is ready for consumption.'
4102- utils.juju_log('INFO', msg)
4103- path = swift.WWW_DIR.split('/var/www/')[1]
4104+ log(msg)
4105+ path = WWW_DIR.split('/var/www/')[1]
4106 trigger = uuid.uuid4()
4107- swift_hash = swift.get_swift_hash()
4108
4109 if cluster.is_clustered():
4110- hostname = utils.config_get('vip')
4111+ hostname = config('vip')
4112 else:
4113- hostname = utils.unit_get('private-address')
4114+ hostname = unit_get('private-address')
4115
4116 rings_url = 'http://%s/%s' % (hostname, path)
4117 # notify storage nodes that there is a new ring to fetch.
4118- for relid in utils.relation_ids('swift-storage'):
4119- utils.relation_set(rid=relid, swift_hash=swift_hash,
4120- rings_url=rings_url, trigger=trigger)
4121-
4122- swift.proxy_control('restart')
4123-
4124-
4125+ for relid in relation_ids('swift-storage'):
4126+ relation_set(relation_id=relid, swift_hash=get_swift_hash(),
4127+ rings_url=rings_url, trigger=trigger)
4128+
4129+ service_restart('swift-proxy')
4130+
4131+
4132+@hooks.hook('swift-storage-relation-changed')
4133+@restart_on_change(restart_map())
4134 def storage_changed():
4135- zone = swift.get_zone(utils.config_get('zone-assignment'))
4136+ zone = get_zone(config('zone-assignment'))
4137 node_settings = {
4138- 'ip': utils.get_host_ip(utils.relation_get('private-address')),
4139+ 'ip': openstack.get_host_ip(relation_get('private-address')),
4140 'zone': zone,
4141- 'account_port': utils.relation_get('account_port'),
4142- 'object_port': utils.relation_get('object_port'),
4143- 'container_port': utils.relation_get('container_port'),
4144+ 'account_port': relation_get('account_port'),
4145+ 'object_port': relation_get('object_port'),
4146+ 'container_port': relation_get('container_port'),
4147 }
4148 if None in node_settings.itervalues():
4149- utils.juju_log('INFO', 'storage_changed: Relation not ready.')
4150+ log('storage_changed: Relation not ready.')
4151 return None
4152
4153 for k in ['zone', 'account_port', 'object_port', 'container_port']:
4154 node_settings[k] = int(node_settings[k])
4155
4156- # Grant new node access to rings via apache.
4157- swift.write_apache_config()
4158+ CONFIGS.write_all()
4159
4160 # allow for multiple devs per unit, passed along as a : separated list
4161- devs = utils.relation_get('device').split(':')
4162+ devs = relation_get('device').split(':')
4163 for dev in devs:
4164 node_settings['device'] = dev
4165- for ring in swift.SWIFT_RINGS.itervalues():
4166- if not swift.exists_in_ring(ring, node_settings):
4167- swift.add_to_ring(ring, node_settings)
4168+ for ring in SWIFT_RINGS.itervalues():
4169+ if not exists_in_ring(ring, node_settings):
4170+ add_to_ring(ring, node_settings)
4171
4172- if swift.should_balance([r for r in swift.SWIFT_RINGS.itervalues()]):
4173+ if should_balance([r for r in SWIFT_RINGS.itervalues()]):
4174 balance_rings()
4175
4176
4177+@hooks.hook('swift-storage-relation-broken')
4178+@restart_on_change(restart_map())
4179 def storage_broken():
4180- swift.write_apache_config()
4181-
4182-
4183+ CONFIGS.write_all()
4184+
4185+
4186+@hooks.hook('config-changed')
4187+@restart_on_change(restart_map())
4188 def config_changed():
4189+ configure_https()
4190+ open_port(config('bind-port'))
4191 # Determine whether or not we should do an upgrade, based on the
4192 # the version offered in keystone-release.
4193- src = utils.config_get('openstack-origin')
4194+ src = config('openstack-origin')
4195 available = openstack.get_os_codename_install_source(src)
4196 installed = openstack.get_os_codename_package('python-swift')
4197 if (available and
4198- openstack.get_os_version_codename(available) > \
4199+ openstack.get_os_version_codename(available) >
4200 openstack.get_os_version_codename(installed)):
4201- pkgs = swift.determine_packages(available)
4202- swift.do_openstack_upgrade(src, pkgs)
4203-
4204- relids = utils.relation_ids('identity-service')
4205- if relids:
4206- for relid in relids:
4207- keystone_joined(relid)
4208- swift.write_proxy_config()
4209- swift.configure_https()
4210-
4211-
4212+ pkgs = determine_packages(available)
4213+ do_openstack_upgrade(src, pkgs)
4214+
4215+
4216+@hooks.hook('cluster-relation-changed',
4217+ 'cluster-relation-joined')
4218+@restart_on_change(restart_map())
4219 def cluster_changed():
4220- swift.configure_haproxy()
4221-
4222-
4223+ CONFIGS.write_all()
4224+
4225+
4226+@hooks.hook('ha-relation-changed')
4227 def ha_relation_changed():
4228- clustered = utils.relation_get('clustered')
4229- if clustered and cluster.is_leader(swift.SWIFT_HA_RES):
4230- utils.juju_log('INFO',
4231- 'Cluster configured, notifying other services and'
4232- 'updating keystone endpoint configuration')
4233+ clustered = relation_get('clustered')
4234+ if clustered and cluster.is_leader(SWIFT_HA_RES):
4235+ log('Cluster configured, notifying other services and'
4236+ 'updating keystone endpoint configuration')
4237 # Tell all related services to start using
4238 # the VIP instead
4239- for r_id in utils.relation_ids('identity-service'):
4240+ for r_id in relation_ids('identity-service'):
4241 keystone_joined(relid=r_id)
4242
4243
4244+@hooks.hook('ha-relation-joined')
4245 def ha_relation_joined():
4246 # Obtain the config values necessary for the cluster config. These
4247 # include multicast port and interface to bind to.
4248- corosync_bindiface = utils.config_get('ha-bindiface')
4249- corosync_mcastport = utils.config_get('ha-mcastport')
4250- vip = utils.config_get('vip')
4251- vip_cidr = utils.config_get('vip_cidr')
4252- vip_iface = utils.config_get('vip_iface')
4253+ corosync_bindiface = config('ha-bindiface')
4254+ corosync_mcastport = config('ha-mcastport')
4255+ vip = config('vip')
4256+ vip_cidr = config('vip_cidr')
4257+ vip_iface = config('vip_iface')
4258 if not vip:
4259- utils.juju_log('ERROR',
4260- 'Unable to configure hacluster as vip not provided')
4261+ log('Unable to configure hacluster as vip not provided',
4262+ level=ERROR)
4263 sys.exit(1)
4264
4265 # Obtain resources
4266 resources = {
4267- 'res_swift_vip': 'ocf:heartbeat:IPaddr2',
4268- 'res_swift_haproxy': 'lsb:haproxy'
4269- }
4270+ 'res_swift_vip': 'ocf:heartbeat:IPaddr2',
4271+ 'res_swift_haproxy': 'lsb:haproxy'
4272+ }
4273 resource_params = {
4274- 'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
4275- (vip, vip_cidr, vip_iface),
4276- 'res_swift_haproxy': 'op monitor interval="5s"'
4277- }
4278+ 'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
4279+ (vip, vip_cidr, vip_iface),
4280+ 'res_swift_haproxy': 'op monitor interval="5s"'
4281+ }
4282 init_services = {
4283- 'res_swift_haproxy': 'haproxy'
4284- }
4285+ 'res_swift_haproxy': 'haproxy'
4286+ }
4287 clones = {
4288- 'cl_swift_haproxy': 'res_swift_haproxy'
4289- }
4290-
4291- utils.relation_set(init_services=init_services,
4292- corosync_bindiface=corosync_bindiface,
4293- corosync_mcastport=corosync_mcastport,
4294- resources=resources,
4295- resource_params=resource_params,
4296- clones=clones)
4297-
4298-
4299-hooks = {
4300- 'install': install,
4301- 'config-changed': config_changed,
4302- 'identity-service-relation-joined': keystone_joined,
4303- 'identity-service-relation-changed': keystone_changed,
4304- 'swift-storage-relation-changed': storage_changed,
4305- 'swift-storage-relation-broken': storage_broken,
4306- "cluster-relation-joined": cluster_changed,
4307- "cluster-relation-changed": cluster_changed,
4308- "ha-relation-joined": ha_relation_joined,
4309- "ha-relation-changed": ha_relation_changed
4310-}
4311-
4312-utils.do_hooks(hooks)
4313-
4314-sys.exit(0)
4315+ 'cl_swift_haproxy': 'res_swift_haproxy'
4316+ }
4317+
4318+ relation_set(init_services=init_services,
4319+ corosync_bindiface=corosync_bindiface,
4320+ corosync_mcastport=corosync_mcastport,
4321+ resources=resources,
4322+ resource_params=resource_params,
4323+ clones=clones)
4324+
4325+
4326+def configure_https():
4327+ '''
4328+ Enables SSL API Apache config if appropriate and kicks identity-service
4329+ with any required api updates.
4330+ '''
4331+ # need to write all to ensure changes to the entire request pipeline
4332+ # propagate (c-api, haproxy, apache)
4333+ CONFIGS.write_all()
4334+ if 'https' in CONFIGS.complete_contexts():
4335+ cmd = ['a2ensite', 'openstack_https_frontend']
4336+ subprocess.check_call(cmd)
4337+ else:
4338+ cmd = ['a2dissite', 'openstack_https_frontend']
4339+ subprocess.check_call(cmd)
4340+
4341+ # Apache 2.4 required enablement of configuration
4342+ if os.path.exists('/usr/sbin/a2enconf'):
4343+ subprocess.check_call(['a2enconf', 'swift-rings'])
4344+
4345+ for rid in relation_ids('identity-service'):
4346+ keystone_joined(relid=rid)
4347+
4348+ write_rc_script()
4349+
4350+
4351+def main():
4352+ try:
4353+ hooks.execute(sys.argv)
4354+ except UnregisteredHookError as e:
4355+ log('Unknown hook {} - skipping.'.format(e))
4356+
4357+
4358+if __name__ == '__main__':
4359+ main()
4360
4361=== modified file 'hooks/swift_utils.py'
4362--- hooks/swift_utils.py 2013-05-22 21:42:18 +0000
4363+++ hooks/swift_utils.py 2013-10-15 01:36:24 +0000
4364@@ -1,22 +1,36 @@
4365 import os
4366 import pwd
4367 import subprocess
4368-import lib.openstack_common as openstack
4369-import lib.utils as utils
4370-import lib.haproxy_utils as haproxy
4371-import lib.apache_utils as apache
4372-import lib.cluster_utils as cluster
4373+import charmhelpers.contrib.openstack.utils as openstack
4374 import sys
4375-from base64 import b64encode
4376+from collections import OrderedDict
4377+
4378+from charmhelpers.core.hookenv import (
4379+ log, ERROR,
4380+ config,
4381+ relation_get,
4382+)
4383+from charmhelpers.fetch import (
4384+ apt_update,
4385+ apt_install
4386+)
4387+
4388+import charmhelpers.contrib.openstack.context as context
4389+import charmhelpers.contrib.openstack.templating as templating
4390+import swift_context
4391
4392
4393 # Various config files that are managed via templating.
4394-SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
4395 SWIFT_CONF = '/etc/swift/swift.conf'
4396 SWIFT_PROXY_CONF = '/etc/swift/proxy-server.conf'
4397 SWIFT_CONF_DIR = os.path.dirname(SWIFT_CONF)
4398 MEMCACHED_CONF = '/etc/memcached.conf'
4399-APACHE_CONF = '/etc/apache2/conf.d/swift-rings'
4400+SWIFT_RINGS_CONF = '/etc/apache2/conf.d/swift-rings'
4401+SWIFT_RINGS_24_CONF = '/etc/apache2/conf-available/swift-rings.conf'
4402+HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
4403+APACHE_SITE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
4404+APACHE_SITE_24_CONF = '/etc/apache2/sites-available/' \
4405+ 'openstack_https_frontend.conf'
4406
4407 WWW_DIR = '/var/www/swift-rings'
4408
4409@@ -37,47 +51,103 @@
4410 'apache2',
4411 'python-keystone',
4412 ]
4413+# > Folsom specific packages
4414+FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']
4415
4416 SWIFT_HA_RES = 'res_swift_vip'
4417
4418-# Folsom-specific packages
4419-FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']
4420-
4421-
4422-def proxy_control(action):
4423- '''utility to work around swift-init's bad RCs.'''
4424- def _cmd(action):
4425- return ['swift-init', 'proxy-server', action]
4426-
4427- p = subprocess.Popen(_cmd('status'), stdout=subprocess.PIPE)
4428- p.communicate()
4429- status = p.returncode
4430- if action == 'stop':
4431- if status == 1:
4432- return
4433- elif status == 0:
4434- return subprocess.check_call(_cmd('stop'))
4435-
4436- # the proxy will not start unless there are balanced rings
4437- # gzip'd in /etc/swift
4438- missing = False
4439- for k in SWIFT_RINGS.keys():
4440- if not os.path.exists(os.path.join(SWIFT_CONF_DIR, '%s.ring.gz' % k)):
4441- missing = True
4442- if missing:
4443- utils.juju_log('INFO', 'Rings not balanced, skipping %s.' % action)
4444- return
4445-
4446- if action == 'start':
4447- if status == 0:
4448- return
4449- elif status == 1:
4450- return subprocess.check_call(_cmd('start'))
4451- elif action == 'restart':
4452- if status == 0:
4453- return subprocess.check_call(_cmd('restart'))
4454- elif status == 1:
4455- return subprocess.check_call(_cmd('start'))
4456+TEMPLATES = 'templates/'
4457+
4458+# Map config files to hook contexts and services that will be associated
4459+# with file in restart_on_changes()'s service map.
4460+CONFIG_FILES = OrderedDict([
4461+ (SWIFT_CONF, {
4462+ 'hook_contexts': [swift_context.SwiftHashContext()],
4463+ 'services': ['swift-proxy'],
4464+ }),
4465+ (SWIFT_PROXY_CONF, {
4466+ 'hook_contexts': [swift_context.SwiftIdentityContext()],
4467+ 'services': ['swift-proxy'],
4468+ }),
4469+ (HAPROXY_CONF, {
4470+ 'hook_contexts': [context.HAProxyContext(),
4471+ swift_context.HAProxyContext()],
4472+ 'services': ['haproxy'],
4473+ }),
4474+ (SWIFT_RINGS_CONF, {
4475+ 'hook_contexts': [swift_context.SwiftRingContext()],
4476+ 'services': ['apache2'],
4477+ }),
4478+ (SWIFT_RINGS_24_CONF, {
4479+ 'hook_contexts': [swift_context.SwiftRingContext()],
4480+ 'services': ['apache2'],
4481+ }),
4482+ (APACHE_SITE_CONF, {
4483+ 'hook_contexts': [swift_context.ApacheSSLContext()],
4484+ 'services': ['apache2'],
4485+ }),
4486+ (APACHE_SITE_24_CONF, {
4487+ 'hook_contexts': [swift_context.ApacheSSLContext()],
4488+ 'services': ['apache2'],
4489+ }),
4490+ (MEMCACHED_CONF, {
4491+ 'hook_contexts': [swift_context.MemcachedContext()],
4492+ 'services': ['memcached'],
4493+ }),
4494+])
4495+
4496+
4497+def register_configs():
4498+ """
4499+ Register config files with their respective contexts.
4500+ Registration of some configs may not be required depending on
4501+ existence of certain relations.
4502+ """
4503+ # if called without anything installed (eg during install hook)
4504+ # just default to earliest supported release. configs don't get touched
4505+ # till post-install, anyway.
4506+ release = openstack.get_os_codename_package('swift-proxy', fatal=False) \
4507+ or 'essex'
4508+ configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
4509+ openstack_release=release)
4510+
4511+ confs = [SWIFT_CONF,
4512+ SWIFT_PROXY_CONF,
4513+ HAPROXY_CONF,
4514+ MEMCACHED_CONF]
4515+
4516+ for conf in confs:
4517+ configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
4518+
4519+ if os.path.exists('/etc/apache2/conf-available'):
4520+ configs.register(SWIFT_RINGS_24_CONF,
4521+ CONFIG_FILES[SWIFT_RINGS_24_CONF]['hook_contexts'])
4522+ configs.register(APACHE_SITE_24_CONF,
4523+ CONFIG_FILES[APACHE_SITE_24_CONF]['hook_contexts'])
4524+ else:
4525+ configs.register(SWIFT_RINGS_CONF,
4526+ CONFIG_FILES[SWIFT_RINGS_CONF]['hook_contexts'])
4527+ configs.register(APACHE_SITE_CONF,
4528+ CONFIG_FILES[APACHE_SITE_CONF]['hook_contexts'])
4529+ return configs
4530+
4531+
4532+def restart_map():
4533+ '''
4534+ Determine the correct resource map to be passed to
4535+ charmhelpers.core.restart_on_change() based on the services configured.
4536+
4537+ :returns: dict: A dictionary mapping config file to lists of services
4538+ that should be restarted when file changes.
4539+ '''
4540+ _map = []
4541+ for f, ctxt in CONFIG_FILES.iteritems():
4542+ svcs = []
4543+ for svc in ctxt['services']:
4544+ svcs.append(svc)
4545+ if svcs:
4546+ _map.append((f, svcs))
4547+ return OrderedDict(_map)
4548
4549
4550 def swift_user(username='swift'):
4551@@ -100,120 +170,16 @@
4552 return FOLSOM_PACKAGES
4553 elif release == 'grizzly':
4554 return FOLSOM_PACKAGES
4555-
4556-
4557-def render_config(config_file, context):
4558- '''write out config using templates for a specific openstack release.'''
4559- os_release = openstack.get_os_codename_package('python-swift')
4560- # load os release-specific templates.
4561- cfile = os.path.basename(config_file)
4562- templates_dir = os.path.join(utils.TEMPLATES_DIR, os_release)
4563- context['os_release'] = os_release
4564- return utils.render_template(cfile, context, templates_dir)
4565-
4566-
4567-def get_swift_hash():
4568- if os.path.isfile(SWIFT_HASH_FILE):
4569- with open(SWIFT_HASH_FILE, 'r') as hashfile:
4570- swift_hash = hashfile.read().strip()
4571- elif utils.config_get('swift-hash'):
4572- swift_hash = utils.config_get('swift-hash')
4573- with open(SWIFT_HASH_FILE, 'w') as hashfile:
4574- hashfile.write(swift_hash)
4575 else:
4576- cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n']
4577- rand = open('/dev/random', 'r')
4578- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=rand)
4579- swift_hash = p.communicate()[0].strip()
4580- with open(SWIFT_HASH_FILE, 'w') as hashfile:
4581- hashfile.write(swift_hash)
4582- return swift_hash
4583-
4584-
4585-def get_keystone_auth():
4586- '''return standard keystone auth credentials, either from config or the
4587- identity-service relation. user-specified config is given priority
4588- over an existing relation.
4589- '''
4590- auth_type = utils.config_get('auth-type')
4591- auth_host = utils.config_get('keystone-auth-host')
4592- admin_user = utils.config_get('keystone-admin-user')
4593- admin_password = utils.config_get('keystone-admin-user')
4594- if (auth_type == 'keystone' and auth_host
4595- and admin_user and admin_password):
4596- utils.juju_log('INFO', 'Using user-specified Keystone configuration.')
4597- ks_auth = {
4598- 'auth_type': 'keystone',
4599- 'auth_protocol': utils.config_get('keystone-auth-protocol'),
4600- 'keystone_host': auth_host,
4601- 'auth_port': utils.config_get('keystone-auth-port'),
4602- 'service_user': admin_user,
4603- 'service_password': admin_password,
4604- 'service_tenant': utils.config_get('keystone-admin-tenant-name')
4605- }
4606- return ks_auth
4607-
4608- for relid in utils.relation_ids('identity-service'):
4609- utils.juju_log('INFO',
4610- 'Using Keystone configuration from identity-service.')
4611- for unit in utils.relation_list(relid):
4612- ks_auth = {
4613- 'auth_type': 'keystone',
4614- 'auth_protocol': 'http',
4615- 'keystone_host': utils.relation_get('auth_host',
4616- unit, relid),
4617- 'auth_port': utils.relation_get('auth_port', unit, relid),
4618- 'service_user': utils.relation_get('service_username',
4619- unit, relid),
4620- 'service_password': utils.relation_get('service_password',
4621- unit, relid),
4622- 'service_tenant': utils.relation_get('service_tenant',
4623- unit, relid),
4624- 'service_port': utils.relation_get('service_port',
4625- unit, relid),
4626- 'admin_token': utils.relation_get('admin_token',
4627- unit, relid),
4628- }
4629- if None not in ks_auth.itervalues():
4630- return ks_auth
4631- return None
4632-
4633-
4634-def write_proxy_config():
4635-
4636- bind_port = utils.config_get('bind-port')
4637- workers = utils.config_get('workers')
4638- if workers == '0':
4639- import multiprocessing
4640- workers = multiprocessing.cpu_count()
4641-
4642+ return FOLSOM_PACKAGES
4643+
4644+
4645+def write_rc_script():
4646 env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server',
4647- 'OPENSTACK_PORT_API': bind_port,
4648+ 'OPENSTACK_PORT_API': config('bind-port'),
4649 'OPENSTACK_PORT_MEMCACHED': 11211}
4650 openstack.save_script_rc(**env_vars)
4651
4652- ctxt = {
4653- 'proxy_ip': utils.get_host_ip(),
4654- 'bind_port': cluster.determine_api_port(bind_port),
4655- 'workers': workers,
4656- 'operator_roles': utils.config_get('operator-roles'),
4657- 'delay_auth_decision': utils.config_get('delay-auth-decision')
4658- }
4659-
4660- ctxt['ssl'] = False
4661-
4662- ks_auth = get_keystone_auth()
4663- if ks_auth:
4664- utils.juju_log('INFO', 'Enabling Keystone authentication.')
4665- for k, v in ks_auth.iteritems():
4666- ctxt[k] = v
4667-
4668- with open(SWIFT_PROXY_CONF, 'w') as conf:
4669- conf.write(render_config(SWIFT_PROXY_CONF, ctxt))
4670-
4671- proxy_control('restart')
4672- subprocess.check_call(['open-port', str(bind_port)])
4673-
4674
4675 def _load_builder(path):
4676 # lifted straight from /usr/bin/swift-ring-builder
4677@@ -263,7 +229,7 @@
4678 if sorted(d) == sorted(n):
4679
4680 msg = 'Node already exists in ring (%s).' % ring_path
4681- utils.juju_log('INFO', msg)
4682+ log(msg)
4683 return True
4684
4685 return False
4686@@ -290,9 +256,9 @@
4687 ring.add_dev(new_dev)
4688 _write_ring(ring, ring_path)
4689 msg = 'Added new device to ring %s: %s' %\
4690- (ring_path,
4691- [k for k in new_dev.iteritems()])
4692- utils.juju_log('INFO', msg)
4693+ (ring_path,
4694+ [k for k in new_dev.iteritems()])
4695+ log(msg)
4696
4697
4698 def _get_zone(ring_builder):
4699@@ -328,7 +294,7 @@
4700 being assigned to a different zone.
4701 '''
4702 if assignment_policy == 'manual':
4703- return utils.relation_get('zone')
4704+ return relation_get('zone')
4705 elif assignment_policy == 'auto':
4706 potential_zones = []
4707 for ring in SWIFT_RINGS.itervalues():
4708@@ -336,8 +302,8 @@
4709 potential_zones.append(_get_zone(builder))
4710 return set(potential_zones).pop()
4711 else:
4712- utils.juju_log('ERROR', 'Invalid zone assignment policy: %s' %\
4713- assignment_policy)
4714+ log('Invalid zone assignment policy: %s' % assignment_policy,
4715+ level=ERROR)
4716 sys.exit(1)
4717
4718
4719@@ -355,7 +321,7 @@
4720 # swift-ring-builder returns 1 on WARNING (ring didn't require balance)
4721 return False
4722 else:
4723- utils.juju_log('ERROR', 'balance_ring: %s returned %s' % (cmd, rc))
4724+ log('balance_ring: %s returned %s' % (cmd, rc), level=ERROR)
4725 sys.exit(1)
4726
4727
4728@@ -373,88 +339,9 @@
4729 return do_rebalance
4730
4731
4732-def write_apache_config():
4733- '''write out /etc/apache2/conf.d/swift-rings with a list of authenticated
4734- hosts'''
4735- utils.juju_log('INFO', 'Updating %s.' % APACHE_CONF)
4736-
4737- allowed_hosts = []
4738- for relid in utils.relation_ids('swift-storage'):
4739- for unit in utils.relation_list(relid):
4740- host = utils.relation_get('private-address', unit, relid)
4741- allowed_hosts.append(utils.get_host_ip(host))
4742-
4743- ctxt = {
4744- 'www_dir': WWW_DIR,
4745- 'allowed_hosts': allowed_hosts
4746- }
4747- with open(APACHE_CONF, 'w') as conf:
4748- conf.write(render_config(APACHE_CONF, ctxt))
4749- utils.reload('apache2')
4750-
4751-
4752-def generate_cert():
4753- '''
4754- Generates a self signed certificate and key using the
4755- provided charm configuration data.
4756-
4757- returns: tuple of (cert, key)
4758- '''
4759- CERT = '/etc/swift/ssl.cert'
4760- KEY = '/etc/swift/ssl.key'
4761- if (not os.path.exists(CERT) and
4762- not os.path.exists(KEY)):
4763- subj = '/C=%s/ST=%s/L=%s/CN=%s' %\
4764- (utils.config_get('country'), utils.config_get('state'),
4765- utils.config_get('locale'), utils.config_get('common-name'))
4766- cmd = ['openssl', 'req', '-new', '-x509', '-nodes',
4767- '-out', CERT, '-keyout', KEY,
4768- '-subj', subj]
4769- subprocess.check_call(cmd)
4770- os.chmod(KEY, 0600)
4771- # Slurp as base64 encoded - makes handling easier up the stack
4772- with open(CERT, 'r') as cfile:
4773- ssl_cert = b64encode(cfile.read())
4774- with open(KEY, 'r') as kfile:
4775- ssl_key = b64encode(kfile.read())
4776- return (ssl_cert, ssl_key)
4777-
4778-
4779-def configure_haproxy():
4780- api_port = utils.config_get('bind-port')
4781- service_ports = {
4782- "swift": [
4783- cluster.determine_haproxy_port(api_port),
4784- cluster.determine_api_port(api_port)
4785- ]
4786- }
4787- write_proxy_config()
4788- haproxy.configure_haproxy(service_ports)
4789-
4790-
4791-def configure_https():
4792- if cluster.https():
4793- api_port = utils.config_get('bind-port')
4794- if (len(cluster.peer_units()) > 0 or
4795- cluster.is_clustered()):
4796- target_port = cluster.determine_haproxy_port(api_port)
4797- configure_haproxy()
4798- else:
4799- target_port = cluster.determine_api_port(api_port)
4800- write_proxy_config()
4801- cert, key = apache.get_cert()
4802- if None in (cert, key):
4803- cert, key = generate_cert()
4804- ca_cert = apache.get_ca_cert()
4805- apache.setup_https(namespace="swift",
4806- port_maps={api_port: target_port},
4807- cert=cert, key=key, ca_cert=ca_cert)
4808-
4809-
4810 def do_openstack_upgrade(source, packages):
4811 openstack.configure_installation_source(source)
4812- os.environ['DEBIAN_FRONTEND'] = 'noninteractive'
4813- subprocess.check_call(['apt-get', 'update'])
4814- cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confnew', '-y',
4815- 'install'] + packages
4816- subprocess.check_call(cmd)
4817+ apt_update(fatal=True)
4818+ apt_install(options=['--option', 'Dpkg::Options::=--force-confnew'],
4819+ packages=packages,
4820+ fatal=True)
4821
4822=== modified file 'revision'
4823--- revision 2013-07-19 19:35:01 +0000
4824+++ revision 2013-10-15 01:36:24 +0000
4825@@ -1,1 +1,1 @@
4826-133
4827+146
4828
4829=== removed file 'templates/apache2_site.tmpl'
4830--- templates/apache2_site.tmpl 2013-03-07 21:55:52 +0000
4831+++ templates/apache2_site.tmpl 1970-01-01 00:00:00 +0000
4832@@ -1,19 +0,0 @@
4833-Listen {{ ext }}
4834-NameVirtualHost *:{{ ext }}
4835-<VirtualHost *:{{ ext }}>
4836- ServerName {{ private_address }}
4837- SSLEngine on
4838- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
4839- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
4840- ProxyPass / http://localhost:{{ int }}/
4841- ProxyPassReverse / http://localhost:{{ int }}/
4842- ProxyPreserveHost on
4843-</VirtualHost>
4844-<Proxy *>
4845- Order deny,allow
4846- Allow from all
4847-</Proxy>
4848-<Location />
4849- Order allow,deny
4850- Allow from all
4851-</Location>
4852
4853=== removed directory 'templates/folsom'
4854=== removed symlink 'templates/folsom/memcached.conf'
4855=== target was u'../essex/memcached.conf'
4856=== removed symlink 'templates/folsom/proxy-server.conf'
4857=== target was u'../essex/proxy-server.conf'
4858=== removed symlink 'templates/folsom/swift-rings'
4859=== target was u'../essex/swift-rings'
4860=== removed symlink 'templates/folsom/swift.conf'
4861=== target was u'../essex/swift.conf'
4862=== removed symlink 'templates/grizzly/memcached.conf'
4863=== target was u'../essex/memcached.conf'
4864=== modified file 'templates/grizzly/proxy-server.conf'
4865--- templates/grizzly/proxy-server.conf 2013-04-15 20:20:50 +0000
4866+++ templates/grizzly/proxy-server.conf 2013-10-15 01:36:24 +0000
4867@@ -48,7 +48,6 @@
4868 delay_auth_decision = {{ delay_auth_decision|lower }}
4869 {% if os_release != 'essex' %}signing_dir = /etc/swift{% endif %}
4870
4871-
4872 [filter:s3token]
4873 paste.filter_factory = keystone.middleware.s3_token:filter_factory
4874 service_host = {{ keystone_host }}
4875
4876=== removed symlink 'templates/grizzly/swift-rings'
4877=== target was u'../essex/swift-rings'
4878=== removed symlink 'templates/grizzly/swift.conf'
4879=== target was u'../essex/swift.conf'
4880=== removed file 'templates/haproxy.cfg'
4881--- templates/haproxy.cfg 2013-03-04 17:00:47 +0000
4882+++ templates/haproxy.cfg 1970-01-01 00:00:00 +0000
4883@@ -1,35 +0,0 @@
4884-global
4885- log 127.0.0.1 local0
4886- log 127.0.0.1 local1 notice
4887- maxconn 20000
4888- user haproxy
4889- group haproxy
4890- spread-checks 0
4891-
4892-defaults
4893- log global
4894- mode http
4895- option httplog
4896- option dontlognull
4897- retries 3
4898- timeout queue 1000
4899- timeout connect 1000
4900- timeout client 30000
4901- timeout server 30000
4902-
4903-listen stats :8888
4904- mode http
4905- stats enable
4906- stats hide-version
4907- stats realm Haproxy\ Statistics
4908- stats uri /
4909- stats auth admin:password
4910-
4911-{% for service, ports in service_ports.iteritems() -%}
4912-listen {{ service }} 0.0.0.0:{{ ports[0] }}
4913- balance roundrobin
4914- option tcplog
4915- {% for unit, address in units.iteritems() -%}
4916- server {{ unit }} {{ address }}:{{ ports[1] }} check
4917- {% endfor %}
4918-{% endfor %}
4919
4920=== added directory 'templates/havana'
4921=== added file 'templates/havana/proxy-server.conf'
4922--- templates/havana/proxy-server.conf 1970-01-01 00:00:00 +0000
4923+++ templates/havana/proxy-server.conf 2013-10-15 01:36:24 +0000
4924@@ -0,0 +1,65 @@
4925+[DEFAULT]
4926+bind_port = {{ bind_port }}
4927+workers = {{ workers }}
4928+user = swift
4929+{% if ssl %}
4930+cert_file = {{ ssl_cert }}
4931+key_file = {{ ssl_key }}
4932+{% endif %}
4933+
4934+{% if auth_type == 'keystone' %}
4935+[pipeline:main]
4936+pipeline = healthcheck cache swift3 authtoken keystoneauth proxy-server
4937+{% else %}
4938+[pipeline:main]
4939+pipeline = healthcheck cache tempauth proxy-server
4940+{% endif %}
4941+
4942+[app:proxy-server]
4943+use = egg:swift#proxy
4944+allow_account_management = true
4945+{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
4946+
4947+[filter:tempauth]
4948+use = egg:swift#tempauth
4949+user_system_root = testpass .admin https://{{ proxy_ip }}:8080/v1/AUTH_system
4950+
4951+[filter:healthcheck]
4952+use = egg:swift#healthcheck
4953+
4954+[filter:cache]
4955+use = egg:swift#memcache
4956+memcache_servers = {{ proxy_ip }}:11211
4957+
4958+{% if auth_type == 'keystone' %}
4959+[filter:keystoneauth]
4960+use = egg:swift#keystoneauth
4961+operator_roles = {{ operator_roles }}
4962+
4963+[filter:authtoken]
4964+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
4965+auth_host = {{ keystone_host }}
4966+auth_port = {{ auth_port }}
4967+auth_protocol = {{ auth_protocol }}
4968+auth_uri = {{ auth_protocol }}://{{ keystone_host }}:{{ service_port }}
4969+admin_tenant_name = {{ service_tenant }}
4970+admin_user = {{ service_user }}
4971+admin_password = {{ service_password }}
4972+delay_auth_decision = {{ delay_auth_decision|lower }}
4973+{% if os_release != 'essex' %}signing_dir = /etc/swift{% endif %}
4974+cache = swift.cache
4975+
4976+[filter:s3token]
4977+paste.filter_factory = keystone.middleware.s3_token:filter_factory
4978+service_host = {{ keystone_host }}
4979+service_port = {{ service_port }}
4980+auth_port = {{ auth_port }}
4981+auth_host = {{ keystone_host }}
4982+auth_protocol = {{ auth_protocol }}
4983+auth_token = {{ admin_token }}
4984+admin_token = {{ admin_token }}
4985+
4986+[filter:swift3]
4987+{% if os_release == 'essex' %}use = egg:swift#swift3{% else %}use = egg:swift3#swift3
4988+{% endif %}
4989+{% endif %}
4990
4991=== renamed file 'templates/essex/memcached.conf' => 'templates/memcached.conf'
4992=== renamed file 'templates/essex/swift-rings' => 'templates/swift-rings'
4993=== added symlink 'templates/swift-rings.conf'
4994=== target is u'swift-rings'
4995=== renamed file 'templates/essex/swift.conf' => 'templates/swift.conf'
4996=== renamed directory 'tests' => 'unit_tests'

Subscribers

People subscribed via source and target branches