Merge lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha into lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next

Proposed by Liam Young
Status: Merged
Merged at revision: 32
Proposed branch: lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next
Diff against target: 5611 lines (+4608/-116)
46 files modified
charm-helpers-hooks.yaml (+8/-1)
config.yaml (+20/-0)
files/ports.conf (+11/-0)
hooks/ceph_radosgw_context.py (+29/-0)
hooks/charmhelpers/__init__.py (+22/-0)
hooks/charmhelpers/contrib/hahelpers/apache.py (+66/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+248/-0)
hooks/charmhelpers/contrib/network/ip.py (+351/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+92/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+278/-0)
hooks/charmhelpers/contrib/openstack/context.py (+1038/-0)
hooks/charmhelpers/contrib/openstack/ip.py (+93/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+223/-0)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+2/-0)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+15/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+58/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+24/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+24/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+279/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+625/-0)
hooks/charmhelpers/contrib/python/packages.py (+77/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+428/-0)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+89/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/decorators.py (+41/-0)
hooks/charmhelpers/core/fstab.py (+10/-8)
hooks/charmhelpers/core/hookenv.py (+36/-16)
hooks/charmhelpers/core/host.py (+52/-24)
hooks/charmhelpers/core/services/__init__.py (+2/-2)
hooks/charmhelpers/core/services/helpers.py (+9/-5)
hooks/charmhelpers/core/templating.py (+3/-2)
hooks/charmhelpers/fetch/__init__.py (+22/-13)
hooks/charmhelpers/fetch/archiveurl.py (+53/-16)
hooks/charmhelpers/fetch/bzrurl.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+12/-5)
hooks/hooks.py (+118/-8)
hooks/utils.py (+36/-2)
metadata.yaml (+6/-0)
templates/ceph.conf (+1/-1)
templates/rgw (+1/-1)
tests/charmhelpers/__init__.py (+22/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+3/-3)
tests/charmhelpers/contrib/amulet/utils.py (+6/-4)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha
Reviewer: James Page (Approve)
Review via email: mp+243263@code.launchpad.net

Description of the change

Add HA support: new vip, ha-bindiface and ha-mcastport config options, an HAProxy/Apache front end for the radosgw, and a charm-helpers resync pulling in the hahelpers, network and openstack contrib modules.
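
As a rough illustration of how these pieces fit together, the sketch below is hypothetical code (this branch's actual hook changes live in hooks/hooks.py, listed in the diff but not excerpted here) showing how a charm typically hands the new options to the hacluster subordinate; the resource name is made up.

    # Hypothetical sketch, not this branch's hooks.py: pass the new vip,
    # ha-bindiface and ha-mcastport options to hacluster over the 'ha'
    # relation. 'res_cephrgw_vip' is an invented resource name.
    from charmhelpers.core.hookenv import relation_set
    from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config

    def ha_relation_joined():
        conf = get_hacluster_config()  # raises HAIncompleteConfig if unset
        relation_set(
            corosync_bindiface=conf['ha-bindiface'],
            corosync_mcastport=conf['ha-mcastport'],
            resources={'res_cephrgw_vip': 'ocf:heartbeat:IPaddr2'},
            resource_params={
                'res_cephrgw_vip': 'params ip="%s"' % conf['vip'],
            })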

Revision history for this message
James Page (james-page) :
review: Approve

Preview Diff

1=== modified file 'charm-helpers-hooks.yaml'
2--- charm-helpers-hooks.yaml 2014-09-27 02:57:08 +0000
3+++ charm-helpers-hooks.yaml 2015-01-15 16:18:44 +0000
4@@ -5,5 +5,12 @@
5 - fetch
6 - contrib.storage.linux:
7 - utils
8+ - contrib.hahelpers:
9+ - apache
10+ - cluster
11 - payload.execd
12- - contrib.openstack.alternatives
13+ - contrib.openstack|inc=*
14+ - contrib.network.ip
15+ - contrib.openstack.ip
16+ - contrib.storage.linux
17+ - contrib.python.packages
18
19=== modified file 'config.yaml'
20--- config.yaml 2015-01-14 09:10:04 +0000
21+++ config.yaml 2015-01-15 16:18:44 +0000
22@@ -67,3 +67,23 @@
23 .
24 Enable this option to disable use of Apache and enable the embedded
25 web container feature.
26+ vip:
27+ type: string
28+ default:
29+ description: |
30+ Virtual IP(s) to use to front API services in HA configuration.
31+ .
32+ If multiple networks are being used, a VIP should be provided for each
33+ network, separated by spaces.
34+ ha-bindiface:
35+ type: string
36+ default: eth0
37+ description: |
38+ Default network interface on which the HA cluster will bind for
39+ communication with the other members of the HA cluster.
40+ ha-mcastport:
41+ type: int
42+ default: 5414
43+ description: |
44+ Default multicast port number that will be used to communicate between
45+ HA Cluster nodes.
46
47=== added file 'files/ports.conf'
48--- files/ports.conf 1970-01-01 00:00:00 +0000
49+++ files/ports.conf 2015-01-15 16:18:44 +0000
50@@ -0,0 +1,11 @@
51+Listen 70
52+
53+<IfModule ssl_module>
54+ Listen 443
55+</IfModule>
56+
57+<IfModule mod_gnutls.c>
58+ Listen 443
59+</IfModule>
60+
61+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
62
63=== added file 'hooks/ceph_radosgw_context.py'
64--- hooks/ceph_radosgw_context.py 1970-01-01 00:00:00 +0000
65+++ hooks/ceph_radosgw_context.py 2015-01-15 16:18:44 +0000
66@@ -0,0 +1,29 @@
67+from charmhelpers.contrib.openstack import context
68+from charmhelpers.contrib.hahelpers.cluster import (
69+ determine_api_port,
70+ determine_apache_port,
71+)
72+
73+
74+class HAProxyContext(context.HAProxyContext):
75+
76+ def __call__(self):
77+ ctxt = super(HAProxyContext, self).__call__()
78+
79+ # Apache ports
80+ a_cephradosgw_api = determine_apache_port(80,
81+ singlenode_mode=True)
82+
83+ port_mapping = {
84+ 'cephradosgw-server': [
85+ 80, a_cephradosgw_api]
86+ }
87+
88+ ctxt['cephradosgw_bind_port'] = determine_api_port(
89+ 80,
90+ singlenode_mode=True,
91+ )
92+
93+ # for haproxy.conf
94+ ctxt['service_ports'] = port_mapping
95+ return ctxt
96
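
The net effect of the context above: haproxy keeps the public port (80) and each layer behind it steps down by 10, which is where the Listen 70 in files/ports.conf comes from. A worked trace of the arithmetic (the helpers themselves are defined in hooks/charmhelpers/contrib/hahelpers/cluster.py later in this diff):

    # Worked trace of the port shuffle for ceph-radosgw (public port 80),
    # following determine_api_port()/determine_apache_port() below.
    public_port = 80
    apache_port = public_port - 10        # behind haproxy: 70 ("Listen 70")
    api_port = public_port - 10           # no TLS: the API also binds to 70
    api_port_tls = public_port - 2 * 10   # an HTTPS frontend adds a layer: 60
    print(apache_port, api_port, api_port_tls)  # 70 70 60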
97=== modified file 'hooks/charmhelpers/__init__.py'
98--- hooks/charmhelpers/__init__.py 2014-01-24 16:02:57 +0000
99+++ hooks/charmhelpers/__init__.py 2015-01-15 16:18:44 +0000
100@@ -0,0 +1,22 @@
101+# Bootstrap charm-helpers, installing its dependencies if necessary using
102+# only standard libraries.
103+import subprocess
104+import sys
105+
106+try:
107+ import six # flake8: noqa
108+except ImportError:
109+ if sys.version_info.major == 2:
110+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
111+ else:
112+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
113+ import six # flake8: noqa
114+
115+try:
116+ import yaml # flake8: noqa
117+except ImportError:
118+ if sys.version_info.major == 2:
119+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
120+ else:
121+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
122+ import yaml # flake8: noqa
123
124=== added directory 'hooks/charmhelpers/contrib/hahelpers'
125=== added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
126=== added file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
127--- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
128+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2015-01-15 16:18:44 +0000
129@@ -0,0 +1,66 @@
130+#
131+# Copyright 2012 Canonical Ltd.
132+#
133+# This file is sourced from lp:openstack-charm-helpers
134+#
135+# Authors:
136+# James Page <james.page@ubuntu.com>
137+# Adam Gandelman <adamg@ubuntu.com>
138+#
139+
140+import subprocess
141+
142+from charmhelpers.core.hookenv import (
143+ config as config_get,
144+ relation_get,
145+ relation_ids,
146+ related_units as relation_list,
147+ log,
148+ INFO,
149+)
150+
151+
152+def get_cert(cn=None):
153+ # TODO: deal with multiple https endpoints via charm config
154+ cert = config_get('ssl_cert')
155+ key = config_get('ssl_key')
156+ if not (cert and key):
157+ log("Inspecting identity-service relations for SSL certificate.",
158+ level=INFO)
159+ cert = key = None
160+ if cn:
161+ ssl_cert_attr = 'ssl_cert_{}'.format(cn)
162+ ssl_key_attr = 'ssl_key_{}'.format(cn)
163+ else:
164+ ssl_cert_attr = 'ssl_cert'
165+ ssl_key_attr = 'ssl_key'
166+ for r_id in relation_ids('identity-service'):
167+ for unit in relation_list(r_id):
168+ if not cert:
169+ cert = relation_get(ssl_cert_attr,
170+ rid=r_id, unit=unit)
171+ if not key:
172+ key = relation_get(ssl_key_attr,
173+ rid=r_id, unit=unit)
174+ return (cert, key)
175+
176+
177+def get_ca_cert():
178+ ca_cert = config_get('ssl_ca')
179+ if ca_cert is None:
180+ log("Inspecting identity-service relations for CA SSL certificate.",
181+ level=INFO)
182+ for r_id in relation_ids('identity-service'):
183+ for unit in relation_list(r_id):
184+ if ca_cert is None:
185+ ca_cert = relation_get('ca_cert',
186+ rid=r_id, unit=unit)
187+ return ca_cert
188+
189+
190+def install_ca_cert(ca_cert):
191+ if ca_cert:
192+ with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
193+ 'w') as crt:
194+ crt.write(ca_cert)
195+ subprocess.check_call(['update-ca-certificates', '--fresh'])
196
197=== added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
198--- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
199+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-01-15 16:18:44 +0000
200@@ -0,0 +1,248 @@
201+#
202+# Copyright 2012 Canonical Ltd.
203+#
204+# Authors:
205+# James Page <james.page@ubuntu.com>
206+# Adam Gandelman <adamg@ubuntu.com>
207+#
208+
209+"""
210+Helpers for clustering and determining "cluster leadership" and other
211+clustering-related helpers.
212+"""
213+
214+import subprocess
215+import os
216+
217+from socket import gethostname as get_unit_hostname
218+
219+import six
220+
221+from charmhelpers.core.hookenv import (
222+ log,
223+ relation_ids,
224+ related_units as relation_list,
225+ relation_get,
226+ config as config_get,
227+ INFO,
228+ ERROR,
229+ WARNING,
230+ unit_get,
231+)
232+from charmhelpers.core.decorators import (
233+ retry_on_exception,
234+)
235+
236+
237+class HAIncompleteConfig(Exception):
238+ pass
239+
240+
241+class CRMResourceNotFound(Exception):
242+ pass
243+
244+
245+def is_elected_leader(resource):
246+ """
247+ Returns True if the charm executing this is the elected cluster leader.
248+
249+ It relies on two mechanisms to determine leadership:
250+ 1. If the charm is part of a corosync cluster, call corosync to
251+ determine leadership.
252+ 2. If the charm is not part of a corosync cluster, the leader is
253+ determined as being "the alive unit with the lowest unit number". In
254+ other words, the oldest surviving unit.
255+ """
256+ if is_clustered():
257+ if not is_crm_leader(resource):
258+ log('Deferring action to CRM leader.', level=INFO)
259+ return False
260+ else:
261+ peers = peer_units()
262+ if peers and not oldest_peer(peers):
263+ log('Deferring action to oldest service unit.', level=INFO)
264+ return False
265+ return True
266+
267+
268+def is_clustered():
269+ for r_id in (relation_ids('ha') or []):
270+ for unit in (relation_list(r_id) or []):
271+ clustered = relation_get('clustered',
272+ rid=r_id,
273+ unit=unit)
274+ if clustered:
275+ return True
276+ return False
277+
278+
279+@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
280+def is_crm_leader(resource, retry=False):
281+ """
282+ Returns True if the charm calling this is the elected corosync leader,
283+ as returned by calling the external "crm" command.
284+
285+ We allow this operation to be retried to avoid the possibility of getting a
286+ false negative. See LP #1396246 for more info.
287+ """
288+ cmd = ['crm', 'resource', 'show', resource]
289+ try:
290+ status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
291+ if not isinstance(status, six.text_type):
292+ status = six.text_type(status, "utf-8")
293+ except subprocess.CalledProcessError:
294+ status = None
295+
296+ if status and get_unit_hostname() in status:
297+ return True
298+
299+ if status and "resource %s is NOT running" % (resource) in status:
300+ raise CRMResourceNotFound("CRM resource %s not found" % (resource))
301+
302+ return False
303+
304+
305+def is_leader(resource):
306+ log("is_leader is deprecated. Please consider using is_crm_leader "
307+ "instead.", level=WARNING)
308+ return is_crm_leader(resource)
309+
310+
311+def peer_units(peer_relation="cluster"):
312+ peers = []
313+ for r_id in (relation_ids(peer_relation) or []):
314+ for unit in (relation_list(r_id) or []):
315+ peers.append(unit)
316+ return peers
317+
318+
319+def peer_ips(peer_relation='cluster', addr_key='private-address'):
320+ '''Return a dict of peers and their private-address'''
321+ peers = {}
322+ for r_id in relation_ids(peer_relation):
323+ for unit in relation_list(r_id):
324+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
325+ return peers
326+
327+
328+def oldest_peer(peers):
329+ """Determines who the oldest peer is by comparing unit numbers."""
330+ local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
331+ for peer in peers:
332+ remote_unit_no = int(peer.split('/')[1])
333+ if remote_unit_no < local_unit_no:
334+ return False
335+ return True
336+
337+
338+def eligible_leader(resource):
339+ log("eligible_leader is deprecated. Please consider using "
340+ "is_elected_leader instead.", level=WARNING)
341+ return is_elected_leader(resource)
342+
343+
344+def https():
345+ '''
346+ Determines whether enough data has been provided in configuration
347+ or relation data to configure HTTPS
348+ .
349+ returns: boolean
350+ '''
351+ if config_get('use-https') == "yes":
352+ return True
353+ if config_get('ssl_cert') and config_get('ssl_key'):
354+ return True
355+ for r_id in relation_ids('identity-service'):
356+ for unit in relation_list(r_id):
357+ # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
358+ rel_state = [
359+ relation_get('https_keystone', rid=r_id, unit=unit),
360+ relation_get('ca_cert', rid=r_id, unit=unit),
361+ ]
362+ # NOTE: works around (LP: #1203241)
363+ if (None not in rel_state) and ('' not in rel_state):
364+ return True
365+ return False
366+
367+
368+def determine_api_port(public_port, singlenode_mode=False):
369+ '''
370+ Determine correct API server listening port based on
371+ existence of HTTPS reverse proxy and/or haproxy.
372+
373+ public_port: int: standard public port for given service
374+
375+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
376+
377+ returns: int: the correct listening port for the API service
378+ '''
379+ i = 0
380+ if singlenode_mode:
381+ i += 1
382+ elif len(peer_units()) > 0 or is_clustered():
383+ i += 1
384+ if https():
385+ i += 1
386+ return public_port - (i * 10)
387+
388+
389+def determine_apache_port(public_port, singlenode_mode=False):
390+ '''
391+ Determine correct apache listening port based on public port and the
392+ state of the cluster.
393+
394+ public_port: int: standard public port for given service
395+
396+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
397+
398+ returns: int: the correct listening port for the HAProxy service
399+ '''
400+ i = 0
401+ if singlenode_mode:
402+ i += 1
403+ elif len(peer_units()) > 0 or is_clustered():
404+ i += 1
405+ return public_port - (i * 10)
406+
407+
408+def get_hacluster_config():
409+ '''
410+ Obtains all relevant configuration from charm configuration required
411+ for initiating a relation to hacluster:
412+
413+ ha-bindiface, ha-mcastport, vip
414+
415+ returns: dict: A dict containing settings keyed by setting name.
416+ raises: HAIncompleteConfig if settings are missing.
417+ '''
418+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
419+ conf = {}
420+ for setting in settings:
421+ conf[setting] = config_get(setting)
422+ missing = []
423+ [missing.append(s) for s, v in six.iteritems(conf) if v is None]
424+ if missing:
425+ log('Insufficient config data to configure hacluster.', level=ERROR)
426+ raise HAIncompleteConfig
427+ return conf
428+
429+
430+def canonical_url(configs, vip_setting='vip'):
431+ '''
432+ Returns the correct HTTP URL to this host given the state of HTTPS
433+ configuration and hacluster.
434+
435+ :configs: OSTemplateRenderer: A config templating object to inspect for
436+ a complete https context.
437+
438+ :vip_setting: str: Setting in charm config that specifies
439+ VIP address.
440+ '''
441+ scheme = 'http'
442+ if 'https' in configs.complete_contexts():
443+ scheme = 'https'
444+ if is_clustered():
445+ addr = config_get(vip_setting)
446+ else:
447+ addr = unit_get('private-address')
448+ return '%s://%s' % (scheme, addr)
449
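
When no corosync cluster is present, is_elected_leader() falls back to simple unit-number comparison. A self-contained rerun of oldest_peer() from the hunk above:

    # Rerun of oldest_peer() from above: the alive unit with the lowest
    # unit number wins. JUJU_UNIT_NAME is faked for the demonstration.
    import os
    os.environ['JUJU_UNIT_NAME'] = 'ceph-radosgw/1'

    def oldest_peer(peers):
        local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
        for peer in peers:
            if int(peer.split('/')[1]) < local_unit_no:
                return False
        return True

    print(oldest_peer(['ceph-radosgw/2', 'ceph-radosgw/3']))  # True
    print(oldest_peer(['ceph-radosgw/0', 'ceph-radosgw/2']))  # False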
450=== added directory 'hooks/charmhelpers/contrib/network'
451=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
452=== added file 'hooks/charmhelpers/contrib/network/ip.py'
453--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
454+++ hooks/charmhelpers/contrib/network/ip.py 2015-01-15 16:18:44 +0000
455@@ -0,0 +1,351 @@
456+import glob
457+import re
458+import subprocess
459+
460+from functools import partial
461+
462+from charmhelpers.core.hookenv import unit_get
463+from charmhelpers.fetch import apt_install
464+from charmhelpers.core.hookenv import (
465+ log
466+)
467+
468+try:
469+ import netifaces
470+except ImportError:
471+ apt_install('python-netifaces')
472+ import netifaces
473+
474+try:
475+ import netaddr
476+except ImportError:
477+ apt_install('python-netaddr')
478+ import netaddr
479+
480+
481+def _validate_cidr(network):
482+ try:
483+ netaddr.IPNetwork(network)
484+ except (netaddr.core.AddrFormatError, ValueError):
485+ raise ValueError("Network (%s) is not in CIDR presentation format" %
486+ network)
487+
488+
489+def no_ip_found_error_out(network):
490+ errmsg = ("No IP address found in network: %s" % network)
491+ raise ValueError(errmsg)
492+
493+
494+def get_address_in_network(network, fallback=None, fatal=False):
495+ """Get an IPv4 or IPv6 address within the network from the host.
496+
497+ :param network (str): CIDR presentation format. For example,
498+ '192.168.1.0/24'.
499+ :param fallback (str): If no address is found, return fallback.
500+ :param fatal (boolean): If no address is found, fallback is not
501+ set and fatal is True then exit(1).
502+ """
503+ if network is None:
504+ if fallback is not None:
505+ return fallback
506+
507+ if fatal:
508+ no_ip_found_error_out(network)
509+ else:
510+ return None
511+
512+ _validate_cidr(network)
513+ network = netaddr.IPNetwork(network)
514+ for iface in netifaces.interfaces():
515+ addresses = netifaces.ifaddresses(iface)
516+ if network.version == 4 and netifaces.AF_INET in addresses:
517+ addr = addresses[netifaces.AF_INET][0]['addr']
518+ netmask = addresses[netifaces.AF_INET][0]['netmask']
519+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
520+ if cidr in network:
521+ return str(cidr.ip)
522+
523+ if network.version == 6 and netifaces.AF_INET6 in addresses:
524+ for addr in addresses[netifaces.AF_INET6]:
525+ if not addr['addr'].startswith('fe80'):
526+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
527+ addr['netmask']))
528+ if cidr in network:
529+ return str(cidr.ip)
530+
531+ if fallback is not None:
532+ return fallback
533+
534+ if fatal:
535+ no_ip_found_error_out(network)
536+
537+ return None
538+
539+
540+def is_ipv6(address):
541+ """Determine whether provided address is IPv6 or not."""
542+ try:
543+ address = netaddr.IPAddress(address)
544+ except netaddr.AddrFormatError:
545+ # probably a hostname - so not an address at all!
546+ return False
547+
548+ return address.version == 6
549+
550+
551+def is_address_in_network(network, address):
552+ """
553+ Determine whether the provided address is within a network range.
554+
555+ :param network (str): CIDR presentation format. For example,
556+ '192.168.1.0/24'.
557+ :param address: An individual IPv4 or IPv6 address without a net
558+ mask or subnet prefix. For example, '192.168.1.1'.
559+ :returns boolean: Flag indicating whether address is in network.
560+ """
561+ try:
562+ network = netaddr.IPNetwork(network)
563+ except (netaddr.core.AddrFormatError, ValueError):
564+ raise ValueError("Network (%s) is not in CIDR presentation format" %
565+ network)
566+
567+ try:
568+ address = netaddr.IPAddress(address)
569+ except (netaddr.core.AddrFormatError, ValueError):
570+ raise ValueError("Address (%s) is not in correct presentation format" %
571+ address)
572+
573+ if address in network:
574+ return True
575+ else:
576+ return False
577+
578+
579+def _get_for_address(address, key):
580+ """Retrieve an attribute of or the physical interface that
581+ the IP address provided could be bound to.
582+
583+ :param address (str): An individual IPv4 or IPv6 address without a net
584+ mask or subnet prefix. For example, '192.168.1.1'.
585+ :param key: 'iface' for the physical interface name or an attribute
586+ of the configured interface, for example 'netmask'.
587+ :returns str: Requested attribute or None if address is not bindable.
588+ """
589+ address = netaddr.IPAddress(address)
590+ for iface in netifaces.interfaces():
591+ addresses = netifaces.ifaddresses(iface)
592+ if address.version == 4 and netifaces.AF_INET in addresses:
593+ addr = addresses[netifaces.AF_INET][0]['addr']
594+ netmask = addresses[netifaces.AF_INET][0]['netmask']
595+ network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
596+ cidr = network.cidr
597+ if address in cidr:
598+ if key == 'iface':
599+ return iface
600+ else:
601+ return addresses[netifaces.AF_INET][0][key]
602+
603+ if address.version == 6 and netifaces.AF_INET6 in addresses:
604+ for addr in addresses[netifaces.AF_INET6]:
605+ if not addr['addr'].startswith('fe80'):
606+ network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
607+ addr['netmask']))
608+ cidr = network.cidr
609+ if address in cidr:
610+ if key == 'iface':
611+ return iface
612+ elif key == 'netmask' and cidr:
613+ return str(cidr).split('/')[1]
614+ else:
615+ return addr[key]
616+
617+ return None
618+
619+
620+get_iface_for_address = partial(_get_for_address, key='iface')
621+
622+
623+get_netmask_for_address = partial(_get_for_address, key='netmask')
624+
625+
626+def format_ipv6_addr(address):
627+ """If address is IPv6, wrap it in '[]' otherwise return None.
628+
629+ This is required by most configuration files when specifying IPv6
630+ addresses.
631+ """
632+ if is_ipv6(address):
633+ return "[%s]" % address
634+
635+ return None
636+
637+
638+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
639+ fatal=True, exc_list=None):
640+ """Return the assigned IP address for a given interface, if any."""
641+ # Extract nic if passed /dev/ethX
642+ if '/' in iface:
643+ iface = iface.split('/')[-1]
644+
645+ if not exc_list:
646+ exc_list = []
647+
648+ try:
649+ inet_num = getattr(netifaces, inet_type)
650+ except AttributeError:
651+ raise Exception("Unknown inet type '%s'" % str(inet_type))
652+
653+ interfaces = netifaces.interfaces()
654+ if inc_aliases:
655+ ifaces = []
656+ for _iface in interfaces:
657+ if iface == _iface or _iface.split(':')[0] == iface:
658+ ifaces.append(_iface)
659+
660+ if fatal and not ifaces:
661+ raise Exception("Invalid interface '%s'" % iface)
662+
663+ ifaces.sort()
664+ else:
665+ if iface not in interfaces:
666+ if fatal:
667+ raise Exception("Interface '%s' not found " % (iface))
668+ else:
669+ return []
670+
671+ else:
672+ ifaces = [iface]
673+
674+ addresses = []
675+ for netiface in ifaces:
676+ net_info = netifaces.ifaddresses(netiface)
677+ if inet_num in net_info:
678+ for entry in net_info[inet_num]:
679+ if 'addr' in entry and entry['addr'] not in exc_list:
680+ addresses.append(entry['addr'])
681+
682+ if fatal and not addresses:
683+ raise Exception("Interface '%s' doesn't have any %s addresses." %
684+ (iface, inet_type))
685+
686+ return sorted(addresses)
687+
688+
689+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
690+
691+
692+def get_iface_from_addr(addr):
693+ """Work out on which interface the provided address is configured."""
694+ for iface in netifaces.interfaces():
695+ addresses = netifaces.ifaddresses(iface)
696+ for inet_type in addresses:
697+ for _addr in addresses[inet_type]:
698+ _addr = _addr['addr']
699+ # link local
700+ ll_key = re.compile("(.+)%.*")
701+ raw = re.match(ll_key, _addr)
702+ if raw:
703+ _addr = raw.group(1)
704+
705+ if _addr == addr:
706+ log("Address '%s' is configured on iface '%s'" %
707+ (addr, iface))
708+ return iface
709+
710+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
711+ raise Exception(msg)
712+
713+
714+def sniff_iface(f):
715+ """Ensure decorated function is called with a value for iface.
716+
717+ If no iface provided, inject net iface inferred from unit private address.
718+ """
719+ def iface_sniffer(*args, **kwargs):
720+ if not kwargs.get('iface', None):
721+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
722+
723+ return f(*args, **kwargs)
724+
725+ return iface_sniffer
726+
727+
728+@sniff_iface
729+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
730+ dynamic_only=True):
731+ """Get assigned IPv6 address for a given interface.
732+
733+ Returns list of addresses found. If no address found, returns empty list.
734+
735+ If iface is None, we infer the current primary interface by doing a reverse
736+ lookup on the unit private-address.
737+
738+ We currently only support scope global IPv6 addresses i.e. non-temporary
739+ addresses. If no global IPv6 address is found, return the first one found
740+ in the ipv6 address list.
741+ """
742+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
743+ inc_aliases=inc_aliases, fatal=fatal,
744+ exc_list=exc_list)
745+
746+ if addresses:
747+ global_addrs = []
748+ for addr in addresses:
749+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
750+ m = re.match(key_scope_link_local, addr)
751+ if m:
752+ eui_64_mac = m.group(1)
753+ iface = m.group(2)
754+ else:
755+ global_addrs.append(addr)
756+
757+ if global_addrs:
758+ # Make sure any found global addresses are not temporary
759+ cmd = ['ip', 'addr', 'show', iface]
760+ out = subprocess.check_output(cmd).decode('UTF-8')
761+ if dynamic_only:
762+ key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
763+ else:
764+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
765+
766+ addrs = []
767+ for line in out.split('\n'):
768+ line = line.strip()
769+ m = re.match(key, line)
770+ if m and 'temporary' not in line:
771+ # Return the first valid address we find
772+ for addr in global_addrs:
773+ if m.group(1) == addr:
774+ if not dynamic_only or \
775+ m.group(1).endswith(eui_64_mac):
776+ addrs.append(addr)
777+
778+ if addrs:
779+ return addrs
780+
781+ if fatal:
782+ raise Exception("Interface '%s' does not have a scope global "
783+ "non-temporary ipv6 address." % iface)
784+
785+ return []
786+
787+
788+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
789+ """Return a list of bridges on the system."""
790+ b_regex = "%s/*/bridge" % vnic_dir
791+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
792+
793+
794+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
795+ """Return a list of nics comprising a given bridge on the system."""
796+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
797+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
798+
799+
800+def is_bridge_member(nic):
801+ """Check if a given nic is a member of a bridge."""
802+ for bridge in get_bridges():
803+ if nic in get_bridge_nics(bridge):
804+ return True
805+
806+ return False
807
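
The core of get_address_in_network() above is a netaddr containment check against each interface's addr/netmask pair. In isolation (addresses here are made up):

    # Standalone illustration of the CIDR match used by
    # get_address_in_network(); the 192.168.1.x values are examples only.
    import netaddr

    network = netaddr.IPNetwork('192.168.1.0/24')
    # netifaces reports an addr/netmask pair per interface; assume one:
    cidr = netaddr.IPNetwork('%s/%s' % ('192.168.1.23', '255.255.255.0'))
    if cidr in network:
        print(str(cidr.ip))  # 192.168.1.23, the address that gets returned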
808=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
809=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
810=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
811--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
812+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-01-15 16:18:44 +0000
813@@ -0,0 +1,92 @@
814+import six
815+from charmhelpers.contrib.amulet.deployment import (
816+ AmuletDeployment
817+)
818+
819+
820+class OpenStackAmuletDeployment(AmuletDeployment):
821+ """OpenStack amulet deployment.
822+
823+ This class inherits from AmuletDeployment and has additional support
824+ that is specifically for use by OpenStack charms.
825+ """
826+
827+ def __init__(self, series=None, openstack=None, source=None, stable=True):
828+ """Initialize the deployment environment."""
829+ super(OpenStackAmuletDeployment, self).__init__(series)
830+ self.openstack = openstack
831+ self.source = source
832+ self.stable = stable
833+ # Note(coreycb): this needs to be changed when new next branches come
834+ # out.
835+ self.current_next = "trusty"
836+
837+ def _determine_branch_locations(self, other_services):
838+ """Determine the branch locations for the other services.
839+
840+ Determine if the local branch being tested is derived from its
841+ stable or next (dev) branch, and based on this, use the corresponding
842+ stable or next branches for the other_services."""
843+ base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
844+
845+ if self.stable:
846+ for svc in other_services:
847+ temp = 'lp:charms/{}'
848+ svc['location'] = temp.format(svc['name'])
849+ else:
850+ for svc in other_services:
851+ if svc['name'] in base_charms:
852+ temp = 'lp:charms/{}'
853+ svc['location'] = temp.format(svc['name'])
854+ else:
855+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
856+ svc['location'] = temp.format(self.current_next,
857+ svc['name'])
858+ return other_services
859+
860+ def _add_services(self, this_service, other_services):
861+ """Add services to the deployment and set openstack-origin/source."""
862+ other_services = self._determine_branch_locations(other_services)
863+
864+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
865+ other_services)
866+
867+ services = other_services
868+ services.append(this_service)
869+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
870+ 'ceph-osd', 'ceph-radosgw']
871+
872+ if self.openstack:
873+ for svc in services:
874+ if svc['name'] not in use_source:
875+ config = {'openstack-origin': self.openstack}
876+ self.d.configure(svc['name'], config)
877+
878+ if self.source:
879+ for svc in services:
880+ if svc['name'] in use_source:
881+ config = {'source': self.source}
882+ self.d.configure(svc['name'], config)
883+
884+ def _configure_services(self, configs):
885+ """Configure all of the services."""
886+ for service, config in six.iteritems(configs):
887+ self.d.configure(service, config)
888+
889+ def _get_openstack_release(self):
890+ """Get openstack release.
891+
892+ Return an integer representing the enum value of the openstack
893+ release.
894+ """
895+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
896+ self.precise_havana, self.precise_icehouse,
897+ self.trusty_icehouse) = range(6)
898+ releases = {
899+ ('precise', None): self.precise_essex,
900+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
901+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
902+ ('precise', 'cloud:precise-havana'): self.precise_havana,
903+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
904+ ('trusty', None): self.trusty_icehouse}
905+ return releases[(self.series, self.openstack)]
906
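
The branch-location logic above reduces to picking one of two templates, e.g.:

    # Sketch of _determine_branch_locations(): base charms always come from
    # the stable namespace; other charms follow the stable/next choice.
    stable_tmpl = 'lp:charms/{}'
    next_tmpl = 'lp:~openstack-charmers/charms/{}/{}/next'
    print(stable_tmpl.format('mysql'))                 # lp:charms/mysql
    print(next_tmpl.format('trusty', 'ceph-radosgw'))  # next branch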
907=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
908--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
909+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-01-15 16:18:44 +0000
910@@ -0,0 +1,278 @@
911+import logging
912+import os
913+import time
914+import urllib
915+
916+import glanceclient.v1.client as glance_client
917+import keystoneclient.v2_0 as keystone_client
918+import novaclient.v1_1.client as nova_client
919+
920+import six
921+
922+from charmhelpers.contrib.amulet.utils import (
923+ AmuletUtils
924+)
925+
926+DEBUG = logging.DEBUG
927+ERROR = logging.ERROR
928+
929+
930+class OpenStackAmuletUtils(AmuletUtils):
931+ """OpenStack amulet utilities.
932+
933+ This class inherits from AmuletUtils and has additional support
934+ that is specifically for use by OpenStack charms.
935+ """
936+
937+ def __init__(self, log_level=ERROR):
938+ """Initialize the deployment environment."""
939+ super(OpenStackAmuletUtils, self).__init__(log_level)
940+
941+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
942+ public_port, expected):
943+ """Validate endpoint data.
944+
945+ Validate actual endpoint data vs expected endpoint data. The ports
946+ are used to find the matching endpoint.
947+ """
948+ found = False
949+ for ep in endpoints:
950+ self.log.debug('endpoint: {}'.format(repr(ep)))
951+ if (admin_port in ep.adminurl and
952+ internal_port in ep.internalurl and
953+ public_port in ep.publicurl):
954+ found = True
955+ actual = {'id': ep.id,
956+ 'region': ep.region,
957+ 'adminurl': ep.adminurl,
958+ 'internalurl': ep.internalurl,
959+ 'publicurl': ep.publicurl,
960+ 'service_id': ep.service_id}
961+ ret = self._validate_dict_data(expected, actual)
962+ if ret:
963+ return 'unexpected endpoint data - {}'.format(ret)
964+
965+ if not found:
966+ return 'endpoint not found'
967+
968+ def validate_svc_catalog_endpoint_data(self, expected, actual):
969+ """Validate service catalog endpoint data.
970+
971+ Validate a list of actual service catalog endpoints vs a list of
972+ expected service catalog endpoints.
973+ """
974+ self.log.debug('actual: {}'.format(repr(actual)))
975+ for k, v in six.iteritems(expected):
976+ if k in actual:
977+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
978+ if ret:
979+ return self.endpoint_error(k, ret)
980+ else:
981+ return "endpoint {} does not exist".format(k)
982+ return ret
983+
984+ def validate_tenant_data(self, expected, actual):
985+ """Validate tenant data.
986+
987+ Validate a list of actual tenant data vs list of expected tenant
988+ data.
989+ """
990+ self.log.debug('actual: {}'.format(repr(actual)))
991+ for e in expected:
992+ found = False
993+ for act in actual:
994+ a = {'enabled': act.enabled, 'description': act.description,
995+ 'name': act.name, 'id': act.id}
996+ if e['name'] == a['name']:
997+ found = True
998+ ret = self._validate_dict_data(e, a)
999+ if ret:
1000+ return "unexpected tenant data - {}".format(ret)
1001+ if not found:
1002+ return "tenant {} does not exist".format(e['name'])
1003+ return ret
1004+
1005+ def validate_role_data(self, expected, actual):
1006+ """Validate role data.
1007+
1008+ Validate a list of actual role data vs a list of expected role
1009+ data.
1010+ """
1011+ self.log.debug('actual: {}'.format(repr(actual)))
1012+ for e in expected:
1013+ found = False
1014+ for act in actual:
1015+ a = {'name': act.name, 'id': act.id}
1016+ if e['name'] == a['name']:
1017+ found = True
1018+ ret = self._validate_dict_data(e, a)
1019+ if ret:
1020+ return "unexpected role data - {}".format(ret)
1021+ if not found:
1022+ return "role {} does not exist".format(e['name'])
1023+ return ret
1024+
1025+ def validate_user_data(self, expected, actual):
1026+ """Validate user data.
1027+
1028+ Validate a list of actual user data vs a list of expected user
1029+ data.
1030+ """
1031+ self.log.debug('actual: {}'.format(repr(actual)))
1032+ for e in expected:
1033+ found = False
1034+ for act in actual:
1035+ a = {'enabled': act.enabled, 'name': act.name,
1036+ 'email': act.email, 'tenantId': act.tenantId,
1037+ 'id': act.id}
1038+ if e['name'] == a['name']:
1039+ found = True
1040+ ret = self._validate_dict_data(e, a)
1041+ if ret:
1042+ return "unexpected user data - {}".format(ret)
1043+ if not found:
1044+ return "user {} does not exist".format(e['name'])
1045+ return ret
1046+
1047+ def validate_flavor_data(self, expected, actual):
1048+ """Validate flavor data.
1049+
1050+ Validate a list of actual flavors vs a list of expected flavors.
1051+ """
1052+ self.log.debug('actual: {}'.format(repr(actual)))
1053+ act = [a.name for a in actual]
1054+ return self._validate_list_data(expected, act)
1055+
1056+ def tenant_exists(self, keystone, tenant):
1057+ """Return True if tenant exists."""
1058+ return tenant in [t.name for t in keystone.tenants.list()]
1059+
1060+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
1061+ tenant):
1062+ """Authenticates admin user with the keystone admin endpoint."""
1063+ unit = keystone_sentry
1064+ service_ip = unit.relation('shared-db',
1065+ 'mysql:shared-db')['private-address']
1066+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
1067+ return keystone_client.Client(username=user, password=password,
1068+ tenant_name=tenant, auth_url=ep)
1069+
1070+ def authenticate_keystone_user(self, keystone, user, password, tenant):
1071+ """Authenticates a regular user with the keystone public endpoint."""
1072+ ep = keystone.service_catalog.url_for(service_type='identity',
1073+ endpoint_type='publicURL')
1074+ return keystone_client.Client(username=user, password=password,
1075+ tenant_name=tenant, auth_url=ep)
1076+
1077+ def authenticate_glance_admin(self, keystone):
1078+ """Authenticates admin user with glance."""
1079+ ep = keystone.service_catalog.url_for(service_type='image',
1080+ endpoint_type='adminURL')
1081+ return glance_client.Client(ep, token=keystone.auth_token)
1082+
1083+ def authenticate_nova_user(self, keystone, user, password, tenant):
1084+ """Authenticates a regular user with nova-api."""
1085+ ep = keystone.service_catalog.url_for(service_type='identity',
1086+ endpoint_type='publicURL')
1087+ return nova_client.Client(username=user, api_key=password,
1088+ project_id=tenant, auth_url=ep)
1089+
1090+ def create_cirros_image(self, glance, image_name):
1091+ """Download the latest cirros image and upload it to glance."""
1092+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
1093+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
1094+ if http_proxy:
1095+ proxies = {'http': http_proxy}
1096+ opener = urllib.FancyURLopener(proxies)
1097+ else:
1098+ opener = urllib.FancyURLopener()
1099+
1100+ f = opener.open("http://download.cirros-cloud.net/version/released")
1101+ version = f.read().strip()
1102+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
1103+ local_path = os.path.join('tests', cirros_img)
1104+
1105+ if not os.path.exists(local_path):
1106+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
1107+ version, cirros_img)
1108+ opener.retrieve(cirros_url, local_path)
1109+ f.close()
1110+
1111+ with open(local_path) as f:
1112+ image = glance.images.create(name=image_name, is_public=True,
1113+ disk_format='qcow2',
1114+ container_format='bare', data=f)
1115+ count = 1
1116+ status = image.status
1117+ while status != 'active' and count < 10:
1118+ time.sleep(3)
1119+ image = glance.images.get(image.id)
1120+ status = image.status
1121+ self.log.debug('image status: {}'.format(status))
1122+ count += 1
1123+
1124+ if status != 'active':
1125+ self.log.error('image creation timed out')
1126+ return None
1127+
1128+ return image
1129+
1130+ def delete_image(self, glance, image):
1131+ """Delete the specified image."""
1132+ num_before = len(list(glance.images.list()))
1133+ glance.images.delete(image)
1134+
1135+ count = 1
1136+ num_after = len(list(glance.images.list()))
1137+ while num_after != (num_before - 1) and count < 10:
1138+ time.sleep(3)
1139+ num_after = len(list(glance.images.list()))
1140+ self.log.debug('number of images: {}'.format(num_after))
1141+ count += 1
1142+
1143+ if num_after != (num_before - 1):
1144+ self.log.error('image deletion timed out')
1145+ return False
1146+
1147+ return True
1148+
1149+ def create_instance(self, nova, image_name, instance_name, flavor):
1150+ """Create the specified instance."""
1151+ image = nova.images.find(name=image_name)
1152+ flavor = nova.flavors.find(name=flavor)
1153+ instance = nova.servers.create(name=instance_name, image=image,
1154+ flavor=flavor)
1155+
1156+ count = 1
1157+ status = instance.status
1158+ while status != 'ACTIVE' and count < 60:
1159+ time.sleep(3)
1160+ instance = nova.servers.get(instance.id)
1161+ status = instance.status
1162+ self.log.debug('instance status: {}'.format(status))
1163+ count += 1
1164+
1165+ if status != 'ACTIVE':
1166+ self.log.error('instance creation timed out')
1167+ return None
1168+
1169+ return instance
1170+
1171+ def delete_instance(self, nova, instance):
1172+ """Delete the specified instance."""
1173+ num_before = len(list(nova.servers.list()))
1174+ nova.servers.delete(instance)
1175+
1176+ count = 1
1177+ num_after = len(list(nova.servers.list()))
1178+ while num_after != (num_before - 1) and count < 10:
1179+ time.sleep(3)
1180+ num_after = len(list(nova.servers.list()))
1181+ self.log.debug('number of instances: {}'.format(num_after))
1182+ count += 1
1183+
1184+ if num_after != (num_before - 1):
1185+ self.log.error('instance deletion timed out')
1186+ return False
1187+
1188+ return True
1189
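
The create/delete helpers above all repeat one polling idiom: check, sleep three seconds, give up after a fixed number of tries. Distilled into a generic sketch:

    # Generic form of the polling loop used by create_instance(),
    # delete_image() and friends above.
    import time

    def wait_for(predicate, tries=10, delay=3):
        for _ in range(tries):
            if predicate():
                return True
            time.sleep(delay)
        return False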
1190=== added file 'hooks/charmhelpers/contrib/openstack/context.py'
1191--- hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
1192+++ hooks/charmhelpers/contrib/openstack/context.py 2015-01-15 16:18:44 +0000
1193@@ -0,0 +1,1038 @@
1194+import json
1195+import os
1196+import time
1197+from base64 import b64decode
1198+from subprocess import check_call
1199+
1200+import six
1201+
1202+from charmhelpers.fetch import (
1203+ apt_install,
1204+ filter_installed_packages,
1205+)
1206+from charmhelpers.core.hookenv import (
1207+ config,
1208+ is_relation_made,
1209+ local_unit,
1210+ log,
1211+ relation_get,
1212+ relation_ids,
1213+ related_units,
1214+ relation_set,
1215+ unit_get,
1216+ unit_private_ip,
1217+ charm_name,
1218+ DEBUG,
1219+ INFO,
1220+ WARNING,
1221+ ERROR,
1222+)
1223+
1224+from charmhelpers.core.sysctl import create as sysctl_create
1225+
1226+from charmhelpers.core.host import (
1227+ mkdir,
1228+ write_file,
1229+)
1230+from charmhelpers.contrib.hahelpers.cluster import (
1231+ determine_apache_port,
1232+ determine_api_port,
1233+ https,
1234+ is_clustered,
1235+)
1236+from charmhelpers.contrib.hahelpers.apache import (
1237+ get_cert,
1238+ get_ca_cert,
1239+ install_ca_cert,
1240+)
1241+from charmhelpers.contrib.openstack.neutron import (
1242+ neutron_plugin_attribute,
1243+)
1244+from charmhelpers.contrib.network.ip import (
1245+ get_address_in_network,
1246+ get_ipv6_addr,
1247+ get_netmask_for_address,
1248+ format_ipv6_addr,
1249+ is_address_in_network,
1250+)
1251+from charmhelpers.contrib.openstack.utils import get_host_ip
1252+
1253+CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
1254+ADDRESS_TYPES = ['admin', 'internal', 'public']
1255+
1256+
1257+class OSContextError(Exception):
1258+ pass
1259+
1260+
1261+def ensure_packages(packages):
1262+ """Install but do not upgrade required plugin packages."""
1263+ required = filter_installed_packages(packages)
1264+ if required:
1265+ apt_install(required, fatal=True)
1266+
1267+
1268+def context_complete(ctxt):
1269+ _missing = []
1270+ for k, v in six.iteritems(ctxt):
1271+ if v is None or v == '':
1272+ _missing.append(k)
1273+
1274+ if _missing:
1275+ log('Missing required data: %s' % ' '.join(_missing), level=INFO)
1276+ return False
1277+
1278+ return True
1279+
1280+
1281+def config_flags_parser(config_flags):
1282+ """Parses config flags string into dict.
1283+
1284+ The provided config_flags string may be a list of comma-separated values
1285+ which themselves may be comma-separated lists of values.
1286+ """
1287+ if config_flags.find('==') >= 0:
1288+ log("config_flags is not in expected format (key=value)", level=ERROR)
1289+ raise OSContextError
1290+
1291+ # strip the following from each value.
1292+ post_strippers = ' ,'
1293+ # we strip any leading/trailing '=' or ' ' from the string then
1294+ # split on '='.
1295+ split = config_flags.strip(' =').split('=')
1296+ limit = len(split)
1297+ flags = {}
1298+ for i in range(0, limit - 1):
1299+ current = split[i]
1300+ next = split[i + 1]
1301+ vindex = next.rfind(',')
1302+ if (i == limit - 2) or (vindex < 0):
1303+ value = next
1304+ else:
1305+ value = next[:vindex]
1306+
1307+ if i == 0:
1308+ key = current
1309+ else:
1310+ # if this not the first entry, expect an embedded key.
1311+ index = current.rfind(',')
1312+ if index < 0:
1313+ log("Invalid config value(s) at index %s" % (i), level=ERROR)
1314+ raise OSContextError
1315+ key = current[index + 1:]
1316+
1317+ # Add to collection.
1318+ flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
1319+
1320+ return flags
1321+
1322+
1323+class OSContextGenerator(object):
1324+ """Base class for all context generators."""
1325+ interfaces = []
1326+
1327+ def __call__(self):
1328+ raise NotImplementedError
1329+
1330+
1331+class SharedDBContext(OSContextGenerator):
1332+ interfaces = ['shared-db']
1333+
1334+ def __init__(self,
1335+ database=None, user=None, relation_prefix=None, ssl_dir=None):
1336+ """Allows inspecting relation for settings prefixed with
1337+ relation_prefix. This is useful for parsing access for multiple
1338+ databases returned via the shared-db interface (eg, nova_password,
1339+ quantum_password)
1340+ """
1341+ self.relation_prefix = relation_prefix
1342+ self.database = database
1343+ self.user = user
1344+ self.ssl_dir = ssl_dir
1345+
1346+ def __call__(self):
1347+ self.database = self.database or config('database')
1348+ self.user = self.user or config('database-user')
1349+ if None in [self.database, self.user]:
1350+ log("Could not generate shared_db context. Missing required charm "
1351+ "config options. (database name and user)", level=ERROR)
1352+ raise OSContextError
1353+
1354+ ctxt = {}
1355+
1356+ # NOTE(jamespage) if mysql charm provides a network upon which
1357+ # access to the database should be made, reconfigure relation
1358+ # with the service unit's local address and defer execution
1359+ access_network = relation_get('access-network')
1360+ if access_network is not None:
1361+ if self.relation_prefix is not None:
1362+ hostname_key = "{}_hostname".format(self.relation_prefix)
1363+ else:
1364+ hostname_key = "hostname"
1365+ access_hostname = get_address_in_network(access_network,
1366+ unit_get('private-address'))
1367+ set_hostname = relation_get(attribute=hostname_key,
1368+ unit=local_unit())
1369+ if set_hostname != access_hostname:
1370+ relation_set(relation_settings={hostname_key: access_hostname})
1371+ return ctxt # Defer any further hook execution for now....
1372+
1373+ password_setting = 'password'
1374+ if self.relation_prefix:
1375+ password_setting = self.relation_prefix + '_password'
1376+
1377+ for rid in relation_ids('shared-db'):
1378+ for unit in related_units(rid):
1379+ rdata = relation_get(rid=rid, unit=unit)
1380+ host = rdata.get('db_host')
1381+ host = format_ipv6_addr(host) or host
1382+ ctxt = {
1383+ 'database_host': host,
1384+ 'database': self.database,
1385+ 'database_user': self.user,
1386+ 'database_password': rdata.get(password_setting),
1387+ 'database_type': 'mysql'
1388+ }
1389+ if context_complete(ctxt):
1390+ db_ssl(rdata, ctxt, self.ssl_dir)
1391+ return ctxt
1392+ return {}
1393+
1394+
1395+class PostgresqlDBContext(OSContextGenerator):
1396+ interfaces = ['pgsql-db']
1397+
1398+ def __init__(self, database=None):
1399+ self.database = database
1400+
1401+ def __call__(self):
1402+ self.database = self.database or config('database')
1403+ if self.database is None:
1404+ log('Could not generate postgresql_db context. Missing required '
1405+ 'charm config options. (database name)', level=ERROR)
1406+ raise OSContextError
1407+
1408+ ctxt = {}
1409+ for rid in relation_ids(self.interfaces[0]):
1410+ for unit in related_units(rid):
1411+ rel_host = relation_get('host', rid=rid, unit=unit)
1412+ rel_user = relation_get('user', rid=rid, unit=unit)
1413+ rel_passwd = relation_get('password', rid=rid, unit=unit)
1414+ ctxt = {'database_host': rel_host,
1415+ 'database': self.database,
1416+ 'database_user': rel_user,
1417+ 'database_password': rel_passwd,
1418+ 'database_type': 'postgresql'}
1419+ if context_complete(ctxt):
1420+ return ctxt
1421+
1422+ return {}
1423+
1424+
1425+def db_ssl(rdata, ctxt, ssl_dir):
1426+ if 'ssl_ca' in rdata and ssl_dir:
1427+ ca_path = os.path.join(ssl_dir, 'db-client.ca')
1428+ with open(ca_path, 'w') as fh:
1429+ fh.write(b64decode(rdata['ssl_ca']))
1430+
1431+ ctxt['database_ssl_ca'] = ca_path
1432+ elif 'ssl_ca' in rdata:
1433+ log("Charm not setup for ssl support but ssl ca found", level=INFO)
1434+ return ctxt
1435+
1436+ if 'ssl_cert' in rdata:
1437+ cert_path = os.path.join(
1438+ ssl_dir, 'db-client.cert')
1439+ if not os.path.exists(cert_path):
1440+ log("Waiting 1m for ssl client cert validity", level=INFO)
1441+ time.sleep(60)
1442+
1443+ with open(cert_path, 'w') as fh:
1444+ fh.write(b64decode(rdata['ssl_cert']))
1445+
1446+ ctxt['database_ssl_cert'] = cert_path
1447+ key_path = os.path.join(ssl_dir, 'db-client.key')
1448+ with open(key_path, 'w') as fh:
1449+ fh.write(b64decode(rdata['ssl_key']))
1450+
1451+ ctxt['database_ssl_key'] = key_path
1452+
1453+ return ctxt
1454+
1455+
1456+class IdentityServiceContext(OSContextGenerator):
1457+ interfaces = ['identity-service']
1458+
1459+ def __call__(self):
1460+ log('Generating template context for identity-service', level=DEBUG)
1461+ ctxt = {}
1462+ for rid in relation_ids('identity-service'):
1463+ for unit in related_units(rid):
1464+ rdata = relation_get(rid=rid, unit=unit)
1465+ serv_host = rdata.get('service_host')
1466+ serv_host = format_ipv6_addr(serv_host) or serv_host
1467+ auth_host = rdata.get('auth_host')
1468+ auth_host = format_ipv6_addr(auth_host) or auth_host
1469+ svc_protocol = rdata.get('service_protocol') or 'http'
1470+ auth_protocol = rdata.get('auth_protocol') or 'http'
1471+ ctxt = {'service_port': rdata.get('service_port'),
1472+ 'service_host': serv_host,
1473+ 'auth_host': auth_host,
1474+ 'auth_port': rdata.get('auth_port'),
1475+ 'admin_tenant_name': rdata.get('service_tenant'),
1476+ 'admin_user': rdata.get('service_username'),
1477+ 'admin_password': rdata.get('service_password'),
1478+ 'service_protocol': svc_protocol,
1479+ 'auth_protocol': auth_protocol}
1480+ if context_complete(ctxt):
1481+ # NOTE(jamespage) this is required for >= icehouse
1482+ # so a missing value just indicates keystone needs
1483+ # upgrading
1484+ ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
1485+ return ctxt
1486+
1487+ return {}
1488+
1489+
1490+class AMQPContext(OSContextGenerator):
1491+
1492+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
1493+ self.ssl_dir = ssl_dir
1494+ self.rel_name = rel_name
1495+ self.relation_prefix = relation_prefix
1496+ self.interfaces = [rel_name]
1497+
1498+ def __call__(self):
1499+ log('Generating template context for amqp', level=DEBUG)
1500+ conf = config()
1501+ if self.relation_prefix:
1502+ user_setting = '%s-rabbit-user' % (self.relation_prefix)
1503+ vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
1504+ else:
1505+ user_setting = 'rabbit-user'
1506+ vhost_setting = 'rabbit-vhost'
1507+
1508+ try:
1509+ username = conf[user_setting]
1510+ vhost = conf[vhost_setting]
1511+ except KeyError as e:
1512+ log('Could not generate amqp context. Missing required charm '
1513+ 'config options: %s.' % e, level=ERROR)
1514+ raise OSContextError
1515+
1516+ ctxt = {}
1517+ for rid in relation_ids(self.rel_name):
1518+ ha_vip_only = False
1519+ for unit in related_units(rid):
1520+ if relation_get('clustered', rid=rid, unit=unit):
1521+ ctxt['clustered'] = True
1522+ vip = relation_get('vip', rid=rid, unit=unit)
1523+ vip = format_ipv6_addr(vip) or vip
1524+ ctxt['rabbitmq_host'] = vip
1525+ else:
1526+ host = relation_get('private-address', rid=rid, unit=unit)
1527+ host = format_ipv6_addr(host) or host
1528+ ctxt['rabbitmq_host'] = host
1529+
1530+ ctxt.update({
1531+ 'rabbitmq_user': username,
1532+ 'rabbitmq_password': relation_get('password', rid=rid,
1533+ unit=unit),
1534+ 'rabbitmq_virtual_host': vhost,
1535+ })
1536+
1537+ ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
1538+ if ssl_port:
1539+ ctxt['rabbit_ssl_port'] = ssl_port
1540+
1541+ ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
1542+ if ssl_ca:
1543+ ctxt['rabbit_ssl_ca'] = ssl_ca
1544+
1545+ if relation_get('ha_queues', rid=rid, unit=unit) is not None:
1546+ ctxt['rabbitmq_ha_queues'] = True
1547+
1548+ ha_vip_only = relation_get('ha-vip-only',
1549+ rid=rid, unit=unit) is not None
1550+
1551+ if context_complete(ctxt):
1552+ if 'rabbit_ssl_ca' in ctxt:
1553+ if not self.ssl_dir:
1554+ log("Charm not setup for ssl support but ssl ca "
1555+ "found", level=INFO)
1556+ break
1557+
1558+ ca_path = os.path.join(
1559+ self.ssl_dir, 'rabbit-client-ca.pem')
1560+ with open(ca_path, 'w') as fh:
1561+ fh.write(b64decode(ctxt['rabbit_ssl_ca']))
1562+ ctxt['rabbit_ssl_ca'] = ca_path
1563+
1564+ # Sufficient information found = break out!
1565+ break
1566+
1567+ # Used for active/active rabbitmq >= grizzly
1568+ if (('clustered' not in ctxt or ha_vip_only) and
1569+ len(related_units(rid)) > 1):
1570+ rabbitmq_hosts = []
1571+ for unit in related_units(rid):
1572+ host = relation_get('private-address', rid=rid, unit=unit)
1573+ host = format_ipv6_addr(host) or host
1574+ rabbitmq_hosts.append(host)
1575+
1576+ ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
1577+
1578+ if not context_complete(ctxt):
1579+ return {}
1580+
1581+ return ctxt
1582+
1583+
1584+class CephContext(OSContextGenerator):
1585+ """Generates context for /etc/ceph/ceph.conf templates."""
1586+ interfaces = ['ceph']
1587+
1588+ def __call__(self):
1589+ if not relation_ids('ceph'):
1590+ return {}
1591+
1592+ log('Generating template context for ceph', level=DEBUG)
1593+ mon_hosts = []
1594+ auth = None
1595+ key = None
1596+ use_syslog = str(config('use-syslog')).lower()
1597+ for rid in relation_ids('ceph'):
1598+ for unit in related_units(rid):
1599+ auth = relation_get('auth', rid=rid, unit=unit)
1600+ key = relation_get('key', rid=rid, unit=unit)
1601+ ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
1602+ unit=unit)
1603+ unit_priv_addr = relation_get('private-address', rid=rid,
1604+ unit=unit)
1605+ ceph_addr = ceph_pub_addr or unit_priv_addr
1606+ ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1607+ mon_hosts.append(ceph_addr)
1608+
1609+ ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
1610+ 'auth': auth,
1611+ 'key': key,
1612+ 'use_syslog': use_syslog}
1613+
1614+ if not os.path.isdir('/etc/ceph'):
1615+ os.mkdir('/etc/ceph')
1616+
1617+ if not context_complete(ctxt):
1618+ return {}
1619+
1620+ ensure_packages(['ceph-common'])
1621+ return ctxt
1622+
1623+
1624+class HAProxyContext(OSContextGenerator):
1625+ """Provides half a context for the haproxy template, which describes
1626+ all peers to be included in the cluster. Each charm needs to include
1627+ its own context generator that describes the port mapping.
1628+ """
1629+ interfaces = ['cluster']
1630+
1631+ def __init__(self, singlenode_mode=False):
1632+ self.singlenode_mode = singlenode_mode
1633+
1634+ def __call__(self):
1635+ if not relation_ids('cluster') and not self.singlenode_mode:
1636+ return {}
1637+
1638+ if config('prefer-ipv6'):
1639+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1640+ else:
1641+ addr = get_host_ip(unit_get('private-address'))
1642+
1643+ l_unit = local_unit().replace('/', '-')
1644+ cluster_hosts = {}
1645+
1646+ # NOTE(jamespage): build out map of configured network endpoints
1647+ # and associated backends
1648+ for addr_type in ADDRESS_TYPES:
1649+ cfg_opt = 'os-{}-network'.format(addr_type)
1650+ laddr = get_address_in_network(config(cfg_opt))
1651+ if laddr:
1652+ netmask = get_netmask_for_address(laddr)
1653+ cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
1654+ netmask),
1655+ 'backends': {l_unit: laddr}}
1656+ for rid in relation_ids('cluster'):
1657+ for unit in related_units(rid):
1658+ _laddr = relation_get('{}-address'.format(addr_type),
1659+ rid=rid, unit=unit)
1660+ if _laddr:
1661+ _unit = unit.replace('/', '-')
1662+ cluster_hosts[laddr]['backends'][_unit] = _laddr
1663+
1664+ # NOTE(jamespage) add backend based on private address - this
1665+ # will either be the only backend or the fallback if no acls
1666+ # match in the frontend
1667+ cluster_hosts[addr] = {}
1668+ netmask = get_netmask_for_address(addr)
1669+ cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
1670+ 'backends': {l_unit: addr}}
1671+ for rid in relation_ids('cluster'):
1672+ for unit in related_units(rid):
1673+ _laddr = relation_get('private-address',
1674+ rid=rid, unit=unit)
1675+ if _laddr:
1676+ _unit = unit.replace('/', '-')
1677+ cluster_hosts[addr]['backends'][_unit] = _laddr
1678+
1679+ ctxt = {
1680+ 'frontends': cluster_hosts,
1681+ 'default_backend': addr
1682+ }
1683+
1684+ if config('haproxy-server-timeout'):
1685+ ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
1686+
1687+ if config('haproxy-client-timeout'):
1688+ ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
1689+
1690+ if config('prefer-ipv6'):
1691+ ctxt['ipv6'] = True
1692+ ctxt['local_host'] = 'ip6-localhost'
1693+ ctxt['haproxy_host'] = '::'
1694+ ctxt['stat_port'] = ':::8888'
1695+ else:
1696+ ctxt['local_host'] = '127.0.0.1'
1697+ ctxt['haproxy_host'] = '0.0.0.0'
1698+ ctxt['stat_port'] = ':8888'
1699+
1700+ for frontend in cluster_hosts:
1701+ if (len(cluster_hosts[frontend]['backends']) > 1 or
1702+ self.singlenode_mode):
1703+ # Enable haproxy when we have enough peers.
1704+ log('Ensuring haproxy enabled in /etc/default/haproxy.',
1705+ level=DEBUG)
1706+ with open('/etc/default/haproxy', 'w') as out:
1707+ out.write('ENABLED=1\n')
1708+
1709+ return ctxt
1710+
1711+ log('HAProxy context is incomplete, this unit has no peers.',
1712+ level=INFO)
1713+ return {}
1714+
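As a sketch with invented addresses, the 'frontends' map built above keys each configured network by this unit's address on it, with all peer backends underneath; get_netmask_for_address() supplies the dotted netmask:

    # Hypothetical two-unit cluster on a 10.10.0.0/24 network; the
    # private-address entry doubles as the default (fallback) backend.
    ctxt = {
        'frontends': {
            '10.10.0.5': {
                'network': '10.10.0.5/255.255.255.0',
                'backends': {'ceph-radosgw-0': '10.10.0.5',
                             'ceph-radosgw-1': '10.10.0.6'},
            },
        },
        'default_backend': '10.10.0.5',
    }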
1715+
1716+class ImageServiceContext(OSContextGenerator):
1717+ interfaces = ['image-service']
1718+
1719+ def __call__(self):
1720+ """Obtains the glance API server from the image-service relation.
1721+ Useful in nova and cinder (currently).
1722+ """
1723+ log('Generating template context for image-service.', level=DEBUG)
1724+ rids = relation_ids('image-service')
1725+ if not rids:
1726+ return {}
1727+
1728+ for rid in rids:
1729+ for unit in related_units(rid):
1730+ api_server = relation_get('glance-api-server',
1731+ rid=rid, unit=unit)
1732+ if api_server:
1733+ return {'glance_api_servers': api_server}
1734+
1735+ log("ImageService context is incomplete. Missing required relation "
1736+ "data.", level=INFO)
1737+ return {}
1738+
1739+
1740+class ApacheSSLContext(OSContextGenerator):
1741+ """Generates a context for an apache vhost configuration that configures
1742+ HTTPS reverse proxying for one or many endpoints. Generated context
1743+ looks something like::
1744+
1745+ {
1746+ 'namespace': 'cinder',
1747+ 'private_address': 'iscsi.mycinderhost.com',
1748+ 'endpoints': [(8776, 8766), (8777, 8767)]
1749+ }
1750+
1751+ The endpoints list consists of tuples mapping external ports
1752+ to internal ports.
1753+ """
1754+ interfaces = ['https']
1755+
1756+ # charms should inherit this context and set external ports
1757+ # and service namespace accordingly.
1758+ external_ports = []
1759+ service_namespace = None
1760+
1761+ def enable_modules(self):
1762+ cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
1763+ check_call(cmd)
1764+
1765+ def configure_cert(self, cn=None):
1766+ ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
1767+ mkdir(path=ssl_dir)
1768+ cert, key = get_cert(cn)
1769+ if cn:
1770+ cert_filename = 'cert_{}'.format(cn)
1771+ key_filename = 'key_{}'.format(cn)
1772+ else:
1773+ cert_filename = 'cert'
1774+ key_filename = 'key'
1775+
1776+ write_file(path=os.path.join(ssl_dir, cert_filename),
1777+ content=b64decode(cert))
1778+ write_file(path=os.path.join(ssl_dir, key_filename),
1779+ content=b64decode(key))
1780+
1781+ def configure_ca(self):
1782+ ca_cert = get_ca_cert()
1783+ if ca_cert:
1784+ install_ca_cert(b64decode(ca_cert))
1785+
1786+ def canonical_names(self):
1787+ """Figure out which canonical names clients will access this service.
1788+ """
1789+ cns = []
1790+ for r_id in relation_ids('identity-service'):
1791+ for unit in related_units(r_id):
1792+ rdata = relation_get(rid=r_id, unit=unit)
1793+ for k in rdata:
1794+ if k.startswith('ssl_key_'):
1795+ cns.append(k[len('ssl_key_'):])  # lstrip() strips chars, not a prefix
1796+
1797+ return sorted(list(set(cns)))
1798+
1799+ def get_network_addresses(self):
1800+ """For each network configured, return corresponding address and vip
1801+ (if available).
1802+
1803+ Returns a list of tuples of the form:
1804+
1805+ [(address_in_net_a, vip_in_net_a),
1806+ (address_in_net_b, vip_in_net_b),
1807+ ...]
1808+
1809+ or, if no vip(s) available:
1810+
1811+ [(address_in_net_a, address_in_net_a),
1812+ (address_in_net_b, address_in_net_b),
1813+ ...]
1814+ """
1815+ addresses = []
1816+ if config('vip'):
1817+ vips = config('vip').split()
1818+ else:
1819+ vips = []
1820+
1821+ for net_type in ['os-internal-network', 'os-admin-network',
1822+ 'os-public-network']:
1823+ addr = get_address_in_network(config(net_type),
1824+ unit_get('private-address'))
1825+ if len(vips) > 1 and is_clustered():
1826+ if not config(net_type):
1827+ log("Multiple networks configured but net_type "
1828+ "is None (%s)." % net_type, level=WARNING)
1829+ continue
1830+
1831+ for vip in vips:
1832+ if is_address_in_network(config(net_type), vip):
1833+ addresses.append((addr, vip))
1834+ break
1835+
1836+ elif is_clustered() and config('vip'):
1837+ addresses.append((addr, config('vip')))
1838+ else:
1839+ addresses.append((addr, addr))
1840+
1841+ return sorted(addresses)
1842+
1843+ def __call__(self):
1844+ if isinstance(self.external_ports, six.string_types):
1845+ self.external_ports = [self.external_ports]
1846+
1847+ if not self.external_ports or not https():
1848+ return {}
1849+
1850+ self.configure_ca()
1851+ self.enable_modules()
1852+
1853+ ctxt = {'namespace': self.service_namespace,
1854+ 'endpoints': [],
1855+ 'ext_ports': []}
1856+
1857+ for cn in self.canonical_names():
1858+ self.configure_cert(cn)
1859+
1860+ addresses = self.get_network_addresses()
1861+ for address, endpoint in sorted(set(addresses)):
1862+ for api_port in self.external_ports:
1863+ ext_port = determine_apache_port(api_port,
1864+ singlenode_mode=True)
1865+ int_port = determine_api_port(api_port, singlenode_mode=True)
1866+ portmap = (address, endpoint, int(ext_port), int(int_port))
1867+ ctxt['endpoints'].append(portmap)
1868+ ctxt['ext_ports'].append(int(ext_port))
1869+
1870+ ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
1871+ return ctxt
1872+
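Charms consume ApacheSSLContext by subclassing it and filling in the two attributes it deliberately leaves empty; a minimal sketch (the port and namespace are assumptions, not taken from this charm):

    from charmhelpers.contrib.openstack import context

    class HTTPSContext(context.ApacheSSLContext):
        # external port apache will listen on; the matching backend
        # port is derived via determine_apache_port() at call time.
        external_ports = [80]
        service_namespace = 'ceph-radosgw'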
1873+
1874+class NeutronContext(OSContextGenerator):
1875+ interfaces = []
1876+
1877+ @property
1878+ def plugin(self):
1879+ return None
1880+
1881+ @property
1882+ def network_manager(self):
1883+ return None
1884+
1885+ @property
1886+ def packages(self):
1887+ return neutron_plugin_attribute(self.plugin, 'packages',
1888+ self.network_manager)
1889+
1890+ @property
1891+ def neutron_security_groups(self):
1892+ return None
1893+
1894+ def _ensure_packages(self):
1895+ for pkgs in self.packages:
1896+ ensure_packages(pkgs)
1897+
1898+ def _save_flag_file(self):
1899+ if self.network_manager == 'quantum':
1900+ _file = '/etc/nova/quantum_plugin.conf'
1901+ else:
1902+ _file = '/etc/nova/neutron_plugin.conf'
1903+
1904+ with open(_file, 'wb') as out:
1905+ out.write(self.plugin + '\n')
1906+
1907+ def ovs_ctxt(self):
1908+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1909+ self.network_manager)
1910+ config = neutron_plugin_attribute(self.plugin, 'config',
1911+ self.network_manager)
1912+ ovs_ctxt = {'core_plugin': driver,
1913+ 'neutron_plugin': 'ovs',
1914+ 'neutron_security_groups': self.neutron_security_groups,
1915+ 'local_ip': unit_private_ip(),
1916+ 'config': config}
1917+
1918+ return ovs_ctxt
1919+
1920+ def nvp_ctxt(self):
1921+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1922+ self.network_manager)
1923+ config = neutron_plugin_attribute(self.plugin, 'config',
1924+ self.network_manager)
1925+ nvp_ctxt = {'core_plugin': driver,
1926+ 'neutron_plugin': 'nvp',
1927+ 'neutron_security_groups': self.neutron_security_groups,
1928+ 'local_ip': unit_private_ip(),
1929+ 'config': config}
1930+
1931+ return nvp_ctxt
1932+
1933+ def n1kv_ctxt(self):
1934+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1935+ self.network_manager)
1936+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
1937+ self.network_manager)
1938+ n1kv_user_config_flags = config('n1kv-config-flags')
1939+ restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
1940+ n1kv_ctxt = {'core_plugin': driver,
1941+ 'neutron_plugin': 'n1kv',
1942+ 'neutron_security_groups': self.neutron_security_groups,
1943+ 'local_ip': unit_private_ip(),
1944+ 'config': n1kv_config,
1945+ 'vsm_ip': config('n1kv-vsm-ip'),
1946+ 'vsm_username': config('n1kv-vsm-username'),
1947+ 'vsm_password': config('n1kv-vsm-password'),
1948+ 'restrict_policy_profiles': restrict_policy_profiles}
1949+
1950+ if n1kv_user_config_flags:
1951+ flags = config_flags_parser(n1kv_user_config_flags)
1952+ n1kv_ctxt['user_config_flags'] = flags
1953+
1954+ return n1kv_ctxt
1955+
1956+ def calico_ctxt(self):
1957+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1958+ self.network_manager)
1959+ config = neutron_plugin_attribute(self.plugin, 'config',
1960+ self.network_manager)
1961+ calico_ctxt = {'core_plugin': driver,
1962+ 'neutron_plugin': 'Calico',
1963+ 'neutron_security_groups': self.neutron_security_groups,
1964+ 'local_ip': unit_private_ip(),
1965+ 'config': config}
1966+
1967+ return calico_ctxt
1968+
1969+ def neutron_ctxt(self):
1970+ if https():
1971+ proto = 'https'
1972+ else:
1973+ proto = 'http'
1974+
1975+ if is_clustered():
1976+ host = config('vip')
1977+ else:
1978+ host = unit_get('private-address')
1979+
1980+ ctxt = {'network_manager': self.network_manager,
1981+ 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
1982+ return ctxt
1983+
1984+ def __call__(self):
1985+ self._ensure_packages()
1986+
1987+ if self.network_manager not in ['quantum', 'neutron']:
1988+ return {}
1989+
1990+ if not self.plugin:
1991+ return {}
1992+
1993+ ctxt = self.neutron_ctxt()
1994+
1995+ if self.plugin == 'ovs':
1996+ ctxt.update(self.ovs_ctxt())
1997+ elif self.plugin in ['nvp', 'nsx']:
1998+ ctxt.update(self.nvp_ctxt())
1999+ elif self.plugin == 'n1kv':
2000+ ctxt.update(self.n1kv_ctxt())
2001+ elif self.plugin == 'Calico':
2002+ ctxt.update(self.calico_ctxt())
2003+
2004+ alchemy_flags = config('neutron-alchemy-flags')
2005+ if alchemy_flags:
2006+ flags = config_flags_parser(alchemy_flags)
2007+ ctxt['neutron_alchemy_flags'] = flags
2008+
2009+ self._save_flag_file()
2010+ return ctxt
2011+
2012+
2013+class OSConfigFlagContext(OSContextGenerator):
2014+ """Provides support for user-defined config flags.
2015+
2016+ Users can define a comma-separated list of key=value pairs
2017+ in the charm configuration and apply them at any point in
2018+ any file by using a template flag.
2019+
2020+ Sometimes users might want config flags inserted within a
2021+ specific section, so this class allows users to specify the
2022+ template flag name, allowing for multiple template flags
2023+ (sections) within the same context.
2024+
2025+ NOTE: the value of config-flags may be a comma-separated list of
2026+ key=value pairs and some OpenStack config files support
2027+ comma-separated lists as values.
2028+ """
2029+
2030+ def __init__(self, charm_flag='config-flags',
2031+ template_flag='user_config_flags'):
2032+ """
2033+ :param charm_flag: config flags in charm configuration.
2034+ :param template_flag: insert point for user-defined flags in template
2035+ file.
2036+ """
2037+ super(OSConfigFlagContext, self).__init__()
2038+ self._charm_flag = charm_flag
2039+ self._template_flag = template_flag
2040+
2041+ def __call__(self):
2042+ config_flags = config(self._charm_flag)
2043+ if not config_flags:
2044+ return {}
2045+
2046+ return {self._template_flag:
2047+ config_flags_parser(config_flags)}
2048+
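A quick sketch of the round trip, assuming the operator has set the charm's config-flags option (parsed values land as strings):

    # juju set mycharm config-flags="osd_pool_default_size=3,debug=true"
    flags = OSConfigFlagContext()()
    # flags would then look something like:
    # {'user_config_flags': {'osd_pool_default_size': '3',
    #                        'debug': 'true'}}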
2049+
2050+class SubordinateConfigContext(OSContextGenerator):
2051+
2052+ """
2053+ Responsible for inspecting relations to subordinates that
2054+ may be exporting required config via a json blob.
2055+
2056+ The subordinate interface allows subordinates to export their
2057+ configuration requirements to the principal for multiple config
2058+ files and multiple services. I.e., a subordinate that has interfaces
2059+ to both glance and nova may export the following yaml blob as json::
2060+
2061+ glance:
2062+ /etc/glance/glance-api.conf:
2063+ sections:
2064+ DEFAULT:
2065+ - [key1, value1]
2066+ /etc/glance/glance-registry.conf:
2067+ MYSECTION:
2068+ - [key2, value2]
2069+ nova:
2070+ /etc/nova/nova.conf:
2071+ sections:
2072+ DEFAULT:
2073+ - [key3, value3]
2074+
2075+
2076+ It is then up to the principal charms to subscribe this context to
2077+ the service+config file it is interested in. Configuration data will
2078+ be available in the template context, in glance's case, as::
2079+
2080+ ctxt = {
2081+ ... other context ...
2082+ 'subordinate_config': {
2083+ 'DEFAULT': {
2084+ 'key1': 'value1',
2085+ },
2086+ 'MYSECTION': {
2087+ 'key2': 'value2',
2088+ },
2089+ }
2090+ }
2091+ """
2092+
2093+ def __init__(self, service, config_file, interface):
2094+ """
2095+ :param service : Service name key to query in any subordinate
2096+ data found
2097+ :param config_file : Service's config file to query sections
2098+ :param interface : Subordinate interface to inspect
2099+ """
2100+ self.service = service
2101+ self.config_file = config_file
2102+ self.interface = interface
2103+
2104+ def __call__(self):
2105+ ctxt = {'sections': {}}
2106+ for rid in relation_ids(self.interface):
2107+ for unit in related_units(rid):
2108+ sub_config = relation_get('subordinate_configuration',
2109+ rid=rid, unit=unit)
2110+ if sub_config and sub_config != '':
2111+ try:
2112+ sub_config = json.loads(sub_config)
2113+ except:
2114+ log('Could not parse JSON from subordinate_config '
2115+ 'setting from %s' % rid, level=ERROR)
2116+ continue
2117+
2118+ if self.service not in sub_config:
2119+ log('Found subordinate_config on %s but it contained '
2120+ 'nothing for %s service' % (rid, self.service),
2121+ level=INFO)
2122+ continue
2123+
2124+ sub_config = sub_config[self.service]
2125+ if self.config_file not in sub_config:
2126+ log('Found subordinate_config on %s but it contained '
2127+ 'nothing for %s' % (rid, self.config_file),
2128+ level=INFO)
2129+ continue
2130+
2131+ sub_config = sub_config[self.config_file]
2132+ for k, v in six.iteritems(sub_config):
2133+ if k == 'sections':
2134+ for section, config_dict in six.iteritems(v):
2135+ log("adding section '%s'" % (section),
2136+ level=DEBUG)
2137+ ctxt[k][section] = config_dict
2138+ else:
2139+ ctxt[k] = v
2140+
2141+ log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
2142+ return ctxt
2143+
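A principal charm wires this up per config file (configs being the charm's OSConfigRenderer, described further down in templating.py); the interface name below is hypothetical:

    # Subscribe glance-api.conf to configuration exported by
    # subordinates related over a 'glance-plugin' interface.
    sub_ctxt = SubordinateConfigContext(
        service='glance',
        config_file='/etc/glance/glance-api.conf',
        interface='glance-plugin')
    configs.register('/etc/glance/glance-api.conf', [sub_ctxt])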
2144+
2145+class LogLevelContext(OSContextGenerator):
2146+
2147+ def __call__(self):
2148+ ctxt = {}
2149+ ctxt['debug'] = \
2150+ False if config('debug') is None else config('debug')
2151+ ctxt['verbose'] = \
2152+ False if config('verbose') is None else config('verbose')
2153+
2154+ return ctxt
2155+
2156+
2157+class SyslogContext(OSContextGenerator):
2158+
2159+ def __call__(self):
2160+ ctxt = {'use_syslog': config('use-syslog')}
2161+ return ctxt
2162+
2163+
2164+class BindHostContext(OSContextGenerator):
2165+
2166+ def __call__(self):
2167+ if config('prefer-ipv6'):
2168+ return {'bind_host': '::'}
2169+ else:
2170+ return {'bind_host': '0.0.0.0'}
2171+
2172+
2173+class WorkerConfigContext(OSContextGenerator):
2174+
2175+ @property
2176+ def num_cpus(self):
2177+ try:
2178+ from psutil import NUM_CPUS
2179+ except ImportError:
2180+ apt_install('python-psutil', fatal=True)
2181+ from psutil import NUM_CPUS
2182+
2183+ return NUM_CPUS
2184+
2185+ def __call__(self):
2186+ multiplier = config('worker-multiplier') or 0
2187+ ctxt = {"workers": self.num_cpus * multiplier}
2188+ return ctxt
2189+
2190+
2191+class ZeroMQContext(OSContextGenerator):
2192+ interfaces = ['zeromq-configuration']
2193+
2194+ def __call__(self):
2195+ ctxt = {}
2196+ if is_relation_made('zeromq-configuration', 'host'):
2197+ for rid in relation_ids('zeromq-configuration'):
2198+ for unit in related_units(rid):
2199+ ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
2200+ ctxt['zmq_host'] = relation_get('host', unit, rid)
2201+
2202+ return ctxt
2203+
2204+
2205+class NotificationDriverContext(OSContextGenerator):
2206+
2207+ def __init__(self, zmq_relation='zeromq-configuration',
2208+ amqp_relation='amqp'):
2209+ """
2210+ :param zmq_relation: Name of Zeromq relation to check
2211+ """
2212+ self.zmq_relation = zmq_relation
2213+ self.amqp_relation = amqp_relation
2214+
2215+ def __call__(self):
2216+ ctxt = {'notifications': 'False'}
2217+ if is_relation_made(self.amqp_relation):
2218+ ctxt['notifications'] = "True"
2219+
2220+ return ctxt
2221+
2222+
2223+class SysctlContext(OSContextGenerator):
2224+ """This context check if the 'sysctl' option exists on configuration
2225+ then creates a file with the loaded contents"""
2226+ def __call__(self):
2227+ sysctl_dict = config('sysctl')
2228+ if sysctl_dict:
2229+ sysctl_create(sysctl_dict,
2230+ '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
2231+ return {'sysctl': sysctl_dict}
2232
2233=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
2234--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
2235+++ hooks/charmhelpers/contrib/openstack/ip.py 2015-01-15 16:18:44 +0000
2236@@ -0,0 +1,93 @@
2237+from charmhelpers.core.hookenv import (
2238+ config,
2239+ unit_get,
2240+)
2241+from charmhelpers.contrib.network.ip import (
2242+ get_address_in_network,
2243+ is_address_in_network,
2244+ is_ipv6,
2245+ get_ipv6_addr,
2246+)
2247+from charmhelpers.contrib.hahelpers.cluster import is_clustered
2248+
2249+PUBLIC = 'public'
2250+INTERNAL = 'int'
2251+ADMIN = 'admin'
2252+
2253+ADDRESS_MAP = {
2254+ PUBLIC: {
2255+ 'config': 'os-public-network',
2256+ 'fallback': 'public-address'
2257+ },
2258+ INTERNAL: {
2259+ 'config': 'os-internal-network',
2260+ 'fallback': 'private-address'
2261+ },
2262+ ADMIN: {
2263+ 'config': 'os-admin-network',
2264+ 'fallback': 'private-address'
2265+ }
2266+}
2267+
2268+
2269+def canonical_url(configs, endpoint_type=PUBLIC):
2270+ """Returns the correct HTTP URL to this host given the state of HTTPS
2271+ configuration, hacluster and charm configuration.
2272+
2273+ :param configs: OSTemplateRenderer config templating object to inspect
2274+ for a complete https context.
2275+ :param endpoint_type: str endpoint type to resolve.
2276+ :returns: str base URL for services on the current service unit.
2277+ """
2278+ scheme = 'http'
2279+ if 'https' in configs.complete_contexts():
2280+ scheme = 'https'
2281+ address = resolve_address(endpoint_type)
2282+ if is_ipv6(address):
2283+ address = "[{}]".format(address)
2284+ return '%s://%s' % (scheme, address)
2285+
2286+
2287+def resolve_address(endpoint_type=PUBLIC):
2288+ """Return unit address depending on net config.
2289+
2290+ If unit is clustered with vip(s) and has net splits defined, return vip on
2291+ correct network. If clustered with no nets defined, return primary vip.
2292+
2293+ If not clustered, return unit address ensuring address is on configured net
2294+ split if one is configured.
2295+
2296+ :param endpoint_type: Network endpoint type
2297+ """
2298+ resolved_address = None
2299+ vips = config('vip')
2300+ if vips:
2301+ vips = vips.split()
2302+
2303+ net_type = ADDRESS_MAP[endpoint_type]['config']
2304+ net_addr = config(net_type)
2305+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
2306+ clustered = is_clustered()
2307+ if clustered:
2308+ if not net_addr:
2309+ # If no net-splits defined, we expect a single vip
2310+ resolved_address = vips[0]
2311+ else:
2312+ for vip in vips:
2313+ if is_address_in_network(net_addr, vip):
2314+ resolved_address = vip
2315+ break
2316+ else:
2317+ if config('prefer-ipv6'):
2318+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
2319+ else:
2320+ fallback_addr = unit_get(net_fallback)
2321+
2322+ resolved_address = get_address_in_network(net_addr, fallback_addr)
2323+
2324+ if resolved_address is None:
2325+ raise ValueError("Unable to resolve a suitable IP address based on "
2326+ "charm state and configuration. (net_type=%s, "
2327+ "clustered=%s)" % (net_type, clustered))
2328+
2329+ return resolved_address
2330
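Typical hook-side use, e.g. when advertising endpoints (a sketch; configs is the charm's OSConfigRenderer instance):

    from charmhelpers.contrib.openstack.ip import (
        canonical_url, PUBLIC, INTERNAL, ADMIN)

    # Resolves to a vip, a network-split address or the unit address
    # as described above; raises ValueError if nothing suitable exists.
    public_url = '%s:%s' % (canonical_url(configs, PUBLIC), 80)
    internal_url = '%s:%s' % (canonical_url(configs, INTERNAL), 80)
    admin_url = '%s:%s' % (canonical_url(configs, ADMIN), 80)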
2331=== added file 'hooks/charmhelpers/contrib/openstack/neutron.py'
2332--- hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
2333+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-01-15 16:18:44 +0000
2334@@ -0,0 +1,223 @@
2335+# Various utilities for dealing with Neutron and the renaming from Quantum.
2336+
2337+from subprocess import check_output
2338+
2339+from charmhelpers.core.hookenv import (
2340+ config,
2341+ log,
2342+ ERROR,
2343+)
2344+
2345+from charmhelpers.contrib.openstack.utils import os_release
2346+
2347+
2348+def headers_package():
2349+ """Ensures correct linux-headers for running kernel are installed,
2350+ for building DKMS package"""
2351+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
2352+ return 'linux-headers-%s' % kver
2353+
2354+QUANTUM_CONF_DIR = '/etc/quantum'
2355+
2356+
2357+def kernel_version():
2358+ """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
2359+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
2360+ kver = kver.split('.')
2361+ return (int(kver[0]), int(kver[1]))
2362+
2363+
2364+def determine_dkms_package():
2365+ """ Determine which DKMS package should be used based on kernel version """
2366+ # NOTE: 3.13 kernels have support for GRE and VXLAN native
2367+ if kernel_version() >= (3, 13):
2368+ return []
2369+ else:
2370+ return ['openvswitch-datapath-dkms']
2371+
2372+
2373+# legacy
2374+
2375+
2376+def quantum_plugins():
2377+ from charmhelpers.contrib.openstack import context
2378+ return {
2379+ 'ovs': {
2380+ 'config': '/etc/quantum/plugins/openvswitch/'
2381+ 'ovs_quantum_plugin.ini',
2382+ 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
2383+ 'OVSQuantumPluginV2',
2384+ 'contexts': [
2385+ context.SharedDBContext(user=config('neutron-database-user'),
2386+ database=config('neutron-database'),
2387+ relation_prefix='neutron',
2388+ ssl_dir=QUANTUM_CONF_DIR)],
2389+ 'services': ['quantum-plugin-openvswitch-agent'],
2390+ 'packages': [[headers_package()] + determine_dkms_package(),
2391+ ['quantum-plugin-openvswitch-agent']],
2392+ 'server_packages': ['quantum-server',
2393+ 'quantum-plugin-openvswitch'],
2394+ 'server_services': ['quantum-server']
2395+ },
2396+ 'nvp': {
2397+ 'config': '/etc/quantum/plugins/nicira/nvp.ini',
2398+ 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
2399+ 'QuantumPlugin.NvpPluginV2',
2400+ 'contexts': [
2401+ context.SharedDBContext(user=config('neutron-database-user'),
2402+ database=config('neutron-database'),
2403+ relation_prefix='neutron',
2404+ ssl_dir=QUANTUM_CONF_DIR)],
2405+ 'services': [],
2406+ 'packages': [],
2407+ 'server_packages': ['quantum-server',
2408+ 'quantum-plugin-nicira'],
2409+ 'server_services': ['quantum-server']
2410+ }
2411+ }
2412+
2413+NEUTRON_CONF_DIR = '/etc/neutron'
2414+
2415+
2416+def neutron_plugins():
2417+ from charmhelpers.contrib.openstack import context
2418+ release = os_release('nova-common')
2419+ plugins = {
2420+ 'ovs': {
2421+ 'config': '/etc/neutron/plugins/openvswitch/'
2422+ 'ovs_neutron_plugin.ini',
2423+ 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
2424+ 'OVSNeutronPluginV2',
2425+ 'contexts': [
2426+ context.SharedDBContext(user=config('neutron-database-user'),
2427+ database=config('neutron-database'),
2428+ relation_prefix='neutron',
2429+ ssl_dir=NEUTRON_CONF_DIR)],
2430+ 'services': ['neutron-plugin-openvswitch-agent'],
2431+ 'packages': [[headers_package()] + determine_dkms_package(),
2432+ ['neutron-plugin-openvswitch-agent']],
2433+ 'server_packages': ['neutron-server',
2434+ 'neutron-plugin-openvswitch'],
2435+ 'server_services': ['neutron-server']
2436+ },
2437+ 'nvp': {
2438+ 'config': '/etc/neutron/plugins/nicira/nvp.ini',
2439+ 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
2440+ 'NeutronPlugin.NvpPluginV2',
2441+ 'contexts': [
2442+ context.SharedDBContext(user=config('neutron-database-user'),
2443+ database=config('neutron-database'),
2444+ relation_prefix='neutron',
2445+ ssl_dir=NEUTRON_CONF_DIR)],
2446+ 'services': [],
2447+ 'packages': [],
2448+ 'server_packages': ['neutron-server',
2449+ 'neutron-plugin-nicira'],
2450+ 'server_services': ['neutron-server']
2451+ },
2452+ 'nsx': {
2453+ 'config': '/etc/neutron/plugins/vmware/nsx.ini',
2454+ 'driver': 'vmware',
2455+ 'contexts': [
2456+ context.SharedDBContext(user=config('neutron-database-user'),
2457+ database=config('neutron-database'),
2458+ relation_prefix='neutron',
2459+ ssl_dir=NEUTRON_CONF_DIR)],
2460+ 'services': [],
2461+ 'packages': [],
2462+ 'server_packages': ['neutron-server',
2463+ 'neutron-plugin-vmware'],
2464+ 'server_services': ['neutron-server']
2465+ },
2466+ 'n1kv': {
2467+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
2468+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
2469+ 'contexts': [
2470+ context.SharedDBContext(user=config('neutron-database-user'),
2471+ database=config('neutron-database'),
2472+ relation_prefix='neutron',
2473+ ssl_dir=NEUTRON_CONF_DIR)],
2474+ 'services': [],
2475+ 'packages': [[headers_package()] + determine_dkms_package(),
2476+ ['neutron-plugin-cisco']],
2477+ 'server_packages': ['neutron-server',
2478+ 'neutron-plugin-cisco'],
2479+ 'server_services': ['neutron-server']
2480+ },
2481+ 'Calico': {
2482+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
2483+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
2484+ 'contexts': [
2485+ context.SharedDBContext(user=config('neutron-database-user'),
2486+ database=config('neutron-database'),
2487+ relation_prefix='neutron',
2488+ ssl_dir=NEUTRON_CONF_DIR)],
2489+ 'services': ['calico-felix',
2490+ 'bird',
2491+ 'neutron-dhcp-agent',
2492+ 'nova-api-metadata'],
2493+ 'packages': [[headers_package()] + determine_dkms_package(),
2494+ ['calico-compute',
2495+ 'bird',
2496+ 'neutron-dhcp-agent',
2497+ 'nova-api-metadata']],
2498+ 'server_packages': ['neutron-server', 'calico-control'],
2499+ 'server_services': ['neutron-server']
2500+ }
2501+ }
2502+ if release >= 'icehouse':
2503+ # NOTE: patch in ml2 plugin for icehouse onwards
2504+ plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
2505+ plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
2506+ plugins['ovs']['server_packages'] = ['neutron-server',
2507+ 'neutron-plugin-ml2']
2508+ # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
2509+ plugins['nvp'] = plugins['nsx']
2510+ return plugins
2511+
2512+
2513+def neutron_plugin_attribute(plugin, attr, net_manager=None):
2514+ manager = net_manager or network_manager()
2515+ if manager == 'quantum':
2516+ plugins = quantum_plugins()
2517+ elif manager == 'neutron':
2518+ plugins = neutron_plugins()
2519+ else:
2520+ log("Network manager '%s' does not support plugins." % (manager),
2521+ level=ERROR)
2522+ raise Exception
2523+
2524+ try:
2525+ _plugin = plugins[plugin]
2526+ except KeyError:
2527+ log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
2528+ raise Exception
2529+
2530+ try:
2531+ return _plugin[attr]
2532+ except KeyError:
2533+ return None
2534+
2535+
2536+def network_manager():
2537+ '''
2538+ Deals with the renaming of Quantum to Neutron in H and any situations
2539+ that require compatibility (e.g., deploying H with network-manager=quantum,
2540+ upgrading from G).
2541+ '''
2542+ release = os_release('nova-common')
2543+ manager = config('network-manager').lower()
2544+
2545+ if manager not in ['quantum', 'neutron']:
2546+ return manager
2547+
2548+ if release in ['essex']:
2549+ # E does not support neutron
2550+ log('Neutron networking not supported in Essex.', level=ERROR)
2551+ raise Exception
2552+ elif release in ['folsom', 'grizzly']:
2553+ # neutron is named quantum in F and G
2554+ return 'quantum'
2555+ else:
2556+ # ensure accurate naming for all releases post-H
2557+ return 'neutron'
2558
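For illustration, resolving plugin metadata (results assume an icehouse install, where the ml2 patching above applies):

    from charmhelpers.contrib.openstack.neutron import (
        neutron_plugin_attribute)

    # Nested package lists as defined in neutron_plugins() above:
    pkgs = neutron_plugin_attribute('ovs', 'packages',
                                    net_manager='neutron')
    cfg = neutron_plugin_attribute('ovs', 'config',
                                   net_manager='neutron')
    # cfg == '/etc/neutron/plugins/ml2/ml2_conf.ini' on icehouse+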
2559=== added directory 'hooks/charmhelpers/contrib/openstack/templates'
2560=== added file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py'
2561--- hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
2562+++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 2015-01-15 16:18:44 +0000
2563@@ -0,0 +1,2 @@
2564+# dummy __init__.py to fool syncer into thinking this is a syncable python
2565+# module
2566
2567=== added file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
2568--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000
2569+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-01-15 16:18:44 +0000
2570@@ -0,0 +1,15 @@
2571+###############################################################################
2572+# [ WARNING ]
2573+# ceph configuration file maintained by Juju
2574+# local changes may be overwritten.
2575+###############################################################################
2576+[global]
2577+{% if auth -%}
2578+ auth_supported = {{ auth }}
2579+ keyring = /etc/ceph/$cluster.$name.keyring
2580+ mon host = {{ mon_hosts }}
2581+{% endif -%}
2582+ log to syslog = {{ use_syslog }}
2583+ err to syslog = {{ use_syslog }}
2584+ clog to syslog = {{ use_syslog }}
2585+
2586
2587=== added file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
2588--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000
2589+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-01-15 16:18:44 +0000
2590@@ -0,0 +1,58 @@
2591+global
2592+ log {{ local_host }} local0
2593+ log {{ local_host }} local1 notice
2594+ maxconn 20000
2595+ user haproxy
2596+ group haproxy
2597+ spread-checks 0
2598+
2599+defaults
2600+ log global
2601+ mode tcp
2602+ option tcplog
2603+ option dontlognull
2604+ retries 3
2605+ timeout queue 1000
2606+ timeout connect 1000
2607+{% if haproxy_client_timeout -%}
2608+ timeout client {{ haproxy_client_timeout }}
2609+{% else -%}
2610+ timeout client 30000
2611+{% endif -%}
2612+
2613+{% if haproxy_server_timeout -%}
2614+ timeout server {{ haproxy_server_timeout }}
2615+{% else -%}
2616+ timeout server 30000
2617+{% endif -%}
2618+
2619+listen stats {{ stat_port }}
2620+ mode http
2621+ stats enable
2622+ stats hide-version
2623+ stats realm Haproxy\ Statistics
2624+ stats uri /
2625+ stats auth admin:password
2626+
2627+{% if frontends -%}
2628+{% for service, ports in service_ports.items() -%}
2629+frontend tcp-in_{{ service }}
2630+ bind *:{{ ports[0] }}
2631+ {% if ipv6 -%}
2632+ bind :::{{ ports[0] }}
2633+ {% endif -%}
2634+ {% for frontend in frontends -%}
2635+ acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
2636+ use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
2637+ {% endfor -%}
2638+ default_backend {{ service }}_{{ default_backend }}
2639+
2640+{% for frontend in frontends -%}
2641+backend {{ service }}_{{ frontend }}
2642+ balance leastconn
2643+ {% for unit, address in frontends[frontend]['backends'].items() -%}
2644+ server {{ unit }} {{ address }}:{{ ports[1] }} check
2645+ {% endfor %}
2646+{% endfor -%}
2647+{% endfor -%}
2648+{% endif -%}
2649
2650=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend'
2651--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 1970-01-01 00:00:00 +0000
2652+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2015-01-15 16:18:44 +0000
2653@@ -0,0 +1,24 @@
2654+{% if endpoints -%}
2655+{% for ext_port in ext_ports -%}
2656+Listen {{ ext_port }}
2657+{% endfor -%}
2658+{% for address, endpoint, ext, int in endpoints -%}
2659+<VirtualHost {{ address }}:{{ ext }}>
2660+ ServerName {{ endpoint }}
2661+ SSLEngine on
2662+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
2663+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
2664+ ProxyPass / http://localhost:{{ int }}/
2665+ ProxyPassReverse / http://localhost:{{ int }}/
2666+ ProxyPreserveHost on
2667+</VirtualHost>
2668+{% endfor -%}
2669+<Proxy *>
2670+ Order deny,allow
2671+ Allow from all
2672+</Proxy>
2673+<Location />
2674+ Order allow,deny
2675+ Allow from all
2676+</Location>
2677+{% endif -%}
2678
2679=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf'
2680--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 1970-01-01 00:00:00 +0000
2681+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2015-01-15 16:18:44 +0000
2682@@ -0,0 +1,24 @@
2683+{% if endpoints -%}
2684+{% for ext_port in ext_ports -%}
2685+Listen {{ ext_port }}
2686+{% endfor -%}
2687+{% for address, endpoint, ext, int in endpoints -%}
2688+<VirtualHost {{ address }}:{{ ext }}>
2689+ ServerName {{ endpoint }}
2690+ SSLEngine on
2691+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
2692+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
2693+ ProxyPass / http://localhost:{{ int }}/
2694+ ProxyPassReverse / http://localhost:{{ int }}/
2695+ ProxyPreserveHost on
2696+</VirtualHost>
2697+{% endfor -%}
2698+<Proxy *>
2699+ Order deny,allow
2700+ Allow from all
2701+</Proxy>
2702+<Location />
2703+ Order allow,deny
2704+ Allow from all
2705+</Location>
2706+{% endif -%}
2707
2708=== added file 'hooks/charmhelpers/contrib/openstack/templating.py'
2709--- hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000
2710+++ hooks/charmhelpers/contrib/openstack/templating.py 2015-01-15 16:18:44 +0000
2711@@ -0,0 +1,279 @@
2712+import os
2713+
2714+import six
2715+
2716+from charmhelpers.fetch import apt_install
2717+from charmhelpers.core.hookenv import (
2718+ log,
2719+ ERROR,
2720+ INFO
2721+)
2722+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
2723+
2724+try:
2725+ from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
2726+except ImportError:
2727+ # python-jinja2 may not be installed yet, or we're running unittests.
2728+ FileSystemLoader = ChoiceLoader = Environment = exceptions = None
2729+
2730+
2731+class OSConfigException(Exception):
2732+ pass
2733+
2734+
2735+def get_loader(templates_dir, os_release):
2736+ """
2737+ Create a jinja2.ChoiceLoader containing template dirs up to
2738+ and including os_release. If a release-specific template directory
2739+ is missing under templates_dir, it will be omitted from the loader.
2740+ templates_dir is added to the bottom of the search list as a base
2741+ loading dir.
2742+
2743+ A charm may also ship a templates dir with this module
2744+ and it will be appended to the bottom of the search list, eg::
2745+
2746+ hooks/charmhelpers/contrib/openstack/templates
2747+
2748+ :param templates_dir (str): Base template directory containing release
2749+ sub-directories.
2750+ :param os_release (str): OpenStack release codename to construct template
2751+ loader.
2752+ :returns: jinja2.ChoiceLoader constructed with a list of
2753+ jinja2.FilesystemLoaders, ordered in descending
2754+ order by OpenStack release.
2755+ """
2756+ tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
2757+ for rel in six.itervalues(OPENSTACK_CODENAMES)]
2758+
2759+ if not os.path.isdir(templates_dir):
2760+ log('Templates directory not found @ %s.' % templates_dir,
2761+ level=ERROR)
2762+ raise OSConfigException
2763+
2764+ # the bottom contains templates_dir and possibly a common templates dir
2765+ # shipped with the helper.
2766+ loaders = [FileSystemLoader(templates_dir)]
2767+ helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
2768+ if os.path.isdir(helper_templates):
2769+ loaders.append(FileSystemLoader(helper_templates))
2770+
2771+ for rel, tmpl_dir in tmpl_dirs:
2772+ if os.path.isdir(tmpl_dir):
2773+ loaders.insert(0, FileSystemLoader(tmpl_dir))
2774+ if rel == os_release:
2775+ break
2776+ log('Creating choice loader with dirs: %s' %
2777+ [l.searchpath for l in loaders], level=INFO)
2778+ return ChoiceLoader(loaders)
2779+
2780+
2781+class OSConfigTemplate(object):
2782+ """
2783+ Associates a config file template with a list of context generators.
2784+ Responsible for constructing a template context based on those generators.
2785+ """
2786+ def __init__(self, config_file, contexts):
2787+ self.config_file = config_file
2788+
2789+ if hasattr(contexts, '__call__'):
2790+ self.contexts = [contexts]
2791+ else:
2792+ self.contexts = contexts
2793+
2794+ self._complete_contexts = []
2795+
2796+ def context(self):
2797+ ctxt = {}
2798+ for context in self.contexts:
2799+ _ctxt = context()
2800+ if _ctxt:
2801+ ctxt.update(_ctxt)
2802+ # track interfaces for every complete context.
2803+ [self._complete_contexts.append(interface)
2804+ for interface in context.interfaces
2805+ if interface not in self._complete_contexts]
2806+ return ctxt
2807+
2808+ def complete_contexts(self):
2809+ '''
2810+ Return a list of interfaces that have satisfied contexts.
2811+ '''
2812+ if self._complete_contexts:
2813+ return self._complete_contexts
2814+ self.context()
2815+ return self._complete_contexts
2816+
2817+
2818+class OSConfigRenderer(object):
2819+ """
2820+ This class provides a common templating system to be used by OpenStack
2821+ charms. It is intended to help charms share common code and templates,
2822+ and ease the burden of managing config templates across multiple OpenStack
2823+ releases.
2824+
2825+ Basic usage::
2826+
2827+ # import some common context generators from charmhelpers
2828+ from charmhelpers.contrib.openstack import context
2829+
2830+ # Create a renderer object for a specific OS release.
2831+ configs = OSConfigRenderer(templates_dir='/tmp/templates',
2832+ openstack_release='folsom')
2833+ # register some config files with context generators.
2834+ configs.register(config_file='/etc/nova/nova.conf',
2835+ contexts=[context.SharedDBContext(),
2836+ context.AMQPContext()])
2837+ configs.register(config_file='/etc/nova/api-paste.ini',
2838+ contexts=[context.IdentityServiceContext()])
2839+ configs.register(config_file='/etc/haproxy/haproxy.conf',
2840+ contexts=[context.HAProxyContext()])
2841+ # write out a single config
2842+ configs.write('/etc/nova/nova.conf')
2843+ # write out all registered configs
2844+ configs.write_all()
2845+
2846+ **OpenStack Releases and template loading**
2847+
2848+ When the object is instantiated, it is associated with a specific OS
2849+ release. This dictates how the template loader will be constructed.
2850+
2851+ The constructed loader attempts to load the template from several places
2852+ in the following order:
2853+ - from the most recent OS release-specific template dir (if one exists)
2854+ - the base templates_dir
2855+ - a template directory shipped in the charm with this helper file.
2856+
2857+ For the example above, '/tmp/templates' contains the following structure::
2858+
2859+ /tmp/templates/nova.conf
2860+ /tmp/templates/api-paste.ini
2861+ /tmp/templates/grizzly/api-paste.ini
2862+ /tmp/templates/havana/api-paste.ini
2863+
2864+ Since it was registered with the grizzly release, it first searches
2865+ the grizzly directory for nova.conf, then the templates dir.
2866+
2867+ When writing api-paste.ini, it will find the template in the grizzly
2868+ directory.
2869+
2870+ If the object were created with folsom, it would fall back to the
2871+ base templates dir for its api-paste.ini template.
2872+
2873+ This system should help manage changes in config files through
2874+ OpenStack releases, allowing charms to fall back to the most recently
2875+ updated config template for a given release.
2876+
2877+ The haproxy.conf, since it is not shipped in the templates dir, will
2878+ be loaded from the module directory's template directory, eg
2879+ $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
2880+ us to ship common templates (haproxy, apache) with the helpers.
2881+
2882+ **Context generators**
2883+
2884+ Context generators are used to generate template contexts during hook
2885+ execution. Doing so may require inspecting service relations, charm
2886+ config, etc. When registered, a config file is associated with a list
2887+ of generators. When a template is rendered and written, all context
2888+ generators are called in a chain to generate the context dictionary
2889+ passed to the jinja2 template. See context.py for more info.
2890+ """
2891+ def __init__(self, templates_dir, openstack_release):
2892+ if not os.path.isdir(templates_dir):
2893+ log('Could not locate templates dir %s' % templates_dir,
2894+ level=ERROR)
2895+ raise OSConfigException
2896+
2897+ self.templates_dir = templates_dir
2898+ self.openstack_release = openstack_release
2899+ self.templates = {}
2900+ self._tmpl_env = None
2901+
2902+ if None in [Environment, ChoiceLoader, FileSystemLoader]:
2903+ # if this code is running, the object is created pre-install hook.
2904+ # jinja2 shouldn't get touched until the module is reloaded on next
2905+ # hook execution, with proper jinja2 bits successfully imported.
2906+ apt_install('python-jinja2')
2907+
2908+ def register(self, config_file, contexts):
2909+ """
2910+ Register a config file with a list of context generators to be called
2911+ during rendering.
2912+ """
2913+ self.templates[config_file] = OSConfigTemplate(config_file=config_file,
2914+ contexts=contexts)
2915+ log('Registered config file: %s' % config_file, level=INFO)
2916+
2917+ def _get_tmpl_env(self):
2918+ if not self._tmpl_env:
2919+ loader = get_loader(self.templates_dir, self.openstack_release)
2920+ self._tmpl_env = Environment(loader=loader)
2921+
2922+ def _get_template(self, template):
2923+ self._get_tmpl_env()
2924+ template = self._tmpl_env.get_template(template)
2925+ log('Loaded template from %s' % template.filename, level=INFO)
2926+ return template
2927+
2928+ def render(self, config_file):
2929+ if config_file not in self.templates:
2930+ log('Config not registered: %s' % config_file, level=ERROR)
2931+ raise OSConfigException
2932+ ctxt = self.templates[config_file].context()
2933+
2934+ _tmpl = os.path.basename(config_file)
2935+ try:
2936+ template = self._get_template(_tmpl)
2937+ except exceptions.TemplateNotFound:
2938+ # if no template is found with basename, try looking for it
2939+ # using a munged full path, eg:
2940+ # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
2941+ _tmpl = '_'.join(config_file.split('/')[1:])
2942+ try:
2943+ template = self._get_template(_tmpl)
2944+ except exceptions.TemplateNotFound as e:
2945+ log('Could not load template from %s by %s or %s.' %
2946+ (self.templates_dir, os.path.basename(config_file), _tmpl),
2947+ level=ERROR)
2948+ raise e
2949+
2950+ log('Rendering from template: %s' % _tmpl, level=INFO)
2951+ return template.render(ctxt)
2952+
2953+ def write(self, config_file):
2954+ """
2955+ Write a single config file, raises if config file is not registered.
2956+ """
2957+ if config_file not in self.templates:
2958+ log('Config not registered: %s' % config_file, level=ERROR)
2959+ raise OSConfigException
2960+
2961+ _out = self.render(config_file)
2962+
2963+ with open(config_file, 'wb') as out:
2964+ out.write(_out)
2965+
2966+ log('Wrote template %s.' % config_file, level=INFO)
2967+
2968+ def write_all(self):
2969+ """
2970+ Write out all registered config files.
2971+ """
2972+ [self.write(k) for k in six.iterkeys(self.templates)]
2973+
2974+ def set_release(self, openstack_release):
2975+ """
2976+ Resets the template environment and generates a new template loader
2978+ based on the new OpenStack release.
2978+ """
2979+ self._tmpl_env = None
2980+ self.openstack_release = openstack_release
2981+ self._get_tmpl_env()
2982+
2983+ def complete_contexts(self):
2984+ '''
2985+ Returns a list of context interfaces that yield a complete context.
2986+ '''
2987+ interfaces = []
2988+ [interfaces.extend(i.complete_contexts())
2989+ for i in six.itervalues(self.templates)]
2990+ return interfaces
2991
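One non-obvious behaviour of render() above: if no template matches the config file's basename, it retries with a path-munged name, which lets a charm ship templates whose basenames would otherwise collide:

    # Both can coexist under templates_dir; render() falls back to the
    # munged name when a plain 'apache2.conf' template is not found:
    #   templates/etc_apache2_apache2.conf -> /etc/apache2/apache2.conf
    configs.register('/etc/apache2/apache2.conf', [SomeContext()])
    configs.write('/etc/apache2/apache2.conf')

(SomeContext stands in for any registered context generator.)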
2992=== added file 'hooks/charmhelpers/contrib/openstack/utils.py'
2993--- hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000
2994+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-01-15 16:18:44 +0000
2995@@ -0,0 +1,625 @@
2996+#!/usr/bin/python
2997+
2998+# Common python helper functions used for OpenStack charms.
2999+from collections import OrderedDict
3000+from functools import wraps
3001+
3002+import subprocess
3003+import json
3004+import os
3005+import socket
3006+import sys
3007+
3008+import six
3009+import yaml
3010+
3011+from charmhelpers.core.hookenv import (
3012+ config,
3013+ log as juju_log,
3014+ charm_dir,
3015+ INFO,
3016+ relation_ids,
3017+ relation_set
3018+)
3019+
3020+from charmhelpers.contrib.storage.linux.lvm import (
3021+ deactivate_lvm_volume_group,
3022+ is_lvm_physical_volume,
3023+ remove_lvm_physical_volume,
3024+)
3025+
3026+from charmhelpers.contrib.network.ip import (
3027+ get_ipv6_addr
3028+)
3029+
3030+from charmhelpers.core.host import lsb_release, mounts, umount
3031+from charmhelpers.fetch import apt_install, apt_cache, install_remote
3032+from charmhelpers.contrib.python.packages import pip_install
3033+from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
3034+from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
3035+
3036+CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
3037+CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
3038+
3039+DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
3040+ 'restricted main multiverse universe')
3041+
3042+
3043+UBUNTU_OPENSTACK_RELEASE = OrderedDict([
3044+ ('oneiric', 'diablo'),
3045+ ('precise', 'essex'),
3046+ ('quantal', 'folsom'),
3047+ ('raring', 'grizzly'),
3048+ ('saucy', 'havana'),
3049+ ('trusty', 'icehouse'),
3050+ ('utopic', 'juno'),
3051+ ('vivid', 'kilo'),
3052+])
3053+
3054+
3055+OPENSTACK_CODENAMES = OrderedDict([
3056+ ('2011.2', 'diablo'),
3057+ ('2012.1', 'essex'),
3058+ ('2012.2', 'folsom'),
3059+ ('2013.1', 'grizzly'),
3060+ ('2013.2', 'havana'),
3061+ ('2014.1', 'icehouse'),
3062+ ('2014.2', 'juno'),
3063+ ('2015.1', 'kilo'),
3064+])
3065+
3066+# The ugly duckling
3067+SWIFT_CODENAMES = OrderedDict([
3068+ ('1.4.3', 'diablo'),
3069+ ('1.4.8', 'essex'),
3070+ ('1.7.4', 'folsom'),
3071+ ('1.8.0', 'grizzly'),
3072+ ('1.7.7', 'grizzly'),
3073+ ('1.7.6', 'grizzly'),
3074+ ('1.10.0', 'havana'),
3075+ ('1.9.1', 'havana'),
3076+ ('1.9.0', 'havana'),
3077+ ('1.13.1', 'icehouse'),
3078+ ('1.13.0', 'icehouse'),
3079+ ('1.12.0', 'icehouse'),
3080+ ('1.11.0', 'icehouse'),
3081+ ('2.0.0', 'juno'),
3082+ ('2.1.0', 'juno'),
3083+ ('2.2.0', 'juno'),
3084+ ('2.2.1', 'kilo'),
3085+])
3086+
3087+DEFAULT_LOOPBACK_SIZE = '5G'
3088+
3089+
3090+def error_out(msg):
3091+ juju_log("FATAL ERROR: %s" % msg, level='ERROR')
3092+ sys.exit(1)
3093+
3094+
3095+def get_os_codename_install_source(src):
3096+ '''Derive OpenStack release codename from a given installation source.'''
3097+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3098+ rel = ''
3099+ if src is None:
3100+ return rel
3101+ if src in ['distro', 'distro-proposed']:
3102+ try:
3103+ rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
3104+ except KeyError:
3105+ e = 'Could not derive openstack release for '\
3106+ 'this Ubuntu release: %s' % ubuntu_rel
3107+ error_out(e)
3108+ return rel
3109+
3110+ if src.startswith('cloud:'):
3111+ ca_rel = src.split(':')[1]
3112+ ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
3113+ return ca_rel
3114+
3115+ # Best guess match based on deb string provided
3116+ if src.startswith('deb') or src.startswith('ppa'):
3117+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
3118+ if v in src:
3119+ return v
3120+
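A few illustrative mappings on a trusty host, derived from the tables above:

    get_os_codename_install_source('distro')             # -> 'icehouse'
    get_os_codename_install_source('cloud:trusty-juno')  # -> 'juno'
    # deb lines are best-guess matched on a codename substring:
    get_os_codename_install_source(
        'deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
        'trusty-updates/juno main')                      # -> 'juno'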
3121+
3122+def get_os_version_install_source(src):
3123+ codename = get_os_codename_install_source(src)
3124+ return get_os_version_codename(codename)
3125+
3126+
3127+def get_os_codename_version(vers):
3128+ '''Determine OpenStack codename from version number.'''
3129+ try:
3130+ return OPENSTACK_CODENAMES[vers]
3131+ except KeyError:
3132+ e = 'Could not determine OpenStack codename for version %s' % vers
3133+ error_out(e)
3134+
3135+
3136+def get_os_version_codename(codename):
3137+ '''Determine OpenStack version number from codename.'''
3138+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
3139+ if v == codename:
3140+ return k
3141+ e = 'Could not derive OpenStack version for '\
3142+ 'codename: %s' % codename
3143+ error_out(e)
3144+
3145+
3146+def get_os_codename_package(package, fatal=True):
3147+ '''Derive OpenStack release codename from an installed package.'''
3148+ import apt_pkg as apt
3149+
3150+ cache = apt_cache()
3151+
3152+ try:
3153+ pkg = cache[package]
3154+ except:
3155+ if not fatal:
3156+ return None
3157+ # the package is unknown to the current apt cache.
3158+ e = 'Could not determine version of package with no installation '\
3159+ 'candidate: %s' % package
3160+ error_out(e)
3161+
3162+ if not pkg.current_ver:
3163+ if not fatal:
3164+ return None
3165+ # package is known, but no version is currently installed.
3166+ e = 'Could not determine version of uninstalled package: %s' % package
3167+ error_out(e)
3168+
3169+ vers = apt.upstream_version(pkg.current_ver.ver_str)
3170+
3171+ try:
3172+ if 'swift' in pkg.name:
3173+ swift_vers = vers[:5]
3174+ if swift_vers not in SWIFT_CODENAMES:
3175+ # Deal with 1.10.0 upward
3176+ swift_vers = vers[:6]
3177+ return SWIFT_CODENAMES[swift_vers]
3178+ else:
3179+ vers = vers[:6]
3180+ return OPENSTACK_CODENAMES[vers]
3181+ except KeyError:
3182+ e = 'Could not determine OpenStack codename for version %s' % vers
3183+ error_out(e)
3184+
3185+
3186+def get_os_version_package(pkg, fatal=True):
3187+ '''Derive OpenStack version number from an installed package.'''
3188+ codename = get_os_codename_package(pkg, fatal=fatal)
3189+
3190+ if not codename:
3191+ return None
3192+
3193+ if 'swift' in pkg:
3194+ vers_map = SWIFT_CODENAMES
3195+ else:
3196+ vers_map = OPENSTACK_CODENAMES
3197+
3198+ for version, cname in six.iteritems(vers_map):
3199+ if cname == codename:
3200+ return version
3201+ # e = "Could not determine OpenStack version for package: %s" % pkg
3202+ # error_out(e)
3203+
3204+
3205+os_rel = None
3206+
3207+
3208+def os_release(package, base='essex'):
3209+ '''
3210+ Returns OpenStack release codename from a cached global.
3211+ If the codename cannot be determined from either an installed package
3212+ or the installation source, the earliest release supported by the
3213+ charm (the base argument) is returned.
3214+ '''
3215+ global os_rel
3216+ if os_rel:
3217+ return os_rel
3218+ os_rel = (get_os_codename_package(package, fatal=False) or
3219+ get_os_codename_install_source(config('openstack-origin')) or
3220+ base)
3221+ return os_rel
3222+
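Note the module-level cache: the first successful lookup wins for the lifetime of the hook process (the package name here is only illustrative):

    rel = os_release('radosgw', base='icehouse')
    # Later calls return the cached codename whatever the arguments:
    assert os_release('nova-common') == rel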
3223+
3224+def import_key(keyid):
3225+ cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
3226+ "--recv-keys %s" % keyid
3227+ try:
3228+ subprocess.check_call(cmd.split(' '))
3229+ except subprocess.CalledProcessError:
3230+ error_out("Error importing repo key %s" % keyid)
3231+
3232+
3233+def configure_installation_source(rel):
3234+ '''Configure apt installation source.'''
3235+ if rel == 'distro':
3236+ return
3237+ elif rel == 'distro-proposed':
3238+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3239+ with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
3240+ f.write(DISTRO_PROPOSED % ubuntu_rel)
3241+ elif rel[:4] == "ppa:":
3242+ src = rel
3243+ subprocess.check_call(["add-apt-repository", "-y", src])
3244+ elif rel[:3] == "deb":
3245+ l = len(rel.split('|'))
3246+ if l == 2:
3247+ src, key = rel.split('|')
3248+ juju_log("Importing PPA key from keyserver for %s" % src)
3249+ import_key(key)
3250+ elif l == 1:
3251+ src = rel
3252+ with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
3253+ f.write(src)
3254+ elif rel[:6] == 'cloud:':
3255+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
3256+ rel = rel.split(':')[1]
3257+ u_rel = rel.split('-')[0]
3258+ ca_rel = rel.split('-')[1]
3259+
3260+ if u_rel != ubuntu_rel:
3261+ e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
3262+ 'version (%s)' % (ca_rel, ubuntu_rel)
3263+ error_out(e)
3264+
3265+ if 'staging' in ca_rel:
3266+ # staging is just a regular PPA.
3267+ os_rel = ca_rel.split('/')[0]
3268+ ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
3269+ cmd = 'add-apt-repository -y %s' % ppa
3270+ subprocess.check_call(cmd.split(' '))
3271+ return
3272+
3273+ # map charm config options to actual archive pockets.
3274+ pockets = {
3275+ 'folsom': 'precise-updates/folsom',
3276+ 'folsom/updates': 'precise-updates/folsom',
3277+ 'folsom/proposed': 'precise-proposed/folsom',
3278+ 'grizzly': 'precise-updates/grizzly',
3279+ 'grizzly/updates': 'precise-updates/grizzly',
3280+ 'grizzly/proposed': 'precise-proposed/grizzly',
3281+ 'havana': 'precise-updates/havana',
3282+ 'havana/updates': 'precise-updates/havana',
3283+ 'havana/proposed': 'precise-proposed/havana',
3284+ 'icehouse': 'precise-updates/icehouse',
3285+ 'icehouse/updates': 'precise-updates/icehouse',
3286+ 'icehouse/proposed': 'precise-proposed/icehouse',
3287+ 'juno': 'trusty-updates/juno',
3288+ 'juno/updates': 'trusty-updates/juno',
3289+ 'juno/proposed': 'trusty-proposed/juno',
3290+ 'kilo': 'trusty-updates/kilo',
3291+ 'kilo/updates': 'trusty-updates/kilo',
3292+ 'kilo/proposed': 'trusty-proposed/kilo',
3293+ }
3294+
3295+ try:
3296+ pocket = pockets[ca_rel]
3297+ except KeyError:
3298+ e = 'Invalid Cloud Archive release specified: %s' % rel
3299+ error_out(e)
3300+
3301+ src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
3302+ apt_install('ubuntu-cloud-keyring', fatal=True)
3303+
3304+ with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
3305+ f.write(src)
3306+ else:
3307+ error_out("Invalid openstack-release specified: %s" % rel)
3308+
3309+
3310+def save_script_rc(script_path="scripts/scriptrc", **env_vars):
3311+ """
3312+ Write an rc file in the charm-delivered directory containing
3313+ exported environment variables provided by env_vars. Any charm scripts run
3314+ outside the juju hook environment can source this scriptrc to obtain
3315+ updated config information necessary to perform health checks or
3316+ service changes.
3317+ """
3318+ juju_rc_path = "%s/%s" % (charm_dir(), script_path)
3319+ if not os.path.exists(os.path.dirname(juju_rc_path)):
3320+ os.mkdir(os.path.dirname(juju_rc_path))
3321+ with open(juju_rc_path, 'wb') as rc_script:
3322+ rc_script.write(
3323+ "#!/bin/bash\n")
3324+ [rc_script.write('export %s=%s\n' % (u, p))
3325+ for u, p in six.iteritems(env_vars) if u != "script_path"]
3326+
3327+
3328+def openstack_upgrade_available(package):
3329+ """
3330+ Determines if an OpenStack upgrade is available from installation
3331+ source, based on version of installed package.
3332+
3333+ :param package: str: Name of installed package.
3334+
3335+ :returns: bool: : Returns True if configured installation source offers
3336+ a newer version of package.
3337+
3338+ """
3339+
3340+ import apt_pkg as apt
3341+ src = config('openstack-origin')
3342+ cur_vers = get_os_version_package(package)
3343+ available_vers = get_os_version_install_source(src)
3344+ apt.init()
3345+ return apt.version_compare(available_vers, cur_vers) == 1
3346+
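Typical hook usage (a sketch; do_openstack_upgrade stands in for a charm-local helper, it is not defined in this file):

    if openstack_upgrade_available('radosgw'):
        # the configured openstack-origin now offers a newer package
        do_openstack_upgrade(configs)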
3347+
3348+def ensure_block_device(block_device):
3349+ '''
3350+ Confirm block_device, create as loopback if necessary.
3351+
3352+ :param block_device: str: Full path of block device to ensure.
3353+
3354+ :returns: str: Full path of ensured block device.
3355+ '''
3356+ _none = ['None', 'none', None]
3357+ if (block_device in _none):
3358+ error_out('prepare_storage(): Missing required input: block_device=%s.'
3359+ % block_device)
3360+
3361+ if block_device.startswith('/dev/'):
3362+ bdev = block_device
3363+ elif block_device.startswith('/'):
3364+ _bd = block_device.split('|')
3365+ if len(_bd) == 2:
3366+ bdev, size = _bd
3367+ else:
3368+ bdev = block_device
3369+ size = DEFAULT_LOOPBACK_SIZE
3370+ bdev = ensure_loopback_device(bdev, size)
3371+ else:
3372+ bdev = '/dev/%s' % block_device
3373+
3374+ if not is_block_device(bdev):
3375+ error_out('Failed to locate valid block device at %s' % bdev)
3376+
3377+ return bdev
3378+
3379+
3380+def clean_storage(block_device):
3381+ '''
3382+ Ensures a block device is clean. That is:
3383+ - unmounted
3384+ - any lvm volume groups are deactivated
3385+ - any lvm physical device signatures removed
3386+ - partition table wiped
3387+
3388+ :param block_device: str: Full path to block device to clean.
3389+ '''
3390+ for mp, d in mounts():
3391+ if d == block_device:
3392+ juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
3393+ (d, mp), level=INFO)
3394+ umount(mp, persist=True)
3395+
3396+ if is_lvm_physical_volume(block_device):
3397+ deactivate_lvm_volume_group(block_device)
3398+ remove_lvm_physical_volume(block_device)
3399+ else:
3400+ zap_disk(block_device)
3401+
3402+
3403+def is_ip(address):
3404+ """
3405+ Returns True if address is a valid IPv4 address.
3406+ """
3407+ try:
3408+ # Test to see if already an IPv4 address
3409+ socket.inet_aton(address)
3410+ return True
3411+ except socket.error:
3412+ return False
3413+
3414+
3415+def ns_query(address):
3416+ try:
3417+ import dns.resolver
3418+ except ImportError:
3419+ apt_install('python-dnspython')
3420+ import dns.resolver
3421+
3422+ if isinstance(address, dns.name.Name):
3423+ rtype = 'PTR'
3424+ elif isinstance(address, six.string_types):
3425+ rtype = 'A'
3426+ else:
3427+ return None
3428+
3429+ answers = dns.resolver.query(address, rtype)
3430+ if answers:
3431+ return str(answers[0])
3432+ return None
3433+
3434+
3435+def get_host_ip(hostname):
3436+ """
3437+ Resolves the IP for a given hostname, or returns
3438+ the input if it is already an IP.
3439+ """
3440+ if is_ip(hostname):
3441+ return hostname
3442+
3443+ return ns_query(hostname)
3444+
3445+
3446+def get_hostname(address, fqdn=True):
3447+ """
3448+ Resolves hostname for given IP, or returns the input
3449+ if it is already a hostname.
3450+ """
3451+ if is_ip(address):
3452+ try:
3453+ import dns.reversename
3454+ except ImportError:
3455+ apt_install('python-dnspython')
3456+ import dns.reversename
3457+
3458+ rev = dns.reversename.from_address(address)
3459+ result = ns_query(rev)
3460+ if not result:
3461+ return None
3462+ else:
3463+ result = address
3464+
3465+ if fqdn:
3466+ # strip trailing .
3467+ if result.endswith('.'):
3468+ return result[:-1]
3469+ else:
3470+ return result
3471+ else:
3472+ return result.split('.')[0]
3473+
3474+
3475+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
3476+ mm_map = {}
3477+ if os.path.isfile(mm_file):
3478+ with open(mm_file, 'r') as f:
3479+ mm_map = json.load(f)
3480+ return mm_map
3481+
3482+
3483+def sync_db_with_multi_ipv6_addresses(database, database_user,
3484+ relation_prefix=None):
3485+ hosts = get_ipv6_addr(dynamic_only=False)
3486+
3487+ kwargs = {'database': database,
3488+ 'username': database_user,
3489+ 'hostname': json.dumps(hosts)}
3490+
3491+ if relation_prefix:
3492+ for key in list(kwargs.keys()):
3493+ kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
3494+ del kwargs[key]
3495+
3496+ for rid in relation_ids('shared-db'):
3497+ relation_set(relation_id=rid, **kwargs)
3498+
3499+
3500+def os_requires_version(ostack_release, pkg):
3501+ """
3502+ Decorator for hook to specify minimum supported release
3503+ """
3504+ def wrap(f):
3505+ @wraps(f)
3506+ def wrapped_f(*args):
3507+ if os_release(pkg) < ostack_release:
3508+ raise Exception("This hook is not supported on releases"
3509+ " before %s" % ostack_release)
3510+ f(*args)
3511+ return wrapped_f
3512+ return wrap
3513+
3514+
3515+def git_install_requested():
3516+ """Returns true if openstack-origin-git is specified."""
3517+ return config('openstack-origin-git') != "None"
3518+
3519+
3520+requirements_dir = None
3521+
3522+
3523+def git_clone_and_install(file_name, core_project):
3524+ """Clone/install all OpenStack repos specified in yaml config file."""
3525+ global requirements_dir
3526+
3527+ if file_name == "None":
3528+ return
3529+
3530+ yaml_file = os.path.join(charm_dir(), file_name)
3531+
3532+ # clone/install the requirements project first
3533+ installed = _git_clone_and_install_subset(yaml_file,
3534+ whitelist=['requirements'])
3535+ if 'requirements' not in installed:
3536+ error_out('requirements git repository must be specified')
3537+
3538+ # clone/install all other projects except requirements and the core project
3539+ blacklist = ['requirements', core_project]
3540+ _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
3541+ update_requirements=True)
3542+
3543+ # clone/install the core project
3544+ whitelist = [core_project]
3545+ installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
3546+ update_requirements=True)
3547+ if core_project not in installed:
3548+ error_out('{} git repository must be specified'.format(core_project))
3549+
3550+
3551+def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
3552+ update_requirements=False):
3553+ """Clone/install subset of OpenStack repos specified in yaml config file."""
3554+ global requirements_dir
3555+ installed = []
3556+
3557+ with open(yaml_file, 'r') as fd:
3558+ projects = yaml.load(fd)
3559+ for proj, val in projects.items():
3560+ # The project subset is chosen based on the following 3 rules:
3561+ # 1) If project is in blacklist, we don't clone/install it, period.
3562+ # 2) If whitelist is empty, we clone/install everything else.
3563+ # 3) If whitelist is not empty, we clone/install everything in the
3564+ # whitelist.
3565+ if proj in blacklist:
3566+ continue
3567+ if whitelist and proj not in whitelist:
3568+ continue
3569+ repo = val['repository']
3570+ branch = val['branch']
3571+ repo_dir = _git_clone_and_install_single(repo, branch,
3572+ update_requirements)
3573+ if proj == 'requirements':
3574+ requirements_dir = repo_dir
3575+ installed.append(proj)
3576+ return installed
3577+
3578+
3579+def _git_clone_and_install_single(repo, branch, update_requirements=False):
3580+ """Clone and install a single git repository."""
3581+ dest_parent_dir = "/mnt/openstack-git/"
3582+ dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
3583+
3584+ if not os.path.exists(dest_parent_dir):
3585+ juju_log('Host dir not mounted at {}. '
3586+ 'Creating directory there instead.'.format(dest_parent_dir))
3587+ os.mkdir(dest_parent_dir)
3588+
3589+ if not os.path.exists(dest_dir):
3590+ juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
3591+ repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
3592+ else:
3593+ repo_dir = dest_dir
3594+
3595+ if update_requirements:
3596+ if not requirements_dir:
3597+ error_out('requirements repo must be cloned before '
3598+ 'updating from global requirements.')
3599+ _git_update_requirements(repo_dir, requirements_dir)
3600+
3601+ juju_log('Installing git repo from dir: {}'.format(repo_dir))
3602+ pip_install(repo_dir)
3603+
3604+ return repo_dir
3605+
3606+
3607+def _git_update_requirements(package_dir, reqs_dir):
3608+ """Update from global requirements.
3609+
3610+ Update an OpenStack git directory's requirements.txt and
3611+ test-requirements.txt from global-requirements.txt."""
3612+ orig_dir = os.getcwd()
3613+ os.chdir(reqs_dir)
3614+ cmd = "python update.py {}".format(package_dir)
3615+ try:
3616+ subprocess.check_call(cmd.split(' '))
3617+ except subprocess.CalledProcessError:
3618+ package = os.path.basename(package_dir)
3619+ error_out("Error updating {} from global-requirements.txt".format(package))
3620+ os.chdir(orig_dir)
3621
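A quick usage sketch for the os_requires_version decorator added above; release codenames compare correctly as plain strings because OpenStack releases are named alphabetically. The hook and package names here are illustrative, not from this branch:

    from charmhelpers.contrib.openstack.utils import os_requires_version

    @os_requires_version('icehouse', 'radosgw')
    def config_changed():
        # Raises before the body runs if the installed radosgw package
        # maps to a release earlier than icehouse.
        pass
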
3622=== added directory 'hooks/charmhelpers/contrib/python'
3623=== added file 'hooks/charmhelpers/contrib/python/__init__.py'
3624=== added file 'hooks/charmhelpers/contrib/python/packages.py'
3625--- hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000
3626+++ hooks/charmhelpers/contrib/python/packages.py 2015-01-15 16:18:44 +0000
3627@@ -0,0 +1,77 @@
3628+#!/usr/bin/env python
3629+# coding: utf-8
3630+
3631+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
3632+
3633+from charmhelpers.fetch import apt_install, apt_update
3634+from charmhelpers.core.hookenv import log
3635+
3636+try:
3637+ from pip import main as pip_execute
3638+except ImportError:
3639+ apt_update()
3640+ apt_install('python-pip')
3641+ from pip import main as pip_execute
3642+
3643+
3644+def parse_options(given, available):
3645+ """Given a set of options, check if available"""
3646+ for key, value in sorted(given.items()):
3647+ if key in available:
3648+ yield "--{0}={1}".format(key, value)
3649+
3650+
3651+def pip_install_requirements(requirements, **options):
3652+ """Install a requirements file """
3653+ command = ["install"]
3654+
3655+ available_options = ('proxy', 'src', 'log', )
3656+ for option in parse_options(options, available_options):
3657+ command.append(option)
3658+
3659+ command.append("-r {0}".format(requirements))
3660+ log("Installing from file: {} with options: {}".format(requirements,
3661+ command))
3662+ pip_execute(command)
3663+
3664+
3665+def pip_install(package, fatal=False, **options):
3666+ """Install a python package"""
3667+ command = ["install"]
3668+
3669+ available_options = ('proxy', 'src', 'log', "index-url", )
3670+ for option in parse_options(options, available_options):
3671+ command.append(option)
3672+
3673+ if isinstance(package, list):
3674+ command.extend(package)
3675+ else:
3676+ command.append(package)
3677+
3678+ log("Installing {} package with options: {}".format(package,
3679+ command))
3680+ pip_execute(command)
3681+
3682+
3683+def pip_uninstall(package, **options):
3684+ """Uninstall a python package"""
3685+ command = ["uninstall", "-q", "-y"]
3686+
3687+ available_options = ('proxy', 'log', )
3688+ for option in parse_options(options, available_options):
3689+ command.append(option)
3690+
3691+ if isinstance(package, list):
3692+ command.extend(package)
3693+ else:
3694+ command.append(package)
3695+
3696+ log("Uninstalling {} package with options: {}".format(package,
3697+ command))
3698+ pip_execute(command)
3699+
3700+
3701+def pip_list():
3702+ """Returns the list of current python installed packages
3703+ """
3704+ return pip_execute(["list"])
3705
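For illustration, the parse_options() plumbing above turns keyword arguments into pip flags, silently dropping any that are not whitelisted; the proxy URL below is made up:

    from charmhelpers.contrib.python.packages import pip_install

    # Runs: pip install --proxy=http://proxy.example.com:3128 six
    pip_install('six', proxy='http://proxy.example.com:3128')

    # A list argument installs several packages in one invocation.
    pip_install(['six', 'PyYAML'])
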
3706=== added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
3707--- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000
3708+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-01-15 16:18:44 +0000
3709@@ -0,0 +1,428 @@
3710+#
3711+# Copyright 2012 Canonical Ltd.
3712+#
3713+# This file is sourced from lp:openstack-charm-helpers
3714+#
3715+# Authors:
3716+# James Page <james.page@ubuntu.com>
3717+# Adam Gandelman <adamg@ubuntu.com>
3718+#
3719+
3720+import os
3721+import shutil
3722+import json
3723+import time
3724+
3725+from subprocess import (
3726+ check_call,
3727+ check_output,
3728+ CalledProcessError,
3729+)
3730+from charmhelpers.core.hookenv import (
3731+ relation_get,
3732+ relation_ids,
3733+ related_units,
3734+ log,
3735+ DEBUG,
3736+ INFO,
3737+ WARNING,
3738+ ERROR,
3739+)
3740+from charmhelpers.core.host import (
3741+ mount,
3742+ mounts,
3743+ service_start,
3744+ service_stop,
3745+ service_running,
3746+ umount,
3747+)
3748+from charmhelpers.fetch import (
3749+ apt_install,
3750+)
3751+
3752+KEYRING = '/etc/ceph/ceph.client.{}.keyring'
3753+KEYFILE = '/etc/ceph/ceph.client.{}.key'
3754+
3755+CEPH_CONF = """[global]
3756+ auth supported = {auth}
3757+ keyring = {keyring}
3758+ mon host = {mon_hosts}
3759+ log to syslog = {use_syslog}
3760+ err to syslog = {use_syslog}
3761+ clog to syslog = {use_syslog}
3762+"""
3763+
3764+
3765+def install():
3766+ """Basic Ceph client installation."""
3767+ ceph_dir = "/etc/ceph"
3768+ if not os.path.exists(ceph_dir):
3769+ os.mkdir(ceph_dir)
3770+
3771+ apt_install('ceph-common', fatal=True)
3772+
3773+
3774+def rbd_exists(service, pool, rbd_img):
3775+ """Check to see if a RADOS block device exists."""
3776+ try:
3777+ out = check_output(['rbd', 'list', '--id',
3778+ service, '--pool', pool]).decode('UTF-8')
3779+ except CalledProcessError:
3780+ return False
3781+
3782+ return rbd_img in out
3783+
3784+
3785+def create_rbd_image(service, pool, image, sizemb):
3786+ """Create a new RADOS block device."""
3787+ cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
3788+ '--pool', pool]
3789+ check_call(cmd)
3790+
3791+
3792+def pool_exists(service, name):
3793+ """Check to see if a RADOS pool already exists."""
3794+ try:
3795+ out = check_output(['rados', '--id', service,
3796+ 'lspools']).decode('UTF-8')
3797+ except CalledProcessError:
3798+ return False
3799+
3800+ return name in out
3801+
3802+
3803+def get_osds(service):
3804+ """Return a list of all Ceph Object Storage Daemons currently in the
3805+ cluster.
3806+ """
3807+ version = ceph_version()
3808+ if version and version >= '0.56':
3809+ return json.loads(check_output(['ceph', '--id', service,
3810+ 'osd', 'ls',
3811+ '--format=json']).decode('UTF-8'))
3812+
3813+ return None
3814+
3815+
3816+def create_pool(service, name, replicas=3):
3817+ """Create a new RADOS pool."""
3818+ if pool_exists(service, name):
3819+ log("Ceph pool {} already exists, skipping creation".format(name),
3820+ level=WARNING)
3821+ return
3822+
3823+ # Calculate the number of placement groups based
3824+ # on upstream recommended best practices.
3825+ osds = get_osds(service)
3826+ if osds:
3827+ pgnum = (len(osds) * 100 // replicas)
3828+ else:
3829+ # NOTE(james-page): Default to 200 for older ceph versions
3830+ # which don't support OSD query from cli
3831+ pgnum = 200
3832+
3833+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
3834+ check_call(cmd)
3835+
3836+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
3837+ str(replicas)]
3838+ check_call(cmd)
3839+
3840+
3841+def delete_pool(service, name):
3842+ """Delete a RADOS pool from ceph."""
3843+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
3844+ '--yes-i-really-really-mean-it']
3845+ check_call(cmd)
3846+
3847+
3848+def _keyfile_path(service):
3849+ return KEYFILE.format(service)
3850+
3851+
3852+def _keyring_path(service):
3853+ return KEYRING.format(service)
3854+
3855+
3856+def create_keyring(service, key):
3857+ """Create a new Ceph keyring containing key."""
3858+ keyring = _keyring_path(service)
3859+ if os.path.exists(keyring):
3860+ log('Ceph keyring exists at %s.' % keyring, level=WARNING)
3861+ return
3862+
3863+ cmd = ['ceph-authtool', keyring, '--create-keyring',
3864+ '--name=client.{}'.format(service), '--add-key={}'.format(key)]
3865+ check_call(cmd)
3866+ log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
3867+
3868+
3869+def delete_keyring(service):
3870+ """Delete an existing Ceph keyring."""
3871+ keyring = _keyring_path(service)
3872+ if not os.path.exists(keyring):
3873+ log('Keyring does not exist at %s' % keyring, level=WARNING)
3874+ return
3875+
3876+ os.remove(keyring)
3877+ log('Deleted keyring at %s.' % keyring, level=INFO)
3878+
3879+
3880+def create_key_file(service, key):
3881+ """Create a file containing key."""
3882+ keyfile = _keyfile_path(service)
3883+ if os.path.exists(keyfile):
3884+ log('Keyfile exists at %s.' % keyfile, level=WARNING)
3885+ return
3886+
3887+ with open(keyfile, 'w') as fd:
3888+ fd.write(key)
3889+
3890+ log('Created new keyfile at %s.' % keyfile, level=INFO)
3891+
3892+
3893+def get_ceph_nodes():
3894+ """Query named relation 'ceph' to determine current nodes."""
3895+ hosts = []
3896+ for r_id in relation_ids('ceph'):
3897+ for unit in related_units(r_id):
3898+ hosts.append(relation_get('private-address', unit=unit, rid=r_id))
3899+
3900+ return hosts
3901+
3902+
3903+def configure(service, key, auth, use_syslog):
3904+ """Perform basic configuration of Ceph."""
3905+ create_keyring(service, key)
3906+ create_key_file(service, key)
3907+ hosts = get_ceph_nodes()
3908+ with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
3909+ ceph_conf.write(CEPH_CONF.format(auth=auth,
3910+ keyring=_keyring_path(service),
3911+ mon_hosts=",".join(map(str, hosts)),
3912+ use_syslog=use_syslog))
3913+ modprobe('rbd')
3914+
3915+
3916+def image_mapped(name):
3917+ """Determine whether a RADOS block device is mapped locally."""
3918+ try:
3919+ out = check_output(['rbd', 'showmapped']).decode('UTF-8')
3920+ except CalledProcessError:
3921+ return False
3922+
3923+ return name in out
3924+
3925+
3926+def map_block_storage(service, pool, image):
3927+ """Map a RADOS block device for local use."""
3928+ cmd = [
3929+ 'rbd',
3930+ 'map',
3931+ '{}/{}'.format(pool, image),
3932+ '--user',
3933+ service,
3934+ '--secret',
3935+ _keyfile_path(service),
3936+ ]
3937+ check_call(cmd)
3938+
3939+
3940+def filesystem_mounted(fs):
3941+ """Determine whether a filesytems is already mounted."""
3942+ return fs in [f for f, m in mounts()]
3943+
3944+
3945+def make_filesystem(blk_device, fstype='ext4', timeout=10):
3946+ """Make a new filesystem on the specified block device."""
3947+ count = 0
3948+ e_noent = os.errno.ENOENT
3949+ while not os.path.exists(blk_device):
3950+ if count >= timeout:
3951+ log('Gave up waiting on block device %s' % blk_device,
3952+ level=ERROR)
3953+ raise IOError(e_noent, os.strerror(e_noent), blk_device)
3954+
3955+ log('Waiting for block device %s to appear' % blk_device,
3956+ level=DEBUG)
3957+ count += 1
3958+ time.sleep(1)
3959+ else:
3960+ log('Formatting block device %s as filesystem %s.' %
3961+ (blk_device, fstype), level=INFO)
3962+ check_call(['mkfs', '-t', fstype, blk_device])
3963+
3964+
3965+def place_data_on_block_device(blk_device, data_src_dst):
3966+ """Migrate data in data_src_dst to blk_device and then remount."""
3967+ # mount block device into /mnt
3968+ mount(blk_device, '/mnt')
3969+ # copy data to /mnt
3970+ copy_files(data_src_dst, '/mnt')
3971+ # umount block device
3972+ umount('/mnt')
3973+ # Grab user/group ID's from original source
3974+ _dir = os.stat(data_src_dst)
3975+ uid = _dir.st_uid
3976+ gid = _dir.st_gid
3977+ # re-mount where the data should originally be
3978+ # TODO: persist is currently a NO-OP in core.host
3979+ mount(blk_device, data_src_dst, persist=True)
3980+ # ensure original ownership of new mount.
3981+ os.chown(data_src_dst, uid, gid)
3982+
3983+
3984+# TODO: re-use
3985+def modprobe(module):
3986+ """Load a kernel module and configure for auto-load on reboot."""
3987+ log('Loading kernel module %s' % module, level=INFO)
3988+ cmd = ['modprobe', module]
3989+ check_call(cmd)
3990+ with open('/etc/modules', 'r+') as modules:
3991+ if module not in modules.read():
3992+ modules.write(module)
3993+
3994+
3995+def copy_files(src, dst, symlinks=False, ignore=None):
3996+ """Copy files from src to dst."""
3997+ for item in os.listdir(src):
3998+ s = os.path.join(src, item)
3999+ d = os.path.join(dst, item)
4000+ if os.path.isdir(s):
4001+ shutil.copytree(s, d, symlinks, ignore)
4002+ else:
4003+ shutil.copy2(s, d)
4004+
4005+
4006+def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
4007+ blk_device, fstype, system_services=[],
4008+ replicas=3):
4009+ """NOTE: This function must only be called from a single service unit for
4010+ the same rbd_img, otherwise data loss will occur.
4011+
4012+ Ensures given pool and RBD image exists, is mapped to a block device,
4013+ and the device is formatted and mounted at the given mount_point.
4014+
4015+ If formatting a device for the first time, data existing at mount_point
4016+ will be migrated to the RBD device before being re-mounted.
4017+
4018+ All services listed in system_services will be stopped prior to data
4019+ migration and restarted when complete.
4020+ """
4021+ # Ensure pool, RBD image, RBD mappings are in place.
4022+ if not pool_exists(service, pool):
4023+ log('Creating new pool {}.'.format(pool), level=INFO)
4024+ create_pool(service, pool, replicas=replicas)
4025+
4026+ if not rbd_exists(service, pool, rbd_img):
4027+ log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
4028+ create_rbd_image(service, pool, rbd_img, sizemb)
4029+
4030+ if not image_mapped(rbd_img):
4031+ log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
4032+ level=INFO)
4033+ map_block_storage(service, pool, rbd_img)
4034+
4035+ # make file system
4036+ # TODO: What happens if for whatever reason this is run again and
4037+ # the data is already in the rbd device and/or is mounted??
4038+ # When it is mounted already, it will fail to make the fs
4039+ # XXX: This is really sketchy! Need to at least add an fstab entry
4040+ # otherwise this hook will blow away existing data if its executed
4041+ # after a reboot.
4042+ if not filesystem_mounted(mount_point):
4043+ make_filesystem(blk_device, fstype)
4044+
4045+ for svc in system_services:
4046+ if service_running(svc):
4047+ log('Stopping service {} prior to migrating data.'
4048+ .format(svc), level=DEBUG)
4049+ service_stop(svc)
4050+
4051+ place_data_on_block_device(blk_device, mount_point)
4052+
4053+ for svc in system_services:
4054+ log('Starting service {} after migrating data.'
4055+ .format(svc), level=DEBUG)
4056+ service_start(svc)
4057+
4058+
4059+def ensure_ceph_keyring(service, user=None, group=None):
4060+ """Ensures a ceph keyring is created for a named service and optionally
4061+ ensures user and group ownership.
4062+
4063+ Returns False if no ceph key is available in relation state.
4064+ """
4065+ key = None
4066+ for rid in relation_ids('ceph'):
4067+ for unit in related_units(rid):
4068+ key = relation_get('key', rid=rid, unit=unit)
4069+ if key:
4070+ break
4071+
4072+ if not key:
4073+ return False
4074+
4075+ create_keyring(service=service, key=key)
4076+ keyring = _keyring_path(service)
4077+ if user and group:
4078+ check_call(['chown', '%s.%s' % (user, group), keyring])
4079+
4080+ return True
4081+
4082+
4083+def ceph_version():
4084+ """Retrieve the local version of ceph."""
4085+ if os.path.exists('/usr/bin/ceph'):
4086+ cmd = ['ceph', '-v']
4087+ output = check_output(cmd).decode('US-ASCII')
4088+ output = output.split()
4089+ if len(output) > 3:
4090+ return output[2]
4091+ else:
4092+ return None
4093+ else:
4094+ return None
4095+
4096+
4097+class CephBrokerRq(object):
4098+ """Ceph broker request.
4099+
4100+ Multiple operations can be added to a request and sent to the Ceph broker
4101+ to be executed.
4102+
4103+ Request is json-encoded for sending over the wire.
4104+
4105+ The API is versioned and defaults to version 1.
4106+ """
4107+ def __init__(self, api_version=1):
4108+ self.api_version = api_version
4109+ self.ops = []
4110+
4111+ def add_op_create_pool(self, name, replica_count=3):
4112+ self.ops.append({'op': 'create-pool', 'name': name,
4113+ 'replicas': replica_count})
4114+
4115+ @property
4116+ def request(self):
4117+ return json.dumps({'api-version': self.api_version, 'ops': self.ops})
4118+
4119+
4120+class CephBrokerRsp(object):
4121+ """Ceph broker response.
4122+
4123+ Response is json-decoded and contents provided as methods/properties.
4124+
4125+ The API is versioned and defaults to version 1.
4126+ """
4127+ def __init__(self, encoded_rsp):
4128+ self.api_version = None
4129+ self.rsp = json.loads(encoded_rsp)
4130+
4131+ @property
4132+ def exit_code(self):
4133+ return self.rsp.get('exit-code')
4134+
4135+ @property
4136+ def exit_msg(self):
4137+ return self.rsp.get('stderr')
4138
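The CephBrokerRq/CephBrokerRsp classes at the end of ceph.py wrap a small JSON protocol spoken over the ceph relation. A minimal sketch of a round trip; the relation keys 'broker_req'/'broker_rsp', the pool name, and the rid/unit variables are assumptions for illustration:

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='cinder', replica_count=3)
    for rid in relation_ids('ceph'):
        relation_set(relation_id=rid, broker_req=rq.request)

    # Later, once the remote side has answered:
    rsp = CephBrokerRsp(relation_get('broker_rsp', rid=rid, unit=unit))
    if rsp.exit_code != 0:
        log('Ceph broker request failed: %s' % rsp.exit_msg, level=ERROR)
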
4139=== added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
4140--- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000
4141+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-01-15 16:18:44 +0000
4142@@ -0,0 +1,62 @@
4143+import os
4144+import re
4145+from subprocess import (
4146+ check_call,
4147+ check_output,
4148+)
4149+
4150+import six
4151+
4152+
4153+##################################################
4154+# loopback device helpers.
4155+##################################################
4156+def loopback_devices():
4157+ '''
4158+ Parse through 'losetup -a' output to determine currently mapped
4159+ loopback devices. Output is expected to look like:
4160+
4161+ /dev/loop0: [0807]:961814 (/tmp/my.img)
4162+
4163+ :returns: dict: a dict mapping {loopback_dev: backing_file}
4164+ '''
4165+ loopbacks = {}
4166+ cmd = ['losetup', '-a']
4167+ devs = [d.strip().split(' ') for d in
4168+ check_output(cmd).splitlines() if d != '']
4169+ for dev, _, f in devs:
4170+ loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
4171+ return loopbacks
4172+
4173+
4174+def create_loopback(file_path):
4175+ '''
4176+ Create a loopback device for a given backing file.
4177+
4178+ :returns: str: Full path to new loopback device (eg, /dev/loop0)
4179+ '''
4180+ file_path = os.path.abspath(file_path)
4181+ check_call(['losetup', '--find', file_path])
4182+ for d, f in six.iteritems(loopback_devices()):
4183+ if f == file_path:
4184+ return d
4185+
4186+
4187+def ensure_loopback_device(path, size):
4188+ '''
4189+ Ensure a loopback device exists for a given backing file path and size.
4190+ If a loopback device is not already mapped to the file, a new one will be created.
4191+
4192+ TODO: Confirm size of found loopback device.
4193+
4194+ :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
4195+ '''
4196+ for d, f in six.iteritems(loopback_devices()):
4197+ if f == path:
4198+ return d
4199+
4200+ if not os.path.exists(path):
4201+ cmd = ['truncate', '--size', size, path]
4202+ check_call(cmd)
4203+
4204+ return create_loopback(path)
4205
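As a usage sketch (path and size are illustrative), ensure_loopback_device() is what ensure_block_device() in contrib.openstack.utils relies on for 'path|size' style config values:

    # Creates a 5G sparse file at /tmp/my.img if needed, maps it via
    # losetup, and returns the device path, e.g. '/dev/loop0'.
    dev = ensure_loopback_device('/tmp/my.img', '5G')
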
4206=== added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
4207--- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000
4208+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2015-01-15 16:18:44 +0000
4209@@ -0,0 +1,89 @@
4210+from subprocess import (
4211+ CalledProcessError,
4212+ check_call,
4213+ check_output,
4214+ Popen,
4215+ PIPE,
4216+)
4217+
4218+
4219+##################################################
4220+# LVM helpers.
4221+##################################################
4222+def deactivate_lvm_volume_group(block_device):
4223+ '''
4224+ Deactivate any volume group associated with an LVM physical volume.
4225+
4226+ :param block_device: str: Full path to LVM physical volume
4227+ '''
4228+ vg = list_lvm_volume_group(block_device)
4229+ if vg:
4230+ cmd = ['vgchange', '-an', vg]
4231+ check_call(cmd)
4232+
4233+
4234+def is_lvm_physical_volume(block_device):
4235+ '''
4236+ Determine whether a block device is initialized as an LVM PV.
4237+
4238+ :param block_device: str: Full path of block device to inspect.
4239+
4240+ :returns: boolean: True if block device is a PV, False if not.
4241+ '''
4242+ try:
4243+ check_output(['pvdisplay', block_device])
4244+ return True
4245+ except CalledProcessError:
4246+ return False
4247+
4248+
4249+def remove_lvm_physical_volume(block_device):
4250+ '''
4251+ Remove LVM PV signatures from a given block device.
4252+
4253+ :param block_device: str: Full path of block device to scrub.
4254+ '''
4255+ p = Popen(['pvremove', '-ff', block_device],
4256+ stdin=PIPE)
4257+ p.communicate(input='y\n')
4258+
4259+
4260+def list_lvm_volume_group(block_device):
4261+ '''
4262+ List LVM volume group associated with a given block device.
4263+
4264+ Assumes block device is a valid LVM PV.
4265+
4266+ :param block_device: str: Full path of block device to inspect.
4267+
4268+ :returns: str: Name of volume group associated with block device or None
4269+ '''
4270+ vg = None
4271+ pvd = check_output(['pvdisplay', block_device]).splitlines()
4272+ for l in pvd:
4273+ l = l.decode('UTF-8')
4274+ if l.strip().startswith('VG Name'):
4275+ vg = ' '.join(l.strip().split()[2:])
4276+ return vg
4277+
4278+
4279+def create_lvm_physical_volume(block_device):
4280+ '''
4281+ Initialize a block device as an LVM physical volume.
4282+
4283+ :param block_device: str: Full path of block device to initialize.
4284+
4285+ '''
4286+ check_call(['pvcreate', block_device])
4287+
4288+
4289+def create_lvm_volume_group(volume_group, block_device):
4290+ '''
4291+ Create an LVM volume group backed by a given block device.
4292+
4293+ Assumes block device has already been initialized as an LVM PV.
4294+
4295+ :param volume_group: str: Name of volume group to create.
4296+ :block_device: str: Full path of PV-initialized block device.
4297+ '''
4298+ check_call(['vgcreate', volume_group, block_device])
4299
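Together with clean_storage() from contrib.openstack.utils earlier in this diff, these helpers support a wipe-and-reuse flow along these lines (device and volume group names are illustrative):

    dev = '/dev/vdb'
    if is_lvm_physical_volume(dev):
        deactivate_lvm_volume_group(dev)
        remove_lvm_physical_volume(dev)
    create_lvm_physical_volume(dev)
    create_lvm_volume_group('cinder-volumes', dev)
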
4300=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
4301--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-09-17 14:11:53 +0000
4302+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-01-15 16:18:44 +0000
4303@@ -30,7 +30,8 @@
4304 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
4305 call(['sgdisk', '--zap-all', '--mbrtogpt',
4306 '--clear', block_device])
4307- dev_end = check_output(['blockdev', '--getsz', block_device])
4308+ dev_end = check_output(['blockdev', '--getsz',
4309+ block_device]).decode('UTF-8')
4310 gpt_end = int(dev_end.split()[0]) - 100
4311 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
4312 'bs=1M', 'count=1'])
4313@@ -47,7 +48,7 @@
4314 it doesn't.
4315 '''
4316 is_partition = bool(re.search(r".*[0-9]+\b", device))
4317- out = check_output(['mount'])
4318+ out = check_output(['mount']).decode('UTF-8')
4319 if is_partition:
4320 return bool(re.search(device + r"\b", out))
4321 return bool(re.search(device + r"[0-9]+\b", out))
4322
4323=== added file 'hooks/charmhelpers/core/decorators.py'
4324--- hooks/charmhelpers/core/decorators.py 1970-01-01 00:00:00 +0000
4325+++ hooks/charmhelpers/core/decorators.py 2015-01-15 16:18:44 +0000
4326@@ -0,0 +1,41 @@
4327+#
4328+# Copyright 2014 Canonical Ltd.
4329+#
4330+# Authors:
4331+# Edward Hope-Morley <opentastic@gmail.com>
4332+#
4333+
4334+import time
4335+
4336+from charmhelpers.core.hookenv import (
4337+ log,
4338+ INFO,
4339+)
4340+
4341+
4342+def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
4343+ """If the decorated function raises exception exc_type, allow num_retries
4344+ retry attempts before re-raising the exception.
4345+ """
4346+ def _retry_on_exception_inner_1(f):
4347+ def _retry_on_exception_inner_2(*args, **kwargs):
4348+ retries = num_retries
4349+ multiplier = 1
4350+ while True:
4351+ try:
4352+ return f(*args, **kwargs)
4353+ except exc_type:
4354+ if not retries:
4355+ raise
4356+
4357+ delay = base_delay * multiplier
4358+ multiplier += 1
4359+ log("Retrying '%s' %d more times (delay=%s)" %
4360+ (f.__name__, retries, delay), level=INFO)
4361+ retries -= 1
4362+ if delay:
4363+ time.sleep(delay)
4364+
4365+ return _retry_on_exception_inner_2
4366+
4367+ return _retry_on_exception_inner_1
4368
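A minimal sketch of the new decorator in use (the command and exception type are illustrative). With num_retries=3 and base_delay=2 the delays grow linearly (2s, 4s, 6s) before the final attempt re-raises:

    from subprocess import CalledProcessError, check_call
    from charmhelpers.core.decorators import retry_on_exception

    @retry_on_exception(3, base_delay=2, exc_type=CalledProcessError)
    def mount_share():
        check_call(['mount', '/dev/loop0', '/mnt'])
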
4369=== modified file 'hooks/charmhelpers/core/fstab.py'
4370--- hooks/charmhelpers/core/fstab.py 2014-07-24 09:43:27 +0000
4371+++ hooks/charmhelpers/core/fstab.py 2015-01-15 16:18:44 +0000
4372@@ -3,10 +3,11 @@
4373
4374 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
4375
4376+import io
4377 import os
4378
4379
4380-class Fstab(file):
4381+class Fstab(io.FileIO):
4382 """This class extends file in order to implement a file reader/writer
4383 for file `/etc/fstab`
4384 """
4385@@ -24,8 +25,8 @@
4386 options = "defaults"
4387
4388 self.options = options
4389- self.d = d
4390- self.p = p
4391+ self.d = int(d)
4392+ self.p = int(p)
4393
4394 def __eq__(self, o):
4395 return str(self) == str(o)
4396@@ -45,7 +46,7 @@
4397 self._path = path
4398 else:
4399 self._path = self.DEFAULT_PATH
4400- file.__init__(self, self._path, 'r+')
4401+ super(Fstab, self).__init__(self._path, 'rb+')
4402
4403 def _hydrate_entry(self, line):
4404 # NOTE: use split with no arguments to split on any
4405@@ -58,8 +59,9 @@
4406 def entries(self):
4407 self.seek(0)
4408 for line in self.readlines():
4409+ line = line.decode('us-ascii')
4410 try:
4411- if not line.startswith("#"):
4412+ if line.strip() and not line.startswith("#"):
4413 yield self._hydrate_entry(line)
4414 except ValueError:
4415 pass
4416@@ -75,14 +77,14 @@
4417 if self.get_entry_by_attr('device', entry.device):
4418 return False
4419
4420- self.write(str(entry) + '\n')
4421+ self.write((str(entry) + '\n').encode('us-ascii'))
4422 self.truncate()
4423 return entry
4424
4425 def remove_entry(self, entry):
4426 self.seek(0)
4427
4428- lines = self.readlines()
4429+ lines = [l.decode('us-ascii') for l in self.readlines()]
4430
4431 found = False
4432 for index, line in enumerate(lines):
4433@@ -97,7 +99,7 @@
4434 lines.remove(line)
4435
4436 self.seek(0)
4437- self.write(''.join(lines))
4438+ self.write(''.join(lines).encode('us-ascii'))
4439 self.truncate()
4440 return True
4441
4442
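The port from file to io.FileIO means all I/O now goes through bytes; a round-trip sketch, with the Entry signature inferred from context rather than shown in this hunk:

    fstab = Fstab()  # opens /etc/fstab 'rb+' under io.FileIO
    entry = Fstab.Entry('/dev/loop0', '/mnt/data', 'ext4', 'defaults', 0, 0)
    fstab.add_entry(entry)     # str(entry) is encoded to us-ascii on write
    fstab.remove_entry(entry)  # lines are decoded back to str for matching
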
4443=== modified file 'hooks/charmhelpers/core/hookenv.py'
4444--- hooks/charmhelpers/core/hookenv.py 2014-10-21 07:28:36 +0000
4445+++ hooks/charmhelpers/core/hookenv.py 2015-01-15 16:18:44 +0000
4446@@ -9,9 +9,14 @@
4447 import yaml
4448 import subprocess
4449 import sys
4450-import UserDict
4451 from subprocess import CalledProcessError
4452
4453+import six
4454+if not six.PY3:
4455+ from UserDict import UserDict
4456+else:
4457+ from collections import UserDict
4458+
4459 CRITICAL = "CRITICAL"
4460 ERROR = "ERROR"
4461 WARNING = "WARNING"
4462@@ -63,16 +68,18 @@
4463 command = ['juju-log']
4464 if level:
4465 command += ['-l', level]
4466+ if not isinstance(message, six.string_types):
4467+ message = repr(message)
4468 command += [message]
4469 subprocess.call(command)
4470
4471
4472-class Serializable(UserDict.IterableUserDict):
4473+class Serializable(UserDict):
4474 """Wrapper, an object that can be serialized to yaml or json"""
4475
4476 def __init__(self, obj):
4477 # wrap the object
4478- UserDict.IterableUserDict.__init__(self)
4479+ UserDict.__init__(self)
4480 self.data = obj
4481
4482 def __getattr__(self, attr):
4483@@ -218,7 +225,7 @@
4484 prev_keys = []
4485 if self._prev_dict is not None:
4486 prev_keys = self._prev_dict.keys()
4487- return list(set(prev_keys + dict.keys(self)))
4488+ return list(set(prev_keys + list(dict.keys(self))))
4489
4490 def load_previous(self, path=None):
4491 """Load previous copy of config from disk.
4492@@ -269,7 +276,7 @@
4493
4494 """
4495 if self._prev_dict:
4496- for k, v in self._prev_dict.iteritems():
4497+ for k, v in six.iteritems(self._prev_dict):
4498 if k not in self:
4499 self[k] = v
4500 with open(self.path, 'w') as f:
4501@@ -284,7 +291,8 @@
4502 config_cmd_line.append(scope)
4503 config_cmd_line.append('--format=json')
4504 try:
4505- config_data = json.loads(subprocess.check_output(config_cmd_line))
4506+ config_data = json.loads(
4507+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
4508 if scope is not None:
4509 return config_data
4510 return Config(config_data)
4511@@ -303,10 +311,10 @@
4512 if unit:
4513 _args.append(unit)
4514 try:
4515- return json.loads(subprocess.check_output(_args))
4516+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
4517 except ValueError:
4518 return None
4519- except CalledProcessError, e:
4520+ except CalledProcessError as e:
4521 if e.returncode == 2:
4522 return None
4523 raise
4524@@ -318,7 +326,7 @@
4525 relation_cmd_line = ['relation-set']
4526 if relation_id is not None:
4527 relation_cmd_line.extend(('-r', relation_id))
4528- for k, v in (relation_settings.items() + kwargs.items()):
4529+ for k, v in (list(relation_settings.items()) + list(kwargs.items())):
4530 if v is None:
4531 relation_cmd_line.append('{}='.format(k))
4532 else:
4533@@ -335,7 +343,8 @@
4534 relid_cmd_line = ['relation-ids', '--format=json']
4535 if reltype is not None:
4536 relid_cmd_line.append(reltype)
4537- return json.loads(subprocess.check_output(relid_cmd_line)) or []
4538+ return json.loads(
4539+ subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
4540 return []
4541
4542
4543@@ -346,7 +355,8 @@
4544 units_cmd_line = ['relation-list', '--format=json']
4545 if relid is not None:
4546 units_cmd_line.extend(('-r', relid))
4547- return json.loads(subprocess.check_output(units_cmd_line)) or []
4548+ return json.loads(
4549+ subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
4550
4551
4552 @cached
4553@@ -386,21 +396,31 @@
4554
4555
4556 @cached
4557+def metadata():
4558+ """Get the current charm metadata.yaml contents as a python object"""
4559+ with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
4560+ return yaml.safe_load(md)
4561+
4562+
4563+@cached
4564 def relation_types():
4565 """Get a list of relation types supported by this charm"""
4566- charmdir = os.environ.get('CHARM_DIR', '')
4567- mdf = open(os.path.join(charmdir, 'metadata.yaml'))
4568- md = yaml.safe_load(mdf)
4569 rel_types = []
4570+ md = metadata()
4571 for key in ('provides', 'requires', 'peers'):
4572 section = md.get(key)
4573 if section:
4574 rel_types.extend(section.keys())
4575- mdf.close()
4576 return rel_types
4577
4578
4579 @cached
4580+def charm_name():
4581+ """Get the name of the current charm as is specified on metadata.yaml"""
4582+ return metadata().get('name')
4583+
4584+
4585+@cached
4586 def relations():
4587 """Get a nested dictionary of relation data for all related units"""
4588 rels = {}
4589@@ -455,7 +475,7 @@
4590 """Get the unit ID for the remote unit"""
4591 _args = ['unit-get', '--format=json', attribute]
4592 try:
4593- return json.loads(subprocess.check_output(_args))
4594+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
4595 except ValueError:
4596 return None
4597
4598
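The new cached metadata() and charm_name() helpers replace the ad-hoc metadata.yaml parsing in relation_types(); a trivial sketch:

    from charmhelpers.core.hookenv import metadata, charm_name

    md = metadata()                    # parsed metadata.yaml as a dict
    assert charm_name() == md.get('name')
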
4599=== modified file 'hooks/charmhelpers/core/host.py'
4600--- hooks/charmhelpers/core/host.py 2014-10-21 07:28:36 +0000
4601+++ hooks/charmhelpers/core/host.py 2015-01-15 16:18:44 +0000
4602@@ -14,11 +14,12 @@
4603 import subprocess
4604 import hashlib
4605 from contextlib import contextmanager
4606-
4607 from collections import OrderedDict
4608
4609-from hookenv import log
4610-from fstab import Fstab
4611+import six
4612+
4613+from .hookenv import log
4614+from .fstab import Fstab
4615
4616
4617 def service_start(service_name):
4618@@ -54,7 +55,9 @@
4619 def service_running(service):
4620 """Determine whether a system service is running"""
4621 try:
4622- output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
4623+ output = subprocess.check_output(
4624+ ['service', service, 'status'],
4625+ stderr=subprocess.STDOUT).decode('UTF-8')
4626 except subprocess.CalledProcessError:
4627 return False
4628 else:
4629@@ -67,7 +70,9 @@
4630 def service_available(service_name):
4631 """Determine whether a system service is available"""
4632 try:
4633- subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
4634+ subprocess.check_output(
4635+ ['service', service_name, 'status'],
4636+ stderr=subprocess.STDOUT).decode('UTF-8')
4637 except subprocess.CalledProcessError as e:
4638 return 'unrecognized service' not in e.output
4639 else:
4640@@ -96,6 +101,26 @@
4641 return user_info
4642
4643
4644+def add_group(group_name, system_group=False):
4645+ """Add a group to the system"""
4646+ try:
4647+ group_info = grp.getgrnam(group_name)
4648+ log('group {0} already exists!'.format(group_name))
4649+ except KeyError:
4650+ log('creating group {0}'.format(group_name))
4651+ cmd = ['addgroup']
4652+ if system_group:
4653+ cmd.append('--system')
4654+ else:
4655+ cmd.extend([
4656+ '--group',
4657+ ])
4658+ cmd.append(group_name)
4659+ subprocess.check_call(cmd)
4660+ group_info = grp.getgrnam(group_name)
4661+ return group_info
4662+
4663+
4664 def add_user_to_group(username, group):
4665 """Add a user to a group"""
4666 cmd = [
4667@@ -115,7 +140,7 @@
4668 cmd.append(from_path)
4669 cmd.append(to_path)
4670 log(" ".join(cmd))
4671- return subprocess.check_output(cmd).strip()
4672+ return subprocess.check_output(cmd).decode('UTF-8').strip()
4673
4674
4675 def symlink(source, destination):
4676@@ -130,23 +155,26 @@
4677 subprocess.check_call(cmd)
4678
4679
4680-def mkdir(path, owner='root', group='root', perms=0555, force=False):
4681+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
4682 """Create a directory"""
4683 log("Making dir {} {}:{} {:o}".format(path, owner, group,
4684 perms))
4685 uid = pwd.getpwnam(owner).pw_uid
4686 gid = grp.getgrnam(group).gr_gid
4687 realpath = os.path.abspath(path)
4688- if os.path.exists(realpath):
4689- if force and not os.path.isdir(realpath):
4690+ path_exists = os.path.exists(realpath)
4691+ if path_exists and force:
4692+ if not os.path.isdir(realpath):
4693 log("Removing non-directory file {} prior to mkdir()".format(path))
4694 os.unlink(realpath)
4695- else:
4696+ os.makedirs(realpath, perms)
4697+ os.chown(realpath, uid, gid)
4698+ elif not path_exists:
4699 os.makedirs(realpath, perms)
4700- os.chown(realpath, uid, gid)
4701-
4702-
4703-def write_file(path, content, owner='root', group='root', perms=0444):
4704+ os.chown(realpath, uid, gid)
4705+
4706+
4707+def write_file(path, content, owner='root', group='root', perms=0o444):
4708 """Create or overwrite a file with the contents of a string"""
4709 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
4710 uid = pwd.getpwnam(owner).pw_uid
4711@@ -177,7 +205,7 @@
4712 cmd_args.extend([device, mountpoint])
4713 try:
4714 subprocess.check_output(cmd_args)
4715- except subprocess.CalledProcessError, e:
4716+ except subprocess.CalledProcessError as e:
4717 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
4718 return False
4719
4720@@ -191,7 +219,7 @@
4721 cmd_args = ['umount', mountpoint]
4722 try:
4723 subprocess.check_output(cmd_args)
4724- except subprocess.CalledProcessError, e:
4725+ except subprocess.CalledProcessError as e:
4726 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
4727 return False
4728
4729@@ -218,8 +246,8 @@
4730 """
4731 if os.path.exists(path):
4732 h = getattr(hashlib, hash_type)()
4733- with open(path, 'r') as source:
4734- h.update(source.read()) # IGNORE:E1101 - it does have update
4735+ with open(path, 'rb') as source:
4736+ h.update(source.read())
4737 return h.hexdigest()
4738 else:
4739 return None
4740@@ -297,7 +325,7 @@
4741 if length is None:
4742 length = random.choice(range(35, 45))
4743 alphanumeric_chars = [
4744- l for l in (string.letters + string.digits)
4745+ l for l in (string.ascii_letters + string.digits)
4746 if l not in 'l0QD1vAEIOUaeiou']
4747 random_chars = [
4748 random.choice(alphanumeric_chars) for _ in range(length)]
4749@@ -306,14 +334,14 @@
4750
4751 def list_nics(nic_type):
4752 '''Return a list of nics of given type(s)'''
4753- if isinstance(nic_type, basestring):
4754+ if isinstance(nic_type, six.string_types):
4755 int_types = [nic_type]
4756 else:
4757 int_types = nic_type
4758 interfaces = []
4759 for int_type in int_types:
4760 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
4761- ip_output = subprocess.check_output(cmd).split('\n')
4762+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
4763 ip_output = (line for line in ip_output if line)
4764 for line in ip_output:
4765 if line.split()[1].startswith(int_type):
4766@@ -335,7 +363,7 @@
4767
4768 def get_nic_mtu(nic):
4769 cmd = ['ip', 'addr', 'show', nic]
4770- ip_output = subprocess.check_output(cmd).split('\n')
4771+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
4772 mtu = ""
4773 for line in ip_output:
4774 words = line.split()
4775@@ -346,7 +374,7 @@
4776
4777 def get_nic_hwaddr(nic):
4778 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
4779- ip_output = subprocess.check_output(cmd)
4780+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
4781 hwaddr = ""
4782 words = ip_output.split()
4783 if 'link/ether' in words:
4784@@ -363,8 +391,8 @@
4785
4786 '''
4787 import apt_pkg
4788- from charmhelpers.fetch import apt_cache
4789 if not pkgcache:
4790+ from charmhelpers.fetch import apt_cache
4791 pkgcache = apt_cache()
4792 pkg = pkgcache[package]
4793 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
4794
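A usage sketch for the new add_group() helper alongside the existing add_user_to_group(); the group name is illustrative:

    from charmhelpers.core.host import add_group, add_user_to_group

    add_group('radosgw', system_group=True)  # runs: addgroup --system radosgw
    add_user_to_group('www-data', 'radosgw')
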
4795=== modified file 'hooks/charmhelpers/core/services/__init__.py'
4796--- hooks/charmhelpers/core/services/__init__.py 2014-09-17 14:11:53 +0000
4797+++ hooks/charmhelpers/core/services/__init__.py 2015-01-15 16:18:44 +0000
4798@@ -1,2 +1,2 @@
4799-from .base import *
4800-from .helpers import *
4801+from .base import * # NOQA
4802+from .helpers import * # NOQA
4803
4804=== modified file 'hooks/charmhelpers/core/services/helpers.py'
4805--- hooks/charmhelpers/core/services/helpers.py 2014-09-27 17:33:59 +0000
4806+++ hooks/charmhelpers/core/services/helpers.py 2015-01-15 16:18:44 +0000
4807@@ -196,7 +196,7 @@
4808 if not os.path.isabs(file_name):
4809 file_name = os.path.join(hookenv.charm_dir(), file_name)
4810 with open(file_name, 'w') as file_stream:
4811- os.fchmod(file_stream.fileno(), 0600)
4812+ os.fchmod(file_stream.fileno(), 0o600)
4813 yaml.dump(config_data, file_stream)
4814
4815 def read_context(self, file_name):
4816@@ -211,15 +211,19 @@
4817
4818 class TemplateCallback(ManagerCallback):
4819 """
4820- Callback class that will render a Jinja2 template, for use as a ready action.
4821-
4822- :param str source: The template source file, relative to `$CHARM_DIR/templates`
4823+ Callback class that will render a Jinja2 template, for use as a ready
4824+ action.
4825+
4826+ :param str source: The template source file, relative to
4827+ `$CHARM_DIR/templates`
4828+
4829 :param str target: The target to write the rendered template to
4830 :param str owner: The owner of the rendered file
4831 :param str group: The group of the rendered file
4832 :param int perms: The permissions of the rendered file
4833 """
4834- def __init__(self, source, target, owner='root', group='root', perms=0444):
4835+ def __init__(self, source, target,
4836+ owner='root', group='root', perms=0o444):
4837 self.source = source
4838 self.target = target
4839 self.owner = owner
4840
4841=== modified file 'hooks/charmhelpers/core/templating.py'
4842--- hooks/charmhelpers/core/templating.py 2014-09-17 14:11:53 +0000
4843+++ hooks/charmhelpers/core/templating.py 2015-01-15 16:18:44 +0000
4844@@ -4,7 +4,8 @@
4845 from charmhelpers.core import hookenv
4846
4847
4848-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
4849+def render(source, target, context, owner='root', group='root',
4850+ perms=0o444, templates_dir=None):
4851 """
4852 Render a template.
4853
4854@@ -47,5 +48,5 @@
4855 level=hookenv.ERROR)
4856 raise e
4857 content = template.render(context)
4858- host.mkdir(os.path.dirname(target))
4859+ host.mkdir(os.path.dirname(target), owner, group)
4860 host.write_file(target, content, owner, group, perms)
4861
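With the mkdir fix above, render() now creates a missing parent directory owned by the requested owner/group rather than root; a sketch with illustrative template, target, and context:

    from charmhelpers.core.templating import render

    render(source='rgw.conf', target='/etc/ceph/rgw.conf',
           context={'mon_hosts': ['10.0.0.1', '10.0.0.2']},
           owner='root', group='root', perms=0o640)
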
4862=== modified file 'hooks/charmhelpers/fetch/__init__.py'
4863--- hooks/charmhelpers/fetch/__init__.py 2014-10-21 07:28:36 +0000
4864+++ hooks/charmhelpers/fetch/__init__.py 2015-01-15 16:18:44 +0000
4865@@ -5,10 +5,6 @@
4866 from charmhelpers.core.host import (
4867 lsb_release
4868 )
4869-from urlparse import (
4870- urlparse,
4871- urlunparse,
4872-)
4873 import subprocess
4874 from charmhelpers.core.hookenv import (
4875 config,
4876@@ -16,6 +12,12 @@
4877 )
4878 import os
4879
4880+import six
4881+if six.PY3:
4882+ from urllib.parse import urlparse, urlunparse
4883+else:
4884+ from urlparse import urlparse, urlunparse
4885+
4886
4887 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
4888 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
4889@@ -62,9 +64,16 @@
4890 'trusty-juno/updates': 'trusty-updates/juno',
4891 'trusty-updates/juno': 'trusty-updates/juno',
4892 'juno/proposed': 'trusty-proposed/juno',
4893- 'juno/proposed': 'trusty-proposed/juno',
4894 'trusty-juno/proposed': 'trusty-proposed/juno',
4895 'trusty-proposed/juno': 'trusty-proposed/juno',
4896+ # Kilo
4897+ 'kilo': 'trusty-updates/kilo',
4898+ 'trusty-kilo': 'trusty-updates/kilo',
4899+ 'trusty-kilo/updates': 'trusty-updates/kilo',
4900+ 'trusty-updates/kilo': 'trusty-updates/kilo',
4901+ 'kilo/proposed': 'trusty-proposed/kilo',
4902+ 'trusty-kilo/proposed': 'trusty-proposed/kilo',
4903+ 'trusty-proposed/kilo': 'trusty-proposed/kilo',
4904 }
4905
4906 # The order of this list is very important. Handlers should be listed in from
4907@@ -149,7 +158,7 @@
4908 cmd = ['apt-get', '--assume-yes']
4909 cmd.extend(options)
4910 cmd.append('install')
4911- if isinstance(packages, basestring):
4912+ if isinstance(packages, six.string_types):
4913 cmd.append(packages)
4914 else:
4915 cmd.extend(packages)
4916@@ -182,7 +191,7 @@
4917 def apt_purge(packages, fatal=False):
4918 """Purge one or more packages"""
4919 cmd = ['apt-get', '--assume-yes', 'purge']
4920- if isinstance(packages, basestring):
4921+ if isinstance(packages, six.string_types):
4922 cmd.append(packages)
4923 else:
4924 cmd.extend(packages)
4925@@ -193,7 +202,7 @@
4926 def apt_hold(packages, fatal=False):
4927 """Hold one or more packages"""
4928 cmd = ['apt-mark', 'hold']
4929- if isinstance(packages, basestring):
4930+ if isinstance(packages, six.string_types):
4931 cmd.append(packages)
4932 else:
4933 cmd.extend(packages)
4934@@ -256,11 +265,11 @@
4935 elif source == 'distro':
4936 pass
4937 else:
4938- raise SourceConfigError("Unknown source: {!r}".format(source))
4939+ log("Unknown source: {!r}".format(source))
4940
4941 if key:
4942 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
4943- with NamedTemporaryFile() as key_file:
4944+ with NamedTemporaryFile('w+') as key_file:
4945 key_file.write(key)
4946 key_file.flush()
4947 key_file.seek(0)
4948@@ -297,14 +306,14 @@
4949 sources = safe_load((config(sources_var) or '').strip()) or []
4950 keys = safe_load((config(keys_var) or '').strip()) or None
4951
4952- if isinstance(sources, basestring):
4953+ if isinstance(sources, six.string_types):
4954 sources = [sources]
4955
4956 if keys is None:
4957 for source in sources:
4958 add_source(source, None)
4959 else:
4960- if isinstance(keys, basestring):
4961+ if isinstance(keys, six.string_types):
4962 keys = [keys]
4963
4964 if len(sources) != len(keys):
4965@@ -401,7 +410,7 @@
4966 while result is None or result == APT_NO_LOCK:
4967 try:
4968 result = subprocess.check_call(cmd, env=env)
4969- except subprocess.CalledProcessError, e:
4970+ except subprocess.CalledProcessError as e:
4971 retry_count = retry_count + 1
4972 if retry_count > APT_NO_LOCK_RETRY_COUNT:
4973 raise
4974
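A sketch of how the new Kilo entries are consumed; add_source() looks 'cloud:' origins up in CLOUD_ARCHIVE_POCKETS, so all of the aliases added above land on the same pockets:

    from charmhelpers.fetch import add_source

    add_source('cloud:trusty-kilo')    # -> trusty-updates/kilo
    add_source('cloud:kilo/proposed')  # -> trusty-proposed/kilo
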
4975=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
4976--- hooks/charmhelpers/fetch/archiveurl.py 2014-09-27 17:33:59 +0000
4977+++ hooks/charmhelpers/fetch/archiveurl.py 2015-01-15 16:18:44 +0000
4978@@ -1,8 +1,23 @@
4979 import os
4980-import urllib2
4981-from urllib import urlretrieve
4982-import urlparse
4983 import hashlib
4984+import re
4985+
4986+import six
4987+if six.PY3:
4988+ from urllib.request import (
4989+ build_opener, install_opener, urlopen, urlretrieve,
4990+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
4991+ )
4992+ from urllib.parse import urlparse, urlunparse, parse_qs
4993+ from urllib.error import URLError
4994+else:
4995+ from urllib import urlretrieve
4996+ from urllib2 import (
4997+ build_opener, install_opener, urlopen,
4998+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
4999+ URLError
5000+ )
The diff has been truncated for viewing.
