Merge lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha into lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next

Proposed by Liam Young
Status: Merged
Merged at revision: 32
Proposed branch: lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha
Merge into: lp:~openstack-charmers-archive/charms/trusty/ceph-radosgw/next
Diff against target: 5611 lines (+4608/-116)
46 files modified
charm-helpers-hooks.yaml (+8/-1)
config.yaml (+20/-0)
files/ports.conf (+11/-0)
hooks/ceph_radosgw_context.py (+29/-0)
hooks/charmhelpers/__init__.py (+22/-0)
hooks/charmhelpers/contrib/hahelpers/apache.py (+66/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+248/-0)
hooks/charmhelpers/contrib/network/ip.py (+351/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+92/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+278/-0)
hooks/charmhelpers/contrib/openstack/context.py (+1038/-0)
hooks/charmhelpers/contrib/openstack/ip.py (+93/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+223/-0)
hooks/charmhelpers/contrib/openstack/templates/__init__.py (+2/-0)
hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+15/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+58/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+24/-0)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+24/-0)
hooks/charmhelpers/contrib/openstack/templating.py (+279/-0)
hooks/charmhelpers/contrib/openstack/utils.py (+625/-0)
hooks/charmhelpers/contrib/python/packages.py (+77/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+428/-0)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+89/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/decorators.py (+41/-0)
hooks/charmhelpers/core/fstab.py (+10/-8)
hooks/charmhelpers/core/hookenv.py (+36/-16)
hooks/charmhelpers/core/host.py (+52/-24)
hooks/charmhelpers/core/services/__init__.py (+2/-2)
hooks/charmhelpers/core/services/helpers.py (+9/-5)
hooks/charmhelpers/core/templating.py (+3/-2)
hooks/charmhelpers/fetch/__init__.py (+22/-13)
hooks/charmhelpers/fetch/archiveurl.py (+53/-16)
hooks/charmhelpers/fetch/bzrurl.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+12/-5)
hooks/hooks.py (+118/-8)
hooks/utils.py (+36/-2)
metadata.yaml (+6/-0)
templates/ceph.conf (+1/-1)
templates/rgw (+1/-1)
tests/charmhelpers/__init__.py (+22/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+3/-3)
tests/charmhelpers/contrib/amulet/utils.py (+6/-4)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/ceph-radosgw/next-support-ha
Reviewer: James Page
Review status: Approve
Review via email: mp+243263@code.launchpad.net

Description of the change

Add HA support

Revision history for this message
James Page (james-page) :
review: Approve

Preview Diff

=== modified file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 2014-09-27 02:57:08 +0000
+++ charm-helpers-hooks.yaml 2015-01-15 16:18:44 +0000
@@ -5,5 +5,12 @@
     - fetch
     - contrib.storage.linux:
         - utils
+    - contrib.hahelpers:
+        - apache
+        - cluster
     - payload.execd
-    - contrib.openstack.alternatives
+    - contrib.openstack|inc=*
+    - contrib.network.ip
+    - contrib.openstack.ip
+    - contrib.storage.linux
+    - contrib.python.packages
=== modified file 'config.yaml'
--- config.yaml 2015-01-14 09:10:04 +0000
+++ config.yaml 2015-01-15 16:18:44 +0000
@@ -67,3 +67,23 @@
       .
       Enable this option to disable use of Apache and enable the embedded
       web container feature.
+  vip:
+    type: string
+    default:
+    description: |
+      Virtual IP(s) to use to front API services in HA configuration.
+      .
+      If multiple networks are being used, a VIP should be provided for each
+      network, separated by spaces.
+  ha-bindiface:
+    type: string
+    default: eth0
+    description: |
+      Default network interface on which the HA cluster will bind for
+      communication with the other members of the HA cluster.
+  ha-mcastport:
+    type: int
+    default: 5414
+    description: |
+      Default multicast port number that will be used to communicate between
+      HA cluster nodes.
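
The three new options above are what the charm passes to the hacluster
subordinate once the ha relation is joined; the actual handler lives in
hooks/hooks.py, whose hunk is not shown in this excerpt. A minimal sketch of
that pattern, assuming the usual charm-helpers flow (the function body below
is illustrative, not the charm's real code):

# Sketch only: assumes the standard charm-helpers HA pattern.
from charmhelpers.core.hookenv import relation_set
from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config


def ha_joined():
    # Reads vip, ha-bindiface and ha-mcastport; raises HAIncompleteConfig
    # if any of them is unset (see contrib/hahelpers/cluster.py below).
    cluster_config = get_hacluster_config()
    relation_set(corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'])
    # A real handler would also declare pacemaker VIP resources built from
    # cluster_config['vip']; that part is omitted here.
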
=== added file 'files/ports.conf'
--- files/ports.conf 1970-01-01 00:00:00 +0000
+++ files/ports.conf 2015-01-15 16:18:44 +0000
@@ -0,0 +1,11 @@
1Listen 70
2
3<IfModule ssl_module>
4 Listen 443
5</IfModule>
6
7<IfModule mod_gnutls.c>
8 Listen 443
9</IfModule>
10
11# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
=== added file 'hooks/ceph_radosgw_context.py'
--- hooks/ceph_radosgw_context.py 1970-01-01 00:00:00 +0000
+++ hooks/ceph_radosgw_context.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,29 @@
1from charmhelpers.contrib.openstack import context
2from charmhelpers.contrib.hahelpers.cluster import (
3 determine_api_port,
4 determine_apache_port,
5)
6
7
8class HAProxyContext(context.HAProxyContext):
9
10 def __call__(self):
11 ctxt = super(HAProxyContext, self).__call__()
12
13 # Apache ports
14 a_cephradosgw_api = determine_apache_port(80,
15 singlenode_mode=True)
16
17 port_mapping = {
18 'cephradosgw-server': [
19 80, a_cephradosgw_api]
20 }
21
22 ctxt['cephradosgw_bind_port'] = determine_api_port(
23 80,
24 singlenode_mode=True,
25 )
26
27 # for haproxy.conf
28 ctxt['service_ports'] = port_mapping
29 return ctxt
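
The effect of this context on a fresh single unit (no peers, HTTPS not yet
configured) explains the "Listen 70" added in files/ports.conf above. A
sketch of the values it produces, derived from the determine_*_port helpers
in contrib/hahelpers/cluster.py further down this diff (illustrative, not
captured charm output):

# determine_apache_port(80, singlenode_mode=True) -> 70   (80 - 10)
# determine_api_port(80, singlenode_mode=True)    -> 70   (60 once HTTPS
#                                                          is configured)
ctxt_extra = {
    'service_ports': {'cephradosgw-server': [80, 70]},  # haproxy front/back
    'cephradosgw_bind_port': 70,                        # radosgw binds here
}
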
=== modified file 'hooks/charmhelpers/__init__.py'
--- hooks/charmhelpers/__init__.py 2014-01-24 16:02:57 +0000
+++ hooks/charmhelpers/__init__.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,22 @@
1# Bootstrap charm-helpers, installing its dependencies if necessary using
2# only standard libraries.
3import subprocess
4import sys
5
6try:
7 import six # flake8: noqa
8except ImportError:
9 if sys.version_info.major == 2:
10 subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
11 else:
12 subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
13 import six # flake8: noqa
14
15try:
16 import yaml # flake8: noqa
17except ImportError:
18 if sys.version_info.major == 2:
19 subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
20 else:
21 subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
22 import yaml # flake8: noqa
=== added directory 'hooks/charmhelpers/contrib/hahelpers'
=== added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
=== added file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
--- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,66 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import subprocess
12
13from charmhelpers.core.hookenv import (
14 config as config_get,
15 relation_get,
16 relation_ids,
17 related_units as relation_list,
18 log,
19 INFO,
20)
21
22
23def get_cert(cn=None):
24 # TODO: deal with multiple https endpoints via charm config
25 cert = config_get('ssl_cert')
26 key = config_get('ssl_key')
27 if not (cert and key):
28 log("Inspecting identity-service relations for SSL certificate.",
29 level=INFO)
30 cert = key = None
31 if cn:
32 ssl_cert_attr = 'ssl_cert_{}'.format(cn)
33 ssl_key_attr = 'ssl_key_{}'.format(cn)
34 else:
35 ssl_cert_attr = 'ssl_cert'
36 ssl_key_attr = 'ssl_key'
37 for r_id in relation_ids('identity-service'):
38 for unit in relation_list(r_id):
39 if not cert:
40 cert = relation_get(ssl_cert_attr,
41 rid=r_id, unit=unit)
42 if not key:
43 key = relation_get(ssl_key_attr,
44 rid=r_id, unit=unit)
45 return (cert, key)
46
47
48def get_ca_cert():
49 ca_cert = config_get('ssl_ca')
50 if ca_cert is None:
51 log("Inspecting identity-service relations for CA SSL certificate.",
52 level=INFO)
53 for r_id in relation_ids('identity-service'):
54 for unit in relation_list(r_id):
55 if ca_cert is None:
56 ca_cert = relation_get('ca_cert',
57 rid=r_id, unit=unit)
58 return ca_cert
59
60
61def install_ca_cert(ca_cert):
62 if ca_cert:
63 with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
64 'w') as crt:
65 crt.write(ca_cert)
66 subprocess.check_call(['update-ca-certificates', '--fresh'])
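
These helpers fall back from charm config (ssl_cert/ssl_key/ssl_ca) to
certificates published on the identity-service relation. A minimal usage
sketch follows; the CN and file paths are illustrative only, and
ApacheSSLContext later in this diff does the same work via configure_cert()
and configure_ca():

from base64 import b64decode

from charmhelpers.contrib.hahelpers.apache import (
    get_cert,
    get_ca_cert,
    install_ca_cert,
)

cert, key = get_cert(cn='radosgw.example.com')  # example CN only
if cert and key:
    # Certificate material arrives base64 encoded, hence the decode.
    with open('/etc/apache2/ssl/cert', 'wb') as out:
        out.write(b64decode(cert))
    with open('/etc/apache2/ssl/key', 'wb') as out:
        out.write(b64decode(key))

ca_cert = get_ca_cert()
if ca_cert:
    install_ca_cert(b64decode(ca_cert))
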
=== added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,248 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# Authors:
5# James Page <james.page@ubuntu.com>
6# Adam Gandelman <adamg@ubuntu.com>
7#
8
9"""
10Helpers for clustering and determining "cluster leadership" and other
11clustering-related helpers.
12"""
13
14import subprocess
15import os
16
17from socket import gethostname as get_unit_hostname
18
19import six
20
21from charmhelpers.core.hookenv import (
22 log,
23 relation_ids,
24 related_units as relation_list,
25 relation_get,
26 config as config_get,
27 INFO,
28 ERROR,
29 WARNING,
30 unit_get,
31)
32from charmhelpers.core.decorators import (
33 retry_on_exception,
34)
35
36
37class HAIncompleteConfig(Exception):
38 pass
39
40
41class CRMResourceNotFound(Exception):
42 pass
43
44
45def is_elected_leader(resource):
46 """
47 Returns True if the charm executing this is the elected cluster leader.
48
49 It relies on two mechanisms to determine leadership:
50 1. If the charm is part of a corosync cluster, call corosync to
51 determine leadership.
52 2. If the charm is not part of a corosync cluster, the leader is
53 determined as being "the alive unit with the lowest unit number". In
54 other words, the oldest surviving unit.
55 """
56 if is_clustered():
57 if not is_crm_leader(resource):
58 log('Deferring action to CRM leader.', level=INFO)
59 return False
60 else:
61 peers = peer_units()
62 if peers and not oldest_peer(peers):
63 log('Deferring action to oldest service unit.', level=INFO)
64 return False
65 return True
66
67
68def is_clustered():
69 for r_id in (relation_ids('ha') or []):
70 for unit in (relation_list(r_id) or []):
71 clustered = relation_get('clustered',
72 rid=r_id,
73 unit=unit)
74 if clustered:
75 return True
76 return False
77
78
79@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
80def is_crm_leader(resource, retry=False):
81 """
82 Returns True if the charm calling this is the elected corosync leader,
83 as returned by calling the external "crm" command.
84
85 We allow this operation to be retried to avoid the possibility of getting a
86 false negative. See LP #1396246 for more info.
87 """
88 cmd = ['crm', 'resource', 'show', resource]
89 try:
90 status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
91 if not isinstance(status, six.text_type):
92 status = six.text_type(status, "utf-8")
93 except subprocess.CalledProcessError:
94 status = None
95
96 if status and get_unit_hostname() in status:
97 return True
98
99 if status and "resource %s is NOT running" % (resource) in status:
100 raise CRMResourceNotFound("CRM resource %s not found" % (resource))
101
102 return False
103
104
105def is_leader(resource):
106 log("is_leader is deprecated. Please consider using is_crm_leader "
107 "instead.", level=WARNING)
108 return is_crm_leader(resource)
109
110
111def peer_units(peer_relation="cluster"):
112 peers = []
113 for r_id in (relation_ids(peer_relation) or []):
114 for unit in (relation_list(r_id) or []):
115 peers.append(unit)
116 return peers
117
118
119def peer_ips(peer_relation='cluster', addr_key='private-address'):
120 '''Return a dict of peers and their private-address'''
121 peers = {}
122 for r_id in relation_ids(peer_relation):
123 for unit in relation_list(r_id):
124 peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
125 return peers
126
127
128def oldest_peer(peers):
129 """Determines who the oldest peer is by comparing unit numbers."""
130 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
131 for peer in peers:
132 remote_unit_no = int(peer.split('/')[1])
133 if remote_unit_no < local_unit_no:
134 return False
135 return True
136
137
138def eligible_leader(resource):
139 log("eligible_leader is deprecated. Please consider using "
140 "is_elected_leader instead.", level=WARNING)
141 return is_elected_leader(resource)
142
143
144def https():
145 '''
146 Determines whether enough data has been provided in configuration
147 or relation data to configure HTTPS
148 .
149 returns: boolean
150 '''
151 if config_get('use-https') == "yes":
152 return True
153 if config_get('ssl_cert') and config_get('ssl_key'):
154 return True
155 for r_id in relation_ids('identity-service'):
156 for unit in relation_list(r_id):
157 # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
158 rel_state = [
159 relation_get('https_keystone', rid=r_id, unit=unit),
160 relation_get('ca_cert', rid=r_id, unit=unit),
161 ]
162 # NOTE: works around (LP: #1203241)
163 if (None not in rel_state) and ('' not in rel_state):
164 return True
165 return False
166
167
168def determine_api_port(public_port, singlenode_mode=False):
169 '''
170 Determine correct API server listening port based on
171 existence of HTTPS reverse proxy and/or haproxy.
172
173 public_port: int: standard public port for given service
174
175 singlenode_mode: boolean: Shuffle ports when only a single unit is present
176
177 returns: int: the correct listening port for the API service
178 '''
179 i = 0
180 if singlenode_mode:
181 i += 1
182 elif len(peer_units()) > 0 or is_clustered():
183 i += 1
184 if https():
185 i += 1
186 return public_port - (i * 10)
187
188
189def determine_apache_port(public_port, singlenode_mode=False):
190 '''
191 Description: Determine correct apache listening port based on public IP +
192 state of the cluster.
193
194 public_port: int: standard public port for given service
195
196 singlenode_mode: boolean: Shuffle ports when only a single unit is present
197
198 returns: int: the correct listening port for the HAProxy service
199 '''
200 i = 0
201 if singlenode_mode:
202 i += 1
203 elif len(peer_units()) > 0 or is_clustered():
204 i += 1
205 return public_port - (i * 10)
206
207
208def get_hacluster_config():
209 '''
210 Obtains all relevant configuration from charm configuration required
211 for initiating a relation to hacluster:
212
213 ha-bindiface, ha-mcastport, vip
214
215 returns: dict: A dict containing settings keyed by setting name.
216 raises: HAIncompleteConfig if settings are missing.
217 '''
218 settings = ['ha-bindiface', 'ha-mcastport', 'vip']
219 conf = {}
220 for setting in settings:
221 conf[setting] = config_get(setting)
222 missing = []
223 [missing.append(s) for s, v in six.iteritems(conf) if v is None]
224 if missing:
225 log('Insufficient config data to configure hacluster.', level=ERROR)
226 raise HAIncompleteConfig
227 return conf
228
229
230def canonical_url(configs, vip_setting='vip'):
231 '''
232 Returns the correct HTTP URL to this host given the state of HTTPS
233 configuration and hacluster.
234
235 :configs : OSTemplateRenderer: A config tempating object to inspect for
236 a complete https context.
237
238 :vip_setting: str: Setting in charm config that specifies
239 VIP address.
240 '''
241 scheme = 'http'
242 if 'https' in configs.complete_contexts():
243 scheme = 'https'
244 if is_clustered():
245 addr = config_get(vip_setting)
246 else:
247 addr = unit_get('private-address')
248 return '%s://%s' % (scheme, addr)
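
The port arithmetic in determine_api_port() and determine_apache_port() is
the core of the HA wiring: each proxy layer placed in front of a service
pushes its listening port down by 10. A worked example with the radosgw
public port (assumed inputs, no peer relation):

from charmhelpers.contrib.hahelpers.cluster import (
    determine_apache_port,
    determine_api_port,
)

# singlenode_mode=True always reserves a slot for haproxy, so apache (or the
# embedded web container) moves from 80 down to 70 -- which is why
# files/ports.conf above adds "Listen 70".
assert determine_apache_port(80, singlenode_mode=True) == 70

# determine_api_port() subtracts a further 10 when https() is True, so the
# API backend ends up on 70 without TLS and 60 with it.
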
=== added directory 'hooks/charmhelpers/contrib/network'
=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
=== added file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,351 @@
1import glob
2import re
3import subprocess
4
5from functools import partial
6
7from charmhelpers.core.hookenv import unit_get
8from charmhelpers.fetch import apt_install
9from charmhelpers.core.hookenv import (
10 log
11)
12
13try:
14 import netifaces
15except ImportError:
16 apt_install('python-netifaces')
17 import netifaces
18
19try:
20 import netaddr
21except ImportError:
22 apt_install('python-netaddr')
23 import netaddr
24
25
26def _validate_cidr(network):
27 try:
28 netaddr.IPNetwork(network)
29 except (netaddr.core.AddrFormatError, ValueError):
30 raise ValueError("Network (%s) is not in CIDR presentation format" %
31 network)
32
33
34def no_ip_found_error_out(network):
35 errmsg = ("No IP address found in network: %s" % network)
36 raise ValueError(errmsg)
37
38
39def get_address_in_network(network, fallback=None, fatal=False):
40 """Get an IPv4 or IPv6 address within the network from the host.
41
42 :param network (str): CIDR presentation format. For example,
43 '192.168.1.0/24'.
44 :param fallback (str): If no address is found, return fallback.
45 :param fatal (boolean): If no address is found, fallback is not
46 set and fatal is True then exit(1).
47 """
48 if network is None:
49 if fallback is not None:
50 return fallback
51
52 if fatal:
53 no_ip_found_error_out(network)
54 else:
55 return None
56
57 _validate_cidr(network)
58 network = netaddr.IPNetwork(network)
59 for iface in netifaces.interfaces():
60 addresses = netifaces.ifaddresses(iface)
61 if network.version == 4 and netifaces.AF_INET in addresses:
62 addr = addresses[netifaces.AF_INET][0]['addr']
63 netmask = addresses[netifaces.AF_INET][0]['netmask']
64 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
65 if cidr in network:
66 return str(cidr.ip)
67
68 if network.version == 6 and netifaces.AF_INET6 in addresses:
69 for addr in addresses[netifaces.AF_INET6]:
70 if not addr['addr'].startswith('fe80'):
71 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
72 addr['netmask']))
73 if cidr in network:
74 return str(cidr.ip)
75
76 if fallback is not None:
77 return fallback
78
79 if fatal:
80 no_ip_found_error_out(network)
81
82 return None
83
84
85def is_ipv6(address):
86 """Determine whether provided address is IPv6 or not."""
87 try:
88 address = netaddr.IPAddress(address)
89 except netaddr.AddrFormatError:
90 # probably a hostname - so not an address at all!
91 return False
92
93 return address.version == 6
94
95
96def is_address_in_network(network, address):
97 """
98 Determine whether the provided address is within a network range.
99
100 :param network (str): CIDR presentation format. For example,
101 '192.168.1.0/24'.
102 :param address: An individual IPv4 or IPv6 address without a net
103 mask or subnet prefix. For example, '192.168.1.1'.
104 :returns boolean: Flag indicating whether address is in network.
105 """
106 try:
107 network = netaddr.IPNetwork(network)
108 except (netaddr.core.AddrFormatError, ValueError):
109 raise ValueError("Network (%s) is not in CIDR presentation format" %
110 network)
111
112 try:
113 address = netaddr.IPAddress(address)
114 except (netaddr.core.AddrFormatError, ValueError):
115 raise ValueError("Address (%s) is not in correct presentation format" %
116 address)
117
118 if address in network:
119 return True
120 else:
121 return False
122
123
124def _get_for_address(address, key):
125 """Retrieve an attribute of or the physical interface that
126 the IP address provided could be bound to.
127
128 :param address (str): An individual IPv4 or IPv6 address without a net
129 mask or subnet prefix. For example, '192.168.1.1'.
130 :param key: 'iface' for the physical interface name or an attribute
131 of the configured interface, for example 'netmask'.
132 :returns str: Requested attribute or None if address is not bindable.
133 """
134 address = netaddr.IPAddress(address)
135 for iface in netifaces.interfaces():
136 addresses = netifaces.ifaddresses(iface)
137 if address.version == 4 and netifaces.AF_INET in addresses:
138 addr = addresses[netifaces.AF_INET][0]['addr']
139 netmask = addresses[netifaces.AF_INET][0]['netmask']
140 network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
141 cidr = network.cidr
142 if address in cidr:
143 if key == 'iface':
144 return iface
145 else:
146 return addresses[netifaces.AF_INET][0][key]
147
148 if address.version == 6 and netifaces.AF_INET6 in addresses:
149 for addr in addresses[netifaces.AF_INET6]:
150 if not addr['addr'].startswith('fe80'):
151 network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
152 addr['netmask']))
153 cidr = network.cidr
154 if address in cidr:
155 if key == 'iface':
156 return iface
157 elif key == 'netmask' and cidr:
158 return str(cidr).split('/')[1]
159 else:
160 return addr[key]
161
162 return None
163
164
165get_iface_for_address = partial(_get_for_address, key='iface')
166
167
168get_netmask_for_address = partial(_get_for_address, key='netmask')
169
170
171def format_ipv6_addr(address):
172 """If address is IPv6, wrap it in '[]' otherwise return None.
173
174 This is required by most configuration files when specifying IPv6
175 addresses.
176 """
177 if is_ipv6(address):
178 return "[%s]" % address
179
180 return None
181
182
183def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
184 fatal=True, exc_list=None):
185 """Return the assigned IP address for a given interface, if any."""
186 # Extract nic if passed /dev/ethX
187 if '/' in iface:
188 iface = iface.split('/')[-1]
189
190 if not exc_list:
191 exc_list = []
192
193 try:
194 inet_num = getattr(netifaces, inet_type)
195 except AttributeError:
196 raise Exception("Unknown inet type '%s'" % str(inet_type))
197
198 interfaces = netifaces.interfaces()
199 if inc_aliases:
200 ifaces = []
201 for _iface in interfaces:
202 if iface == _iface or _iface.split(':')[0] == iface:
203 ifaces.append(_iface)
204
205 if fatal and not ifaces:
206 raise Exception("Invalid interface '%s'" % iface)
207
208 ifaces.sort()
209 else:
210 if iface not in interfaces:
211 if fatal:
212 raise Exception("Interface '%s' not found " % (iface))
213 else:
214 return []
215
216 else:
217 ifaces = [iface]
218
219 addresses = []
220 for netiface in ifaces:
221 net_info = netifaces.ifaddresses(netiface)
222 if inet_num in net_info:
223 for entry in net_info[inet_num]:
224 if 'addr' in entry and entry['addr'] not in exc_list:
225 addresses.append(entry['addr'])
226
227 if fatal and not addresses:
228 raise Exception("Interface '%s' doesn't have any %s addresses." %
229 (iface, inet_type))
230
231 return sorted(addresses)
232
233
234get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
235
236
237def get_iface_from_addr(addr):
238 """Work out on which interface the provided address is configured."""
239 for iface in netifaces.interfaces():
240 addresses = netifaces.ifaddresses(iface)
241 for inet_type in addresses:
242 for _addr in addresses[inet_type]:
243 _addr = _addr['addr']
244 # link local
245 ll_key = re.compile("(.+)%.*")
246 raw = re.match(ll_key, _addr)
247 if raw:
248 _addr = raw.group(1)
249
250 if _addr == addr:
251 log("Address '%s' is configured on iface '%s'" %
252 (addr, iface))
253 return iface
254
255 msg = "Unable to infer net iface on which '%s' is configured" % (addr)
256 raise Exception(msg)
257
258
259def sniff_iface(f):
260 """Ensure decorated function is called with a value for iface.
261
262 If no iface provided, inject net iface inferred from unit private address.
263 """
264 def iface_sniffer(*args, **kwargs):
265 if not kwargs.get('iface', None):
266 kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
267
268 return f(*args, **kwargs)
269
270 return iface_sniffer
271
272
273@sniff_iface
274def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
275 dynamic_only=True):
276 """Get assigned IPv6 address for a given interface.
277
278 Returns list of addresses found. If no address found, returns empty list.
279
280 If iface is None, we infer the current primary interface by doing a reverse
281 lookup on the unit private-address.
282
283 We currently only support scope global IPv6 addresses i.e. non-temporary
284 addresses. If no global IPv6 address is found, return the first one found
285 in the ipv6 address list.
286 """
287 addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
288 inc_aliases=inc_aliases, fatal=fatal,
289 exc_list=exc_list)
290
291 if addresses:
292 global_addrs = []
293 for addr in addresses:
294 key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
295 m = re.match(key_scope_link_local, addr)
296 if m:
297 eui_64_mac = m.group(1)
298 iface = m.group(2)
299 else:
300 global_addrs.append(addr)
301
302 if global_addrs:
303 # Make sure any found global addresses are not temporary
304 cmd = ['ip', 'addr', 'show', iface]
305 out = subprocess.check_output(cmd).decode('UTF-8')
306 if dynamic_only:
307 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
308 else:
309 key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
310
311 addrs = []
312 for line in out.split('\n'):
313 line = line.strip()
314 m = re.match(key, line)
315 if m and 'temporary' not in line:
316 # Return the first valid address we find
317 for addr in global_addrs:
318 if m.group(1) == addr:
319 if not dynamic_only or \
320 m.group(1).endswith(eui_64_mac):
321 addrs.append(addr)
322
323 if addrs:
324 return addrs
325
326 if fatal:
327 raise Exception("Interface '%s' does not have a scope global "
328 "non-temporary ipv6 address." % iface)
329
330 return []
331
332
333def get_bridges(vnic_dir='/sys/devices/virtual/net'):
334 """Return a list of bridges on the system."""
335 b_regex = "%s/*/bridge" % vnic_dir
336 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
337
338
339def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
340 """Return a list of nics comprising a given bridge on the system."""
341 brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
342 return [x.split('/')[-1] for x in glob.glob(brif_regex)]
343
344
345def is_bridge_member(nic):
346 """Check if a given nic is a member of a bridge."""
347 for bridge in get_bridges():
348 if nic in get_bridge_nics(bridge):
349 return True
350
351 return False
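
These address helpers are how the HA machinery decides which local address
and netmask to advertise for each configured network; HAProxyContext and
ApacheSSLContext below both rely on them. A short usage sketch with made-up
addresses:

from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    get_netmask_for_address,
    format_ipv6_addr,
)

# Pick the local address inside the given CIDR, or fall back if none matches.
addr = get_address_in_network('192.168.1.0/24', fallback='10.0.0.1')
netmask = get_netmask_for_address(addr)   # e.g. '255.255.255.0', or None

# IPv6 addresses are wrapped in brackets for config files; IPv4 returns None.
print(format_ipv6_addr('2001:db8::1'))    # '[2001:db8::1]'
print(format_ipv6_addr('192.168.1.1'))    # None
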
=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,92 @@
1import six
2from charmhelpers.contrib.amulet.deployment import (
3 AmuletDeployment
4)
5
6
7class OpenStackAmuletDeployment(AmuletDeployment):
8 """OpenStack amulet deployment.
9
10 This class inherits from AmuletDeployment and has additional support
11 that is specifically for use by OpenStack charms.
12 """
13
14 def __init__(self, series=None, openstack=None, source=None, stable=True):
15 """Initialize the deployment environment."""
16 super(OpenStackAmuletDeployment, self).__init__(series)
17 self.openstack = openstack
18 self.source = source
19 self.stable = stable
20 # Note(coreycb): this needs to be changed when new next branches come
21 # out.
22 self.current_next = "trusty"
23
24 def _determine_branch_locations(self, other_services):
25 """Determine the branch locations for the other services.
26
27 Determine if the local branch being tested is derived from its
28 stable or next (dev) branch, and based on this, use the corresponding
29 stable or next branches for the other_services."""
30 base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
31
32 if self.stable:
33 for svc in other_services:
34 temp = 'lp:charms/{}'
35 svc['location'] = temp.format(svc['name'])
36 else:
37 for svc in other_services:
38 if svc['name'] in base_charms:
39 temp = 'lp:charms/{}'
40 svc['location'] = temp.format(svc['name'])
41 else:
42 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
43 svc['location'] = temp.format(self.current_next,
44 svc['name'])
45 return other_services
46
47 def _add_services(self, this_service, other_services):
48 """Add services to the deployment and set openstack-origin/source."""
49 other_services = self._determine_branch_locations(other_services)
50
51 super(OpenStackAmuletDeployment, self)._add_services(this_service,
52 other_services)
53
54 services = other_services
55 services.append(this_service)
56 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
57 'ceph-osd', 'ceph-radosgw']
58
59 if self.openstack:
60 for svc in services:
61 if svc['name'] not in use_source:
62 config = {'openstack-origin': self.openstack}
63 self.d.configure(svc['name'], config)
64
65 if self.source:
66 for svc in services:
67 if svc['name'] in use_source:
68 config = {'source': self.source}
69 self.d.configure(svc['name'], config)
70
71 def _configure_services(self, configs):
72 """Configure all of the services."""
73 for service, config in six.iteritems(configs):
74 self.d.configure(service, config)
75
76 def _get_openstack_release(self):
77 """Get openstack release.
78
79 Return an integer representing the enum value of the openstack
80 release.
81 """
82 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
83 self.precise_havana, self.precise_icehouse,
84 self.trusty_icehouse) = range(6)
85 releases = {
86 ('precise', None): self.precise_essex,
87 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
88 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
89 ('precise', 'cloud:precise-havana'): self.precise_havana,
90 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
91 ('trusty', None): self.trusty_icehouse}
92 return releases[(self.series, self.openstack)]
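
A sketch of how a charm's amulet tests typically subclass this deployment
helper; the service list and keystone config below are illustrative and not
taken from this charm's tests/ directory:

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)


class BasicDeployment(OpenStackAmuletDeployment):
    """Illustrative trusty deployment of ceph-radosgw and its dependencies."""

    def __init__(self, series='trusty', openstack=None, source=None,
                 stable=False):
        super(BasicDeployment, self).__init__(series, openstack, source,
                                               stable)
        this_service = {'name': 'ceph-radosgw'}
        other_services = [{'name': 'ceph'}, {'name': 'keystone'}]
        self._add_services(this_service, other_services)
        self._configure_services({'keystone': {'admin-password': 'openstack'}})
        self._deploy()
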
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,278 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10import six
11
12from charmhelpers.contrib.amulet.utils import (
13 AmuletUtils
14)
15
16DEBUG = logging.DEBUG
17ERROR = logging.ERROR
18
19
20class OpenStackAmuletUtils(AmuletUtils):
21 """OpenStack amulet utilities.
22
23 This class inherits from AmuletUtils and has additional support
24 that is specifically for use by OpenStack charms.
25 """
26
27 def __init__(self, log_level=ERROR):
28 """Initialize the deployment environment."""
29 super(OpenStackAmuletUtils, self).__init__(log_level)
30
31 def validate_endpoint_data(self, endpoints, admin_port, internal_port,
32 public_port, expected):
33 """Validate endpoint data.
34
35 Validate actual endpoint data vs expected endpoint data. The ports
36 are used to find the matching endpoint.
37 """
38 found = False
39 for ep in endpoints:
40 self.log.debug('endpoint: {}'.format(repr(ep)))
41 if (admin_port in ep.adminurl and
42 internal_port in ep.internalurl and
43 public_port in ep.publicurl):
44 found = True
45 actual = {'id': ep.id,
46 'region': ep.region,
47 'adminurl': ep.adminurl,
48 'internalurl': ep.internalurl,
49 'publicurl': ep.publicurl,
50 'service_id': ep.service_id}
51 ret = self._validate_dict_data(expected, actual)
52 if ret:
53 return 'unexpected endpoint data - {}'.format(ret)
54
55 if not found:
56 return 'endpoint not found'
57
58 def validate_svc_catalog_endpoint_data(self, expected, actual):
59 """Validate service catalog endpoint data.
60
61 Validate a list of actual service catalog endpoints vs a list of
62 expected service catalog endpoints.
63 """
64 self.log.debug('actual: {}'.format(repr(actual)))
65 for k, v in six.iteritems(expected):
66 if k in actual:
67 ret = self._validate_dict_data(expected[k][0], actual[k][0])
68 if ret:
69 return self.endpoint_error(k, ret)
70 else:
71 return "endpoint {} does not exist".format(k)
72 return ret
73
74 def validate_tenant_data(self, expected, actual):
75 """Validate tenant data.
76
77 Validate a list of actual tenant data vs list of expected tenant
78 data.
79 """
80 self.log.debug('actual: {}'.format(repr(actual)))
81 for e in expected:
82 found = False
83 for act in actual:
84 a = {'enabled': act.enabled, 'description': act.description,
85 'name': act.name, 'id': act.id}
86 if e['name'] == a['name']:
87 found = True
88 ret = self._validate_dict_data(e, a)
89 if ret:
90 return "unexpected tenant data - {}".format(ret)
91 if not found:
92 return "tenant {} does not exist".format(e['name'])
93 return ret
94
95 def validate_role_data(self, expected, actual):
96 """Validate role data.
97
98 Validate a list of actual role data vs a list of expected role
99 data.
100 """
101 self.log.debug('actual: {}'.format(repr(actual)))
102 for e in expected:
103 found = False
104 for act in actual:
105 a = {'name': act.name, 'id': act.id}
106 if e['name'] == a['name']:
107 found = True
108 ret = self._validate_dict_data(e, a)
109 if ret:
110 return "unexpected role data - {}".format(ret)
111 if not found:
112 return "role {} does not exist".format(e['name'])
113 return ret
114
115 def validate_user_data(self, expected, actual):
116 """Validate user data.
117
118 Validate a list of actual user data vs a list of expected user
119 data.
120 """
121 self.log.debug('actual: {}'.format(repr(actual)))
122 for e in expected:
123 found = False
124 for act in actual:
125 a = {'enabled': act.enabled, 'name': act.name,
126 'email': act.email, 'tenantId': act.tenantId,
127 'id': act.id}
128 if e['name'] == a['name']:
129 found = True
130 ret = self._validate_dict_data(e, a)
131 if ret:
132 return "unexpected user data - {}".format(ret)
133 if not found:
134 return "user {} does not exist".format(e['name'])
135 return ret
136
137 def validate_flavor_data(self, expected, actual):
138 """Validate flavor data.
139
140 Validate a list of actual flavors vs a list of expected flavors.
141 """
142 self.log.debug('actual: {}'.format(repr(actual)))
143 act = [a.name for a in actual]
144 return self._validate_list_data(expected, act)
145
146 def tenant_exists(self, keystone, tenant):
147 """Return True if tenant exists."""
148 return tenant in [t.name for t in keystone.tenants.list()]
149
150 def authenticate_keystone_admin(self, keystone_sentry, user, password,
151 tenant):
152 """Authenticates admin user with the keystone admin endpoint."""
153 unit = keystone_sentry
154 service_ip = unit.relation('shared-db',
155 'mysql:shared-db')['private-address']
156 ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
157 return keystone_client.Client(username=user, password=password,
158 tenant_name=tenant, auth_url=ep)
159
160 def authenticate_keystone_user(self, keystone, user, password, tenant):
161 """Authenticates a regular user with the keystone public endpoint."""
162 ep = keystone.service_catalog.url_for(service_type='identity',
163 endpoint_type='publicURL')
164 return keystone_client.Client(username=user, password=password,
165 tenant_name=tenant, auth_url=ep)
166
167 def authenticate_glance_admin(self, keystone):
168 """Authenticates admin user with glance."""
169 ep = keystone.service_catalog.url_for(service_type='image',
170 endpoint_type='adminURL')
171 return glance_client.Client(ep, token=keystone.auth_token)
172
173 def authenticate_nova_user(self, keystone, user, password, tenant):
174 """Authenticates a regular user with nova-api."""
175 ep = keystone.service_catalog.url_for(service_type='identity',
176 endpoint_type='publicURL')
177 return nova_client.Client(username=user, api_key=password,
178 project_id=tenant, auth_url=ep)
179
180 def create_cirros_image(self, glance, image_name):
181 """Download the latest cirros image and upload it to glance."""
182 http_proxy = os.getenv('AMULET_HTTP_PROXY')
183 self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
184 if http_proxy:
185 proxies = {'http': http_proxy}
186 opener = urllib.FancyURLopener(proxies)
187 else:
188 opener = urllib.FancyURLopener()
189
190 f = opener.open("http://download.cirros-cloud.net/version/released")
191 version = f.read().strip()
192 cirros_img = "cirros-{}-x86_64-disk.img".format(version)
193 local_path = os.path.join('tests', cirros_img)
194
195 if not os.path.exists(local_path):
196 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
197 version, cirros_img)
198 opener.retrieve(cirros_url, local_path)
199 f.close()
200
201 with open(local_path) as f:
202 image = glance.images.create(name=image_name, is_public=True,
203 disk_format='qcow2',
204 container_format='bare', data=f)
205 count = 1
206 status = image.status
207 while status != 'active' and count < 10:
208 time.sleep(3)
209 image = glance.images.get(image.id)
210 status = image.status
211 self.log.debug('image status: {}'.format(status))
212 count += 1
213
214 if status != 'active':
215 self.log.error('image creation timed out')
216 return None
217
218 return image
219
220 def delete_image(self, glance, image):
221 """Delete the specified image."""
222 num_before = len(list(glance.images.list()))
223 glance.images.delete(image)
224
225 count = 1
226 num_after = len(list(glance.images.list()))
227 while num_after != (num_before - 1) and count < 10:
228 time.sleep(3)
229 num_after = len(list(glance.images.list()))
230 self.log.debug('number of images: {}'.format(num_after))
231 count += 1
232
233 if num_after != (num_before - 1):
234 self.log.error('image deletion timed out')
235 return False
236
237 return True
238
239 def create_instance(self, nova, image_name, instance_name, flavor):
240 """Create the specified instance."""
241 image = nova.images.find(name=image_name)
242 flavor = nova.flavors.find(name=flavor)
243 instance = nova.servers.create(name=instance_name, image=image,
244 flavor=flavor)
245
246 count = 1
247 status = instance.status
248 while status != 'ACTIVE' and count < 60:
249 time.sleep(3)
250 instance = nova.servers.get(instance.id)
251 status = instance.status
252 self.log.debug('instance status: {}'.format(status))
253 count += 1
254
255 if status != 'ACTIVE':
256 self.log.error('instance creation timed out')
257 return None
258
259 return instance
260
261 def delete_instance(self, nova, instance):
262 """Delete the specified instance."""
263 num_before = len(list(nova.servers.list()))
264 nova.servers.delete(instance)
265
266 count = 1
267 num_after = len(list(nova.servers.list()))
268 while num_after != (num_before - 1) and count < 10:
269 time.sleep(3)
270 num_after = len(list(nova.servers.list()))
271 self.log.debug('number of instances: {}'.format(num_after))
272 count += 1
273
274 if num_after != (num_before - 1):
275 self.log.error('instance deletion timed out')
276 return False
277
278 return True
=== added file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,1038 @@
1import json
2import os
3import time
4from base64 import b64decode
5from subprocess import check_call
6
7import six
8
9from charmhelpers.fetch import (
10 apt_install,
11 filter_installed_packages,
12)
13from charmhelpers.core.hookenv import (
14 config,
15 is_relation_made,
16 local_unit,
17 log,
18 relation_get,
19 relation_ids,
20 related_units,
21 relation_set,
22 unit_get,
23 unit_private_ip,
24 charm_name,
25 DEBUG,
26 INFO,
27 WARNING,
28 ERROR,
29)
30
31from charmhelpers.core.sysctl import create as sysctl_create
32
33from charmhelpers.core.host import (
34 mkdir,
35 write_file,
36)
37from charmhelpers.contrib.hahelpers.cluster import (
38 determine_apache_port,
39 determine_api_port,
40 https,
41 is_clustered,
42)
43from charmhelpers.contrib.hahelpers.apache import (
44 get_cert,
45 get_ca_cert,
46 install_ca_cert,
47)
48from charmhelpers.contrib.openstack.neutron import (
49 neutron_plugin_attribute,
50)
51from charmhelpers.contrib.network.ip import (
52 get_address_in_network,
53 get_ipv6_addr,
54 get_netmask_for_address,
55 format_ipv6_addr,
56 is_address_in_network,
57)
58from charmhelpers.contrib.openstack.utils import get_host_ip
59
60CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
61ADDRESS_TYPES = ['admin', 'internal', 'public']
62
63
64class OSContextError(Exception):
65 pass
66
67
68def ensure_packages(packages):
69 """Install but do not upgrade required plugin packages."""
70 required = filter_installed_packages(packages)
71 if required:
72 apt_install(required, fatal=True)
73
74
75def context_complete(ctxt):
76 _missing = []
77 for k, v in six.iteritems(ctxt):
78 if v is None or v == '':
79 _missing.append(k)
80
81 if _missing:
82 log('Missing required data: %s' % ' '.join(_missing), level=INFO)
83 return False
84
85 return True
86
87
88def config_flags_parser(config_flags):
89 """Parses config flags string into dict.
90
91 The provided config_flags string may be a list of comma-separated values
92 which themselves may be comma-separated list of values.
93 """
94 if config_flags.find('==') >= 0:
95 log("config_flags is not in expected format (key=value)", level=ERROR)
96 raise OSContextError
97
98 # strip the following from each value.
99 post_strippers = ' ,'
100 # we strip any leading/trailing '=' or ' ' from the string then
101 # split on '='.
102 split = config_flags.strip(' =').split('=')
103 limit = len(split)
104 flags = {}
105 for i in range(0, limit - 1):
106 current = split[i]
107 next = split[i + 1]
108 vindex = next.rfind(',')
109 if (i == limit - 2) or (vindex < 0):
110 value = next
111 else:
112 value = next[:vindex]
113
114 if i == 0:
115 key = current
116 else:
117 # if this not the first entry, expect an embedded key.
118 index = current.rfind(',')
119 if index < 0:
120 log("Invalid config value(s) at index %s" % (i), level=ERROR)
121 raise OSContextError
122 key = current[index + 1:]
123
124 # Add to collection.
125 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
126
127 return flags
128
129
130class OSContextGenerator(object):
131 """Base class for all context generators."""
132 interfaces = []
133
134 def __call__(self):
135 raise NotImplementedError
136
137
138class SharedDBContext(OSContextGenerator):
139 interfaces = ['shared-db']
140
141 def __init__(self,
142 database=None, user=None, relation_prefix=None, ssl_dir=None):
143 """Allows inspecting relation for settings prefixed with
144 relation_prefix. This is useful for parsing access for multiple
145 databases returned via the shared-db interface (eg, nova_password,
146 quantum_password)
147 """
148 self.relation_prefix = relation_prefix
149 self.database = database
150 self.user = user
151 self.ssl_dir = ssl_dir
152
153 def __call__(self):
154 self.database = self.database or config('database')
155 self.user = self.user or config('database-user')
156 if None in [self.database, self.user]:
157 log("Could not generate shared_db context. Missing required charm "
158 "config options. (database name and user)", level=ERROR)
159 raise OSContextError
160
161 ctxt = {}
162
163 # NOTE(jamespage) if mysql charm provides a network upon which
164 # access to the database should be made, reconfigure relation
165 # with the service units local address and defer execution
166 access_network = relation_get('access-network')
167 if access_network is not None:
168 if self.relation_prefix is not None:
169 hostname_key = "{}_hostname".format(self.relation_prefix)
170 else:
171 hostname_key = "hostname"
172 access_hostname = get_address_in_network(access_network,
173 unit_get('private-address'))
174 set_hostname = relation_get(attribute=hostname_key,
175 unit=local_unit())
176 if set_hostname != access_hostname:
177 relation_set(relation_settings={hostname_key: access_hostname})
178 return ctxt # Defer any further hook execution for now....
179
180 password_setting = 'password'
181 if self.relation_prefix:
182 password_setting = self.relation_prefix + '_password'
183
184 for rid in relation_ids('shared-db'):
185 for unit in related_units(rid):
186 rdata = relation_get(rid=rid, unit=unit)
187 host = rdata.get('db_host')
188 host = format_ipv6_addr(host) or host
189 ctxt = {
190 'database_host': host,
191 'database': self.database,
192 'database_user': self.user,
193 'database_password': rdata.get(password_setting),
194 'database_type': 'mysql'
195 }
196 if context_complete(ctxt):
197 db_ssl(rdata, ctxt, self.ssl_dir)
198 return ctxt
199 return {}
200
201
202class PostgresqlDBContext(OSContextGenerator):
203 interfaces = ['pgsql-db']
204
205 def __init__(self, database=None):
206 self.database = database
207
208 def __call__(self):
209 self.database = self.database or config('database')
210 if self.database is None:
211 log('Could not generate postgresql_db context. Missing required '
212 'charm config options. (database name)', level=ERROR)
213 raise OSContextError
214
215 ctxt = {}
216 for rid in relation_ids(self.interfaces[0]):
217 for unit in related_units(rid):
218 rel_host = relation_get('host', rid=rid, unit=unit)
219 rel_user = relation_get('user', rid=rid, unit=unit)
220 rel_passwd = relation_get('password', rid=rid, unit=unit)
221 ctxt = {'database_host': rel_host,
222 'database': self.database,
223 'database_user': rel_user,
224 'database_password': rel_passwd,
225 'database_type': 'postgresql'}
226 if context_complete(ctxt):
227 return ctxt
228
229 return {}
230
231
232def db_ssl(rdata, ctxt, ssl_dir):
233 if 'ssl_ca' in rdata and ssl_dir:
234 ca_path = os.path.join(ssl_dir, 'db-client.ca')
235 with open(ca_path, 'w') as fh:
236 fh.write(b64decode(rdata['ssl_ca']))
237
238 ctxt['database_ssl_ca'] = ca_path
239 elif 'ssl_ca' in rdata:
240 log("Charm not setup for ssl support but ssl ca found", level=INFO)
241 return ctxt
242
243 if 'ssl_cert' in rdata:
244 cert_path = os.path.join(
245 ssl_dir, 'db-client.cert')
246 if not os.path.exists(cert_path):
247 log("Waiting 1m for ssl client cert validity", level=INFO)
248 time.sleep(60)
249
250 with open(cert_path, 'w') as fh:
251 fh.write(b64decode(rdata['ssl_cert']))
252
253 ctxt['database_ssl_cert'] = cert_path
254 key_path = os.path.join(ssl_dir, 'db-client.key')
255 with open(key_path, 'w') as fh:
256 fh.write(b64decode(rdata['ssl_key']))
257
258 ctxt['database_ssl_key'] = key_path
259
260 return ctxt
261
262
263class IdentityServiceContext(OSContextGenerator):
264 interfaces = ['identity-service']
265
266 def __call__(self):
267 log('Generating template context for identity-service', level=DEBUG)
268 ctxt = {}
269 for rid in relation_ids('identity-service'):
270 for unit in related_units(rid):
271 rdata = relation_get(rid=rid, unit=unit)
272 serv_host = rdata.get('service_host')
273 serv_host = format_ipv6_addr(serv_host) or serv_host
274 auth_host = rdata.get('auth_host')
275 auth_host = format_ipv6_addr(auth_host) or auth_host
276 svc_protocol = rdata.get('service_protocol') or 'http'
277 auth_protocol = rdata.get('auth_protocol') or 'http'
278 ctxt = {'service_port': rdata.get('service_port'),
279 'service_host': serv_host,
280 'auth_host': auth_host,
281 'auth_port': rdata.get('auth_port'),
282 'admin_tenant_name': rdata.get('service_tenant'),
283 'admin_user': rdata.get('service_username'),
284 'admin_password': rdata.get('service_password'),
285 'service_protocol': svc_protocol,
286 'auth_protocol': auth_protocol}
287 if context_complete(ctxt):
288 # NOTE(jamespage) this is required for >= icehouse
289 # so a missing value just indicates keystone needs
290 # upgrading
291 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
292 return ctxt
293
294 return {}
295
296
297class AMQPContext(OSContextGenerator):
298
299 def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
300 self.ssl_dir = ssl_dir
301 self.rel_name = rel_name
302 self.relation_prefix = relation_prefix
303 self.interfaces = [rel_name]
304
305 def __call__(self):
306 log('Generating template context for amqp', level=DEBUG)
307 conf = config()
308 if self.relation_prefix:
309 user_setting = '%s-rabbit-user' % (self.relation_prefix)
310 vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
311 else:
312 user_setting = 'rabbit-user'
313 vhost_setting = 'rabbit-vhost'
314
315 try:
316 username = conf[user_setting]
317 vhost = conf[vhost_setting]
318 except KeyError as e:
319 log('Could not generate shared_db context. Missing required charm '
320 'config options: %s.' % e, level=ERROR)
321 raise OSContextError
322
323 ctxt = {}
324 for rid in relation_ids(self.rel_name):
325 ha_vip_only = False
326 for unit in related_units(rid):
327 if relation_get('clustered', rid=rid, unit=unit):
328 ctxt['clustered'] = True
329 vip = relation_get('vip', rid=rid, unit=unit)
330 vip = format_ipv6_addr(vip) or vip
331 ctxt['rabbitmq_host'] = vip
332 else:
333 host = relation_get('private-address', rid=rid, unit=unit)
334 host = format_ipv6_addr(host) or host
335 ctxt['rabbitmq_host'] = host
336
337 ctxt.update({
338 'rabbitmq_user': username,
339 'rabbitmq_password': relation_get('password', rid=rid,
340 unit=unit),
341 'rabbitmq_virtual_host': vhost,
342 })
343
344 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
345 if ssl_port:
346 ctxt['rabbit_ssl_port'] = ssl_port
347
348 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
349 if ssl_ca:
350 ctxt['rabbit_ssl_ca'] = ssl_ca
351
352 if relation_get('ha_queues', rid=rid, unit=unit) is not None:
353 ctxt['rabbitmq_ha_queues'] = True
354
355 ha_vip_only = relation_get('ha-vip-only',
356 rid=rid, unit=unit) is not None
357
358 if context_complete(ctxt):
359 if 'rabbit_ssl_ca' in ctxt:
360 if not self.ssl_dir:
361 log("Charm not setup for ssl support but ssl ca "
362 "found", level=INFO)
363 break
364
365 ca_path = os.path.join(
366 self.ssl_dir, 'rabbit-client-ca.pem')
367 with open(ca_path, 'w') as fh:
368 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
369 ctxt['rabbit_ssl_ca'] = ca_path
370
371 # Sufficient information found = break out!
372 break
373
374 # Used for active/active rabbitmq >= grizzly
375 if (('clustered' not in ctxt or ha_vip_only) and
376 len(related_units(rid)) > 1):
377 rabbitmq_hosts = []
378 for unit in related_units(rid):
379 host = relation_get('private-address', rid=rid, unit=unit)
380 host = format_ipv6_addr(host) or host
381 rabbitmq_hosts.append(host)
382
383 ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
384
385 if not context_complete(ctxt):
386 return {}
387
388 return ctxt
389
390
391class CephContext(OSContextGenerator):
392 """Generates context for /etc/ceph/ceph.conf templates."""
393 interfaces = ['ceph']
394
395 def __call__(self):
396 if not relation_ids('ceph'):
397 return {}
398
399 log('Generating template context for ceph', level=DEBUG)
400 mon_hosts = []
401 auth = None
402 key = None
403 use_syslog = str(config('use-syslog')).lower()
404 for rid in relation_ids('ceph'):
405 for unit in related_units(rid):
406 auth = relation_get('auth', rid=rid, unit=unit)
407 key = relation_get('key', rid=rid, unit=unit)
408 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
409 unit=unit)
410 unit_priv_addr = relation_get('private-address', rid=rid,
411 unit=unit)
412 ceph_addr = ceph_pub_addr or unit_priv_addr
413 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
414 mon_hosts.append(ceph_addr)
415
416 ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
417 'auth': auth,
418 'key': key,
419 'use_syslog': use_syslog}
420
421 if not os.path.isdir('/etc/ceph'):
422 os.mkdir('/etc/ceph')
423
424 if not context_complete(ctxt):
425 return {}
426
427 ensure_packages(['ceph-common'])
428 return ctxt
429
430
431class HAProxyContext(OSContextGenerator):
432 """Provides half a context for the haproxy template, which describes
433 all peers to be included in the cluster. Each charm needs to include
434 its own context generator that describes the port mapping.
435 """
436 interfaces = ['cluster']
437
438 def __init__(self, singlenode_mode=False):
439 self.singlenode_mode = singlenode_mode
440
441 def __call__(self):
442 if not relation_ids('cluster') and not self.singlenode_mode:
443 return {}
444
445 if config('prefer-ipv6'):
446 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
447 else:
448 addr = get_host_ip(unit_get('private-address'))
449
450 l_unit = local_unit().replace('/', '-')
451 cluster_hosts = {}
452
453 # NOTE(jamespage): build out map of configured network endpoints
454 # and associated backends
455 for addr_type in ADDRESS_TYPES:
456 cfg_opt = 'os-{}-network'.format(addr_type)
457 laddr = get_address_in_network(config(cfg_opt))
458 if laddr:
459 netmask = get_netmask_for_address(laddr)
460 cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
461 netmask),
462 'backends': {l_unit: laddr}}
463 for rid in relation_ids('cluster'):
464 for unit in related_units(rid):
465 _laddr = relation_get('{}-address'.format(addr_type),
466 rid=rid, unit=unit)
467 if _laddr:
468 _unit = unit.replace('/', '-')
469 cluster_hosts[laddr]['backends'][_unit] = _laddr
470
471 # NOTE(jamespage) add backend based on private address - this
472 # will either be the only backend or the fallback if no acls
473 # match in the frontend
474 cluster_hosts[addr] = {}
475 netmask = get_netmask_for_address(addr)
476 cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
477 'backends': {l_unit: addr}}
478 for rid in relation_ids('cluster'):
479 for unit in related_units(rid):
480 _laddr = relation_get('private-address',
481 rid=rid, unit=unit)
482 if _laddr:
483 _unit = unit.replace('/', '-')
484 cluster_hosts[addr]['backends'][_unit] = _laddr
485
486 ctxt = {
487 'frontends': cluster_hosts,
488 'default_backend': addr
489 }
490
491 if config('haproxy-server-timeout'):
492 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
493
494 if config('haproxy-client-timeout'):
495 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
496
497 if config('prefer-ipv6'):
498 ctxt['ipv6'] = True
499 ctxt['local_host'] = 'ip6-localhost'
500 ctxt['haproxy_host'] = '::'
501 ctxt['stat_port'] = ':::8888'
502 else:
503 ctxt['local_host'] = '127.0.0.1'
504 ctxt['haproxy_host'] = '0.0.0.0'
505 ctxt['stat_port'] = ':8888'
506
507 for frontend in cluster_hosts:
508 if (len(cluster_hosts[frontend]['backends']) > 1 or
509 self.singlenode_mode):
510 # Enable haproxy when we have enough peers.
511 log('Ensuring haproxy enabled in /etc/default/haproxy.',
512 level=DEBUG)
513 with open('/etc/default/haproxy', 'w') as out:
514 out.write('ENABLED=1\n')
515
516 return ctxt
517
518 log('HAProxy context is incomplete, this unit has no peers.',
519 level=INFO)
520 return {}
521
522
523class ImageServiceContext(OSContextGenerator):
524 interfaces = ['image-service']
525
526 def __call__(self):
527 """Obtains the glance API server from the image-service relation.
528 Useful in nova and cinder (currently).
529 """
530 log('Generating template context for image-service.', level=DEBUG)
531 rids = relation_ids('image-service')
532 if not rids:
533 return {}
534
535 for rid in rids:
536 for unit in related_units(rid):
537 api_server = relation_get('glance-api-server',
538 rid=rid, unit=unit)
539 if api_server:
540 return {'glance_api_servers': api_server}
541
542 log("ImageService context is incomplete. Missing required relation "
543 "data.", level=INFO)
544 return {}
545
546
547class ApacheSSLContext(OSContextGenerator):
548 """Generates a context for an apache vhost configuration that configures
549 HTTPS reverse proxying for one or many endpoints. Generated context
550 looks something like::
551
552 {
553 'namespace': 'cinder',
554 'private_address': 'iscsi.mycinderhost.com',
555 'endpoints': [(8776, 8766), (8777, 8767)]
556 }
557
558 The endpoints list consists of tuples mapping external ports
559 to internal ports.
560 """
561 interfaces = ['https']
562
563 # charms should inherit this context and set external ports
564 # and service namespace accordingly.
565 external_ports = []
566 service_namespace = None
567
568 def enable_modules(self):
569 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
570 check_call(cmd)
571
572 def configure_cert(self, cn=None):
573 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
574 mkdir(path=ssl_dir)
575 cert, key = get_cert(cn)
576 if cn:
577 cert_filename = 'cert_{}'.format(cn)
578 key_filename = 'key_{}'.format(cn)
579 else:
580 cert_filename = 'cert'
581 key_filename = 'key'
582
583 write_file(path=os.path.join(ssl_dir, cert_filename),
584 content=b64decode(cert))
585 write_file(path=os.path.join(ssl_dir, key_filename),
586 content=b64decode(key))
587
588 def configure_ca(self):
589 ca_cert = get_ca_cert()
590 if ca_cert:
591 install_ca_cert(b64decode(ca_cert))
592
593 def canonical_names(self):
594 """Figure out which canonical names clients will access this service.
595 """
596 cns = []
597 for r_id in relation_ids('identity-service'):
598 for unit in related_units(r_id):
599 rdata = relation_get(rid=r_id, unit=unit)
600 for k in rdata:
601 if k.startswith('ssl_key_'):
602 cns.append(k[len('ssl_key_'):])
603
604 return sorted(list(set(cns)))
605
606 def get_network_addresses(self):
607 """For each network configured, return corresponding address and vip
608 (if available).
609
610 Returns a list of tuples of the form:
611
612 [(address_in_net_a, vip_in_net_a),
613 (address_in_net_b, vip_in_net_b),
614 ...]
615
616 or, if no vip(s) available:
617
618 [(address_in_net_a, address_in_net_a),
619 (address_in_net_b, address_in_net_b),
620 ...]
621 """
622 addresses = []
623 if config('vip'):
624 vips = config('vip').split()
625 else:
626 vips = []
627
628 for net_type in ['os-internal-network', 'os-admin-network',
629 'os-public-network']:
630 addr = get_address_in_network(config(net_type),
631 unit_get('private-address'))
632 if len(vips) > 1 and is_clustered():
633 if not config(net_type):
634 log("Multiple networks configured but net_type "
635 "is None (%s)." % net_type, level=WARNING)
636 continue
637
638 for vip in vips:
639 if is_address_in_network(config(net_type), vip):
640 addresses.append((addr, vip))
641 break
642
643 elif is_clustered() and config('vip'):
644 addresses.append((addr, config('vip')))
645 else:
646 addresses.append((addr, addr))
647
648 return sorted(addresses)
649
650 def __call__(self):
651 if isinstance(self.external_ports, six.string_types):
652 self.external_ports = [self.external_ports]
653
654 if not self.external_ports or not https():
655 return {}
656
657 self.configure_ca()
658 self.enable_modules()
659
660 ctxt = {'namespace': self.service_namespace,
661 'endpoints': [],
662 'ext_ports': []}
663
664 for cn in self.canonical_names():
665 self.configure_cert(cn)
666
667 addresses = self.get_network_addresses()
668 for address, endpoint in sorted(set(addresses)):
669 for api_port in self.external_ports:
670 ext_port = determine_apache_port(api_port,
671 singlenode_mode=True)
672 int_port = determine_api_port(api_port, singlenode_mode=True)
673 portmap = (address, endpoint, int(ext_port), int(int_port))
674 ctxt['endpoints'].append(portmap)
675 ctxt['ext_ports'].append(int(ext_port))
676
677 ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
678 return ctxt
679
680
681class NeutronContext(OSContextGenerator):
682 interfaces = []
683
684 @property
685 def plugin(self):
686 return None
687
688 @property
689 def network_manager(self):
690 return None
691
692 @property
693 def packages(self):
694 return neutron_plugin_attribute(self.plugin, 'packages',
695 self.network_manager)
696
697 @property
698 def neutron_security_groups(self):
699 return None
700
701 def _ensure_packages(self):
702 for pkgs in self.packages:
703 ensure_packages(pkgs)
704
705 def _save_flag_file(self):
706 if self.network_manager == 'quantum':
707 _file = '/etc/nova/quantum_plugin.conf'
708 else:
709 _file = '/etc/nova/neutron_plugin.conf'
710
711 with open(_file, 'wb') as out:
712 out.write(self.plugin + '\n')
713
714 def ovs_ctxt(self):
715 driver = neutron_plugin_attribute(self.plugin, 'driver',
716 self.network_manager)
717 config = neutron_plugin_attribute(self.plugin, 'config',
718 self.network_manager)
719 ovs_ctxt = {'core_plugin': driver,
720 'neutron_plugin': 'ovs',
721 'neutron_security_groups': self.neutron_security_groups,
722 'local_ip': unit_private_ip(),
723 'config': config}
724
725 return ovs_ctxt
726
727 def nvp_ctxt(self):
728 driver = neutron_plugin_attribute(self.plugin, 'driver',
729 self.network_manager)
730 config = neutron_plugin_attribute(self.plugin, 'config',
731 self.network_manager)
732 nvp_ctxt = {'core_plugin': driver,
733 'neutron_plugin': 'nvp',
734 'neutron_security_groups': self.neutron_security_groups,
735 'local_ip': unit_private_ip(),
736 'config': config}
737
738 return nvp_ctxt
739
740 def n1kv_ctxt(self):
741 driver = neutron_plugin_attribute(self.plugin, 'driver',
742 self.network_manager)
743 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
744 self.network_manager)
745 n1kv_user_config_flags = config('n1kv-config-flags')
746 restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
747 n1kv_ctxt = {'core_plugin': driver,
748 'neutron_plugin': 'n1kv',
749 'neutron_security_groups': self.neutron_security_groups,
750 'local_ip': unit_private_ip(),
751 'config': n1kv_config,
752 'vsm_ip': config('n1kv-vsm-ip'),
753 'vsm_username': config('n1kv-vsm-username'),
754 'vsm_password': config('n1kv-vsm-password'),
755 'restrict_policy_profiles': restrict_policy_profiles}
756
757 if n1kv_user_config_flags:
758 flags = config_flags_parser(n1kv_user_config_flags)
759 n1kv_ctxt['user_config_flags'] = flags
760
761 return n1kv_ctxt
762
763 def calico_ctxt(self):
764 driver = neutron_plugin_attribute(self.plugin, 'driver',
765 self.network_manager)
766 config = neutron_plugin_attribute(self.plugin, 'config',
767 self.network_manager)
768 calico_ctxt = {'core_plugin': driver,
769 'neutron_plugin': 'Calico',
770 'neutron_security_groups': self.neutron_security_groups,
771 'local_ip': unit_private_ip(),
772 'config': config}
773
774 return calico_ctxt
775
776 def neutron_ctxt(self):
777 if https():
778 proto = 'https'
779 else:
780 proto = 'http'
781
782 if is_clustered():
783 host = config('vip')
784 else:
785 host = unit_get('private-address')
786
787 ctxt = {'network_manager': self.network_manager,
788 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
789 return ctxt
790
791 def __call__(self):
792 self._ensure_packages()
793
794 if self.network_manager not in ['quantum', 'neutron']:
795 return {}
796
797 if not self.plugin:
798 return {}
799
800 ctxt = self.neutron_ctxt()
801
802 if self.plugin == 'ovs':
803 ctxt.update(self.ovs_ctxt())
804 elif self.plugin in ['nvp', 'nsx']:
805 ctxt.update(self.nvp_ctxt())
806 elif self.plugin == 'n1kv':
807 ctxt.update(self.n1kv_ctxt())
808 elif self.plugin == 'Calico':
809 ctxt.update(self.calico_ctxt())
810
811 alchemy_flags = config('neutron-alchemy-flags')
812 if alchemy_flags:
813 flags = config_flags_parser(alchemy_flags)
814 ctxt['neutron_alchemy_flags'] = flags
815
816 self._save_flag_file()
817 return ctxt
818
819
820class OSConfigFlagContext(OSContextGenerator):
821 """Provides support for user-defined config flags.
822
823 Users can define a comma-separated list of key=value pairs
824 in the charm configuration and apply them at any point in
825 any file by using a template flag.
826
827 Sometimes users might want config flags inserted within a
828 specific section so this class allows users to specify the
829 template flag name, allowing for multiple template flags
830 (sections) within the same context.
831
832 NOTE: the value of config-flags may be a comma-separated list of
833 key=value pairs and some OpenStack config files support
834 comma-separated lists as values.
835 """
836
837 def __init__(self, charm_flag='config-flags',
838 template_flag='user_config_flags'):
839 """
840 :param charm_flag: config flags in charm configuration.
841 :param template_flag: insert point for user-defined flags in template
842 file.
843 """
844 super(OSConfigFlagContext, self).__init__()
845 self._charm_flag = charm_flag
846 self._template_flag = template_flag
847
848 def __call__(self):
849 config_flags = config(self._charm_flag)
850 if not config_flags:
851 return {}
852
853 return {self._template_flag:
854 config_flags_parser(config_flags)}
855
856
857class SubordinateConfigContext(OSContextGenerator):
858
859 """
860 Responsible for inspecting relations to subordinates that
861 may be exporting required config via a json blob.
862
863 The subordinate interface allows subordinates to export their
864 configuration requirements to the principal for multiple config
865 files and multiple services. For example, a subordinate that has interfaces
866 to both glance and nova may export the following yaml blob as json::
867
868 glance:
869 /etc/glance/glance-api.conf:
870 sections:
871 DEFAULT:
872 - [key1, value1]
873 /etc/glance/glance-registry.conf:
874 MYSECTION:
875 - [key2, value2]
876 nova:
877 /etc/nova/nova.conf:
878 sections:
879 DEFAULT:
880 - [key3, value3]
881
882
883 It is then up to the principal charms to subscribe this context to
884 the service+config file it is interested in. Configuration data will
885 be available in the template context, in glance's case, as::
886
887 ctxt = {
888 ... other context ...
889 'subordinate_config': {
890 'DEFAULT': {
891 'key1': 'value1',
892 },
893 'MYSECTION': {
894 'key2': 'value2',
895 },
896 }
897 }
898 """
899
900 def __init__(self, service, config_file, interface):
901 """
902 :param service : Service name key to query in any subordinate
903 data found
904 :param config_file : Service's config file to query sections
905 :param interface : Subordinate interface to inspect
906 """
907 self.service = service
908 self.config_file = config_file
909 self.interface = interface
910
911 def __call__(self):
912 ctxt = {'sections': {}}
913 for rid in relation_ids(self.interface):
914 for unit in related_units(rid):
915 sub_config = relation_get('subordinate_configuration',
916 rid=rid, unit=unit)
917 if sub_config and sub_config != '':
918 try:
919 sub_config = json.loads(sub_config)
920 except:
921 log('Could not parse JSON from subordinate_config '
922 'setting from %s' % rid, level=ERROR)
923 continue
924
925 if self.service not in sub_config:
926 log('Found subordinate_config on %s but it contained '
927 'nothing for %s service' % (rid, self.service),
928 level=INFO)
929 continue
930
931 sub_config = sub_config[self.service]
932 if self.config_file not in sub_config:
933 log('Found subordinate_config on %s but it contained '
934 'nothing for %s' % (rid, self.config_file),
935 level=INFO)
936 continue
937
938 sub_config = sub_config[self.config_file]
939 for k, v in six.iteritems(sub_config):
940 if k == 'sections':
941 for section, config_dict in six.iteritems(v):
942 log("adding section '%s'" % (section),
943 level=DEBUG)
944 ctxt[k][section] = config_dict
945 else:
946 ctxt[k] = v
947
948 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
949 return ctxt
950
951
952class LogLevelContext(OSContextGenerator):
953
954 def __call__(self):
955 ctxt = {}
956 ctxt['debug'] = \
957 False if config('debug') is None else config('debug')
958 ctxt['verbose'] = \
959 False if config('verbose') is None else config('verbose')
960
961 return ctxt
962
963
964class SyslogContext(OSContextGenerator):
965
966 def __call__(self):
967 ctxt = {'use_syslog': config('use-syslog')}
968 return ctxt
969
970
971class BindHostContext(OSContextGenerator):
972
973 def __call__(self):
974 if config('prefer-ipv6'):
975 return {'bind_host': '::'}
976 else:
977 return {'bind_host': '0.0.0.0'}
978
979
980class WorkerConfigContext(OSContextGenerator):
981
982 @property
983 def num_cpus(self):
984 try:
985 from psutil import NUM_CPUS
986 except ImportError:
987 apt_install('python-psutil', fatal=True)
988 from psutil import NUM_CPUS
989
990 return NUM_CPUS
991
992 def __call__(self):
993 multiplier = config('worker-multiplier') or 0
994 ctxt = {"workers": self.num_cpus * multiplier}
995 return ctxt
996
997
998class ZeroMQContext(OSContextGenerator):
999 interfaces = ['zeromq-configuration']
1000
1001 def __call__(self):
1002 ctxt = {}
1003 if is_relation_made('zeromq-configuration', 'host'):
1004 for rid in relation_ids('zeromq-configuration'):
1005 for unit in related_units(rid):
1006 ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1007 ctxt['zmq_host'] = relation_get('host', unit, rid)
1008
1009 return ctxt
1010
1011
1012class NotificationDriverContext(OSContextGenerator):
1013
1014 def __init__(self, zmq_relation='zeromq-configuration',
1015 amqp_relation='amqp'):
1016 """
1017 :param zmq_relation: Name of Zeromq relation to check
1018 """
1019 self.zmq_relation = zmq_relation
1020 self.amqp_relation = amqp_relation
1021
1022 def __call__(self):
1023 ctxt = {'notifications': 'False'}
1024 if is_relation_made(self.amqp_relation):
1025 ctxt['notifications'] = "True"
1026
1027 return ctxt
1028
1029
1030class SysctlContext(OSContextGenerator):
1031 """This context check if the 'sysctl' option exists on configuration
1032 then creates a file with the loaded contents"""
1033 def __call__(self):
1034 sysctl_dict = config('sysctl')
1035 if sysctl_dict:
1036 sysctl_create(sysctl_dict,
1037 '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
1038 return {'sysctl': sysctl_dict}
01039
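Taken together, the generators above feed the OSConfigRenderer added later in this branch. A minimal sketch of how a principal charm might wire a couple of them up; the file paths, templates directory and release name below are illustrative, not taken from this charm:

    from charmhelpers.contrib.openstack import context, templating

    # Map each config file to the context generators that feed its template.
    CONFIG_FILES = {
        '/etc/haproxy/haproxy.cfg': [context.HAProxyContext()],
        '/etc/example/example.conf': [context.WorkerConfigContext(),
                                      context.LogLevelContext()],
    }

    def register_configs(release='icehouse'):
        configs = templating.OSConfigRenderer(templates_dir='templates/',
                                              openstack_release=release)
        for cfg, ctxts in CONFIG_FILES.items():
            configs.register(cfg, ctxts)
        return configs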
=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,93 @@
1from charmhelpers.core.hookenv import (
2 config,
3 unit_get,
4)
5from charmhelpers.contrib.network.ip import (
6 get_address_in_network,
7 is_address_in_network,
8 is_ipv6,
9 get_ipv6_addr,
10)
11from charmhelpers.contrib.hahelpers.cluster import is_clustered
12
13PUBLIC = 'public'
14INTERNAL = 'int'
15ADMIN = 'admin'
16
17ADDRESS_MAP = {
18 PUBLIC: {
19 'config': 'os-public-network',
20 'fallback': 'public-address'
21 },
22 INTERNAL: {
23 'config': 'os-internal-network',
24 'fallback': 'private-address'
25 },
26 ADMIN: {
27 'config': 'os-admin-network',
28 'fallback': 'private-address'
29 }
30}
31
32
33def canonical_url(configs, endpoint_type=PUBLIC):
34 """Returns the correct HTTP URL to this host given the state of HTTPS
35 configuration, hacluster and charm configuration.
36
37 :param configs: OSConfigRenderer config templating object to inspect
38 for a complete https context.
39 :param endpoint_type: str endpoint type to resolve.
40 :returns: str base URL for services on the current service unit.
41 """
42 scheme = 'http'
43 if 'https' in configs.complete_contexts():
44 scheme = 'https'
45 address = resolve_address(endpoint_type)
46 if is_ipv6(address):
47 address = "[{}]".format(address)
48 return '%s://%s' % (scheme, address)
49
50
51def resolve_address(endpoint_type=PUBLIC):
52 """Return unit address depending on net config.
53
54 If unit is clustered with vip(s) and has net splits defined, return vip on
55 correct network. If clustered with no nets defined, return primary vip.
56
57 If not clustered, return the unit address, ensuring it is on the configured
58 network split if one is configured.
59
60 :param endpoint_type: Network endpoint type
61 """
62 resolved_address = None
63 vips = config('vip')
64 if vips:
65 vips = vips.split()
66
67 net_type = ADDRESS_MAP[endpoint_type]['config']
68 net_addr = config(net_type)
69 net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
70 clustered = is_clustered()
71 if clustered:
72 if not net_addr:
73 # If no net-splits defined, we expect a single vip
74 resolved_address = vips[0]
75 else:
76 for vip in vips:
77 if is_address_in_network(net_addr, vip):
78 resolved_address = vip
79 break
80 else:
81 if config('prefer-ipv6'):
82 fallback_addr = get_ipv6_addr(exc_list=vips)[0]
83 else:
84 fallback_addr = unit_get(net_fallback)
85
86 resolved_address = get_address_in_network(net_addr, fallback_addr)
87
88 if resolved_address is None:
89 raise ValueError("Unable to resolve a suitable IP address based on "
90 "charm state and configuration. (net_type=%s, "
91 "clustered=%s)" % (net_type, clustered))
92
93 return resolved_address
094
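A hedged usage sketch: with a vip and os-public-network configured, resolve_address() picks the VIP that sits on the matching network, and canonical_url() wraps it in the right scheme. The configs argument is assumed to be the OSConfigRenderer added in templating.py, and the port below is purely illustrative:

    from charmhelpers.contrib.openstack.ip import canonical_url, PUBLIC, ADMIN

    def endpoint_urls(configs):
        # e.g. 'https://10.5.100.1:80' when clustered with a VIP and the
        # 'https' context is complete, otherwise 'http://<unit-address>:80'
        public_url = '{}:{}'.format(canonical_url(configs, PUBLIC), 80)
        admin_url = '{}:{}'.format(canonical_url(configs, ADMIN), 80)
        return {'public_url': public_url, 'admin_url': admin_url}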
=== added file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,223 @@
1# Various utilities for dealing with Neutron and the renaming from Quantum.
2
3from subprocess import check_output
4
5from charmhelpers.core.hookenv import (
6 config,
7 log,
8 ERROR,
9)
10
11from charmhelpers.contrib.openstack.utils import os_release
12
13
14def headers_package():
15 """Ensures correct linux-headers for running kernel are installed,
16 for building DKMS package"""
17 kver = check_output(['uname', '-r']).decode('UTF-8').strip()
18 return 'linux-headers-%s' % kver
19
20QUANTUM_CONF_DIR = '/etc/quantum'
21
22
23def kernel_version():
24 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
25 kver = check_output(['uname', '-r']).decode('UTF-8').strip()
26 kver = kver.split('.')
27 return (int(kver[0]), int(kver[1]))
28
29
30def determine_dkms_package():
31 """ Determine which DKMS package should be used based on kernel version """
32 # NOTE: 3.13 kernels have native support for GRE and VXLAN
33 if kernel_version() >= (3, 13):
34 return []
35 else:
36 return ['openvswitch-datapath-dkms']
37
38
39# legacy
40
41
42def quantum_plugins():
43 from charmhelpers.contrib.openstack import context
44 return {
45 'ovs': {
46 'config': '/etc/quantum/plugins/openvswitch/'
47 'ovs_quantum_plugin.ini',
48 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
49 'OVSQuantumPluginV2',
50 'contexts': [
51 context.SharedDBContext(user=config('neutron-database-user'),
52 database=config('neutron-database'),
53 relation_prefix='neutron',
54 ssl_dir=QUANTUM_CONF_DIR)],
55 'services': ['quantum-plugin-openvswitch-agent'],
56 'packages': [[headers_package()] + determine_dkms_package(),
57 ['quantum-plugin-openvswitch-agent']],
58 'server_packages': ['quantum-server',
59 'quantum-plugin-openvswitch'],
60 'server_services': ['quantum-server']
61 },
62 'nvp': {
63 'config': '/etc/quantum/plugins/nicira/nvp.ini',
64 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
65 'QuantumPlugin.NvpPluginV2',
66 'contexts': [
67 context.SharedDBContext(user=config('neutron-database-user'),
68 database=config('neutron-database'),
69 relation_prefix='neutron',
70 ssl_dir=QUANTUM_CONF_DIR)],
71 'services': [],
72 'packages': [],
73 'server_packages': ['quantum-server',
74 'quantum-plugin-nicira'],
75 'server_services': ['quantum-server']
76 }
77 }
78
79NEUTRON_CONF_DIR = '/etc/neutron'
80
81
82def neutron_plugins():
83 from charmhelpers.contrib.openstack import context
84 release = os_release('nova-common')
85 plugins = {
86 'ovs': {
87 'config': '/etc/neutron/plugins/openvswitch/'
88 'ovs_neutron_plugin.ini',
89 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
90 'OVSNeutronPluginV2',
91 'contexts': [
92 context.SharedDBContext(user=config('neutron-database-user'),
93 database=config('neutron-database'),
94 relation_prefix='neutron',
95 ssl_dir=NEUTRON_CONF_DIR)],
96 'services': ['neutron-plugin-openvswitch-agent'],
97 'packages': [[headers_package()] + determine_dkms_package(),
98 ['neutron-plugin-openvswitch-agent']],
99 'server_packages': ['neutron-server',
100 'neutron-plugin-openvswitch'],
101 'server_services': ['neutron-server']
102 },
103 'nvp': {
104 'config': '/etc/neutron/plugins/nicira/nvp.ini',
105 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
106 'NeutronPlugin.NvpPluginV2',
107 'contexts': [
108 context.SharedDBContext(user=config('neutron-database-user'),
109 database=config('neutron-database'),
110 relation_prefix='neutron',
111 ssl_dir=NEUTRON_CONF_DIR)],
112 'services': [],
113 'packages': [],
114 'server_packages': ['neutron-server',
115 'neutron-plugin-nicira'],
116 'server_services': ['neutron-server']
117 },
118 'nsx': {
119 'config': '/etc/neutron/plugins/vmware/nsx.ini',
120 'driver': 'vmware',
121 'contexts': [
122 context.SharedDBContext(user=config('neutron-database-user'),
123 database=config('neutron-database'),
124 relation_prefix='neutron',
125 ssl_dir=NEUTRON_CONF_DIR)],
126 'services': [],
127 'packages': [],
128 'server_packages': ['neutron-server',
129 'neutron-plugin-vmware'],
130 'server_services': ['neutron-server']
131 },
132 'n1kv': {
133 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
134 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
135 'contexts': [
136 context.SharedDBContext(user=config('neutron-database-user'),
137 database=config('neutron-database'),
138 relation_prefix='neutron',
139 ssl_dir=NEUTRON_CONF_DIR)],
140 'services': [],
141 'packages': [[headers_package()] + determine_dkms_package(),
142 ['neutron-plugin-cisco']],
143 'server_packages': ['neutron-server',
144 'neutron-plugin-cisco'],
145 'server_services': ['neutron-server']
146 },
147 'Calico': {
148 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
149 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
150 'contexts': [
151 context.SharedDBContext(user=config('neutron-database-user'),
152 database=config('neutron-database'),
153 relation_prefix='neutron',
154 ssl_dir=NEUTRON_CONF_DIR)],
155 'services': ['calico-felix',
156 'bird',
157 'neutron-dhcp-agent',
158 'nova-api-metadata'],
159 'packages': [[headers_package()] + determine_dkms_package(),
160 ['calico-compute',
161 'bird',
162 'neutron-dhcp-agent',
163 'nova-api-metadata']],
164 'server_packages': ['neutron-server', 'calico-control'],
165 'server_services': ['neutron-server']
166 }
167 }
168 if release >= 'icehouse':
169 # NOTE: patch in ml2 plugin for icehouse onwards
170 plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
171 plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
172 plugins['ovs']['server_packages'] = ['neutron-server',
173 'neutron-plugin-ml2']
174 # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
175 plugins['nvp'] = plugins['nsx']
176 return plugins
177
178
179def neutron_plugin_attribute(plugin, attr, net_manager=None):
180 manager = net_manager or network_manager()
181 if manager == 'quantum':
182 plugins = quantum_plugins()
183 elif manager == 'neutron':
184 plugins = neutron_plugins()
185 else:
186 log("Network manager '%s' does not support plugins." % (manager),
187 level=ERROR)
188 raise Exception
189
190 try:
191 _plugin = plugins[plugin]
192 except KeyError:
193 log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
194 raise Exception
195
196 try:
197 return _plugin[attr]
198 except KeyError:
199 return None
200
201
202def network_manager():
203 '''
204 Deals with the renaming of Quantum to Neutron in H and any situations
205 that require compatibility (e.g. deploying H with network-manager=quantum,
206 upgrading from G).
207 '''
208 release = os_release('nova-common')
209 manager = config('network-manager').lower()
210
211 if manager not in ['quantum', 'neutron']:
212 return manager
213
214 if release in ['essex']:
215 # E does not support neutron
216 log('Neutron networking not supported in Essex.', level=ERROR)
217 raise Exception
218 elif release in ['folsom', 'grizzly']:
219 # neutron is named quantum in F and G
220 return 'quantum'
221 else:
222 # ensure accurate naming for all releases post-H
223 return 'neutron'
0224
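This module is pulled in by the contrib.openstack|inc=* sync rather than used directly by ceph-radosgw; for reference, a neutron-consuming charm would typically query it along these lines (the 'ovs' plugin is only an example):

    from charmhelpers.contrib.openstack.neutron import (
        network_manager,
        neutron_plugin_attribute,
    )

    def plugin_packages(plugin='ovs'):
        # Flatten the nested package lists for the configured network manager,
        # e.g. linux-headers plus the openvswitch DKMS package on old kernels.
        pkgs = []
        for group in neutron_plugin_attribute(plugin, 'packages',
                                              network_manager()):
            pkgs.extend(group)
        return pkgs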
=== added directory 'hooks/charmhelpers/contrib/openstack/templates'
=== added file 'hooks/charmhelpers/contrib/openstack/templates/__init__.py'
--- hooks/charmhelpers/contrib/openstack/templates/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/__init__.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,2 @@
1# dummy __init__.py to fool syncer into thinking this is a syncable python
2# module
03
=== added file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf'
--- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2015-01-15 16:18:44 +0000
@@ -0,0 +1,15 @@
1###############################################################################
2# [ WARNING ]
3# ceph configuration file maintained by Juju
4# local changes may be overwritten.
5###############################################################################
6[global]
7{% if auth -%}
8 auth_supported = {{ auth }}
9 keyring = /etc/ceph/$cluster.$name.keyring
10 mon host = {{ mon_hosts }}
11{% endif -%}
12 log to syslog = {{ use_syslog }}
13 err to syslog = {{ use_syslog }}
14 clog to syslog = {{ use_syslog }}
15
016
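The keys this template consumes (auth, mon_hosts, use_syslog) are supplied by the Ceph context generator in context.py; an illustrative context that would render a usable client config (addresses made up):

    ctxt = {
        'auth': 'cephx',
        'mon_hosts': '10.5.100.21:6789 10.5.100.22:6789 10.5.100.23:6789',
        'use_syslog': 'false',
    }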
=== added file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2015-01-15 16:18:44 +0000
@@ -0,0 +1,58 @@
1global
2 log {{ local_host }} local0
3 log {{ local_host }} local1 notice
4 maxconn 20000
5 user haproxy
6 group haproxy
7 spread-checks 0
8
9defaults
10 log global
11 mode tcp
12 option tcplog
13 option dontlognull
14 retries 3
15 timeout queue 1000
16 timeout connect 1000
17{% if haproxy_client_timeout -%}
18 timeout client {{ haproxy_client_timeout }}
19{% else -%}
20 timeout client 30000
21{% endif -%}
22
23{% if haproxy_server_timeout -%}
24 timeout server {{ haproxy_server_timeout }}
25{% else -%}
26 timeout server 30000
27{% endif -%}
28
29listen stats {{ stat_port }}
30 mode http
31 stats enable
32 stats hide-version
33 stats realm Haproxy\ Statistics
34 stats uri /
35 stats auth admin:password
36
37{% if frontends -%}
38{% for service, ports in service_ports.items() -%}
39frontend tcp-in_{{ service }}
40 bind *:{{ ports[0] }}
41 {% if ipv6 -%}
42 bind :::{{ ports[0] }}
43 {% endif -%}
44 {% for frontend in frontends -%}
45 acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
46 use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
47 {% endfor -%}
48 default_backend {{ service }}_{{ default_backend }}
49
50{% for frontend in frontends -%}
51backend {{ service }}_{{ frontend }}
52 balance leastconn
53 {% for unit, address in frontends[frontend]['backends'].items() -%}
54 server {{ unit }} {{ address }}:{{ ports[1] }} check
55 {% endfor %}
56{% endfor -%}
57{% endfor -%}
58{% endif -%}
059
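This template expects the frontends/default_backend structure built by HAProxyContext above plus a service_ports map, which in this branch is expected to come from the charm-specific context added in hooks/ceph_radosgw_context.py. An illustrative context rendering one service with two backends; all addresses and ports are made up:

    ctxt = {
        'local_host': '127.0.0.1',
        'haproxy_host': '0.0.0.0',
        'stat_port': ':8888',
        'default_backend': '10.5.100.10',
        'frontends': {
            '10.5.100.10': {
                'network': '10.5.100.10/255.255.255.0',
                'backends': {'ceph-radosgw-0': '10.5.100.10',
                             'ceph-radosgw-1': '10.5.100.11'},
            },
        },
        # [haproxy listen port, backend port]
        'service_ports': {'cephradosgw-server': [80, 70]},
    }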
=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend'
--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2015-01-15 16:18:44 +0000
@@ -0,0 +1,24 @@
1{% if endpoints -%}
2{% for ext_port in ext_ports -%}
3Listen {{ ext_port }}
4{% endfor -%}
5{% for address, endpoint, ext, int in endpoints -%}
6<VirtualHost {{ address }}:{{ ext }}>
7 ServerName {{ endpoint }}
8 SSLEngine on
9 SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
10 SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
11 ProxyPass / http://localhost:{{ int }}/
12 ProxyPassReverse / http://localhost:{{ int }}/
13 ProxyPreserveHost on
14</VirtualHost>
15{% endfor -%}
16<Proxy *>
17 Order deny,allow
18 Allow from all
19</Proxy>
20<Location />
21 Order allow,deny
22 Allow from all
23</Location>
24{% endif -%}
025
=== added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf'
--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2015-01-15 16:18:44 +0000
@@ -0,0 +1,24 @@
1{% if endpoints -%}
2{% for ext_port in ext_ports -%}
3Listen {{ ext_port }}
4{% endfor -%}
5{% for address, endpoint, ext, int in endpoints -%}
6<VirtualHost {{ address }}:{{ ext }}>
7 ServerName {{ endpoint }}
8 SSLEngine on
9 SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
10 SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
11 ProxyPass / http://localhost:{{ int }}/
12 ProxyPassReverse / http://localhost:{{ int }}/
13 ProxyPreserveHost on
14</VirtualHost>
15{% endfor -%}
16<Proxy *>
17 Order deny,allow
18 Allow from all
19</Proxy>
20<Location />
21 Order allow,deny
22 Allow from all
23</Location>
24{% endif -%}
025
=== added file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,279 @@
1import os
2
3import six
4
5from charmhelpers.fetch import apt_install
6from charmhelpers.core.hookenv import (
7 log,
8 ERROR,
9 INFO
10)
11from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
12
13try:
14 from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
15except ImportError:
16 # python-jinja2 may not be installed yet, or we're running unittests.
17 FileSystemLoader = ChoiceLoader = Environment = exceptions = None
18
19
20class OSConfigException(Exception):
21 pass
22
23
24def get_loader(templates_dir, os_release):
25 """
26 Create a jinja2.ChoiceLoader containing template dirs up to
27 and including os_release. If a release's template directory
28 is missing under templates_dir, it will be omitted from the loader.
29 templates_dir is added to the bottom of the search list as a base
30 loading dir.
31
32 A charm may also ship a templates dir with this module
33 and it will be appended to the bottom of the search list, e.g.::
34
35 hooks/charmhelpers/contrib/openstack/templates
36
37 :param templates_dir (str): Base template directory containing release
38 sub-directories.
39 :param os_release (str): OpenStack release codename to construct template
40 loader.
41 :returns: jinja2.ChoiceLoader constructed with a list of
42 jinja2.FilesystemLoaders, ordered in descending
43 order by OpenStack release.
44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in six.itervalues(OPENSTACK_CODENAMES)]
47
48 if not os.path.isdir(templates_dir):
49 log('Templates directory not found @ %s.' % templates_dir,
50 level=ERROR)
51 raise OSConfigException
52
53 # the bottom contains templates_dir and possibly a common templates dir
54 # shipped with the helper.
55 loaders = [FileSystemLoader(templates_dir)]
56 helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
57 if os.path.isdir(helper_templates):
58 loaders.append(FileSystemLoader(helper_templates))
59
60 for rel, tmpl_dir in tmpl_dirs:
61 if os.path.isdir(tmpl_dir):
62 loaders.insert(0, FileSystemLoader(tmpl_dir))
63 if rel == os_release:
64 break
65 log('Creating choice loader with dirs: %s' %
66 [l.searchpath for l in loaders], level=INFO)
67 return ChoiceLoader(loaders)
68
69
70class OSConfigTemplate(object):
71 """
72 Associates a config file template with a list of context generators.
73 Responsible for constructing a template context based on those generators.
74 """
75 def __init__(self, config_file, contexts):
76 self.config_file = config_file
77
78 if hasattr(contexts, '__call__'):
79 self.contexts = [contexts]
80 else:
81 self.contexts = contexts
82
83 self._complete_contexts = []
84
85 def context(self):
86 ctxt = {}
87 for context in self.contexts:
88 _ctxt = context()
89 if _ctxt:
90 ctxt.update(_ctxt)
91 # track interfaces for every complete context.
92 [self._complete_contexts.append(interface)
93 for interface in context.interfaces
94 if interface not in self._complete_contexts]
95 return ctxt
96
97 def complete_contexts(self):
98 '''
99 Return a list of interfaces that have satisfied contexts.
100 '''
101 if self._complete_contexts:
102 return self._complete_contexts
103 self.context()
104 return self._complete_contexts
105
106
107class OSConfigRenderer(object):
108 """
109 This class provides a common templating system to be used by OpenStack
110 charms. It is intended to help charms share common code and templates,
111 and ease the burden of managing config templates across multiple OpenStack
112 releases.
113
114 Basic usage::
115
116 # import some common context generators from charmhelpers
117 from charmhelpers.contrib.openstack import context
118
119 # Create a renderer object for a specific OS release.
120 configs = OSConfigRenderer(templates_dir='/tmp/templates',
121 openstack_release='folsom')
122 # register some config files with context generators.
123 configs.register(config_file='/etc/nova/nova.conf',
124 contexts=[context.SharedDBContext(),
125 context.AMQPContext()])
126 configs.register(config_file='/etc/nova/api-paste.ini',
127 contexts=[context.IdentityServiceContext()])
128 configs.register(config_file='/etc/haproxy/haproxy.conf',
129 contexts=[context.HAProxyContext()])
130 # write out a single config
131 configs.write('/etc/nova/nova.conf')
132 # write out all registered configs
133 configs.write_all()
134
135 **OpenStack Releases and template loading**
136
137 When the object is instantiated, it is associated with a specific OS
138 release. This dictates how the template loader will be constructed.
139
140 The constructed loader attempts to load the template from several places
141 in the following order:
142 - from the most recent OS release-specific template dir (if one exists)
143 - the base templates_dir
144 - a template directory shipped in the charm with this helper file.
145
146 For the example above, '/tmp/templates' contains the following structure::
147
148 /tmp/templates/nova.conf
149 /tmp/templates/api-paste.ini
150 /tmp/templates/grizzly/api-paste.ini
151 /tmp/templates/havana/api-paste.ini
152
153 Since it was registered with the grizzly release, it first searches
154 the grizzly directory for nova.conf, then the templates dir.
155
156 When writing api-paste.ini, it will find the template in the grizzly
157 directory.
158
159 If the object were created with folsom, it would fall back to the
160 base templates dir for its api-paste.ini template.
161
162 This system should help manage changes in config files through
163 OpenStack releases, allowing charms to fall back to the most recently
164 updated config template for a given release.
165
166 The haproxy.conf, since it is not shipped in the templates dir, will
167 be loaded from the module directory's template directory, e.g.
168 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
169 us to ship common templates (haproxy, apache) with the helpers.
170
171 **Context generators**
172
173 Context generators are used to generate template contexts during hook
174 execution. Doing so may require inspecting service relations, charm
175 config, etc. When registered, a config file is associated with a list
176 of generators. When a template is rendered and written, all context
177 generators are called in a chain to generate the context dictionary
178 passed to the jinja2 template. See context.py for more info.
179 """
180 def __init__(self, templates_dir, openstack_release):
181 if not os.path.isdir(templates_dir):
182 log('Could not locate templates dir %s' % templates_dir,
183 level=ERROR)
184 raise OSConfigException
185
186 self.templates_dir = templates_dir
187 self.openstack_release = openstack_release
188 self.templates = {}
189 self._tmpl_env = None
190
191 if None in [Environment, ChoiceLoader, FileSystemLoader]:
192 # if this code is running, the object is created pre-install hook.
193 # jinja2 shouldn't get touched until the module is reloaded on next
194 # hook execution, with proper jinja2 bits successfully imported.
195 apt_install('python-jinja2')
196
197 def register(self, config_file, contexts):
198 """
199 Register a config file with a list of context generators to be called
200 during rendering.
201 """
202 self.templates[config_file] = OSConfigTemplate(config_file=config_file,
203 contexts=contexts)
204 log('Registered config file: %s' % config_file, level=INFO)
205
206 def _get_tmpl_env(self):
207 if not self._tmpl_env:
208 loader = get_loader(self.templates_dir, self.openstack_release)
209 self._tmpl_env = Environment(loader=loader)
210
211 def _get_template(self, template):
212 self._get_tmpl_env()
213 template = self._tmpl_env.get_template(template)
214 log('Loaded template from %s' % template.filename, level=INFO)
215 return template
216
217 def render(self, config_file):
218 if config_file not in self.templates:
219 log('Config not registered: %s' % config_file, level=ERROR)
220 raise OSConfigException
221 ctxt = self.templates[config_file].context()
222
223 _tmpl = os.path.basename(config_file)
224 try:
225 template = self._get_template(_tmpl)
226 except exceptions.TemplateNotFound:
227 # if no template is found with basename, try looking for it
228 # using a munged full path, eg:
229 # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
230 _tmpl = '_'.join(config_file.split('/')[1:])
231 try:
232 template = self._get_template(_tmpl)
233 except exceptions.TemplateNotFound as e:
234 log('Could not load template from %s by %s or %s.' %
235 (self.templates_dir, os.path.basename(config_file), _tmpl),
236 level=ERROR)
237 raise e
238
239 log('Rendering from template: %s' % _tmpl, level=INFO)
240 return template.render(ctxt)
241
242 def write(self, config_file):
243 """
244 Write a single config file, raises if config file is not registered.
245 """
246 if config_file not in self.templates:
247 log('Config not registered: %s' % config_file, level=ERROR)
248 raise OSConfigException
249
250 _out = self.render(config_file)
251
252 with open(config_file, 'wb') as out:
253 out.write(_out)
254
255 log('Wrote template %s.' % config_file, level=INFO)
256
257 def write_all(self):
258 """
259 Write out all registered config files.
260 """
261 [self.write(k) for k in six.iterkeys(self.templates)]
262
263 def set_release(self, openstack_release):
264 """
265 Resets the template environment and generates a new template loader
266 based on the new openstack release.
267 """
268 self._tmpl_env = None
269 self.openstack_release = openstack_release
270 self._get_tmpl_env()
271
272 def complete_contexts(self):
273 '''
274 Returns a list of context interfaces that yield a complete context.
275 '''
276 interfaces = []
277 [interfaces.extend(i.complete_contexts())
278 for i in six.itervalues(self.templates)]
279 return interfaces
0280
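One pattern the docstring above does not show: on an OpenStack upgrade a charm can re-point the same renderer at the new release's template search path and re-render everything it registered (release name illustrative):

    def do_openstack_upgrade(configs):
        # Rebuild the jinja2 loader for the new release, then rewrite all
        # registered config files from the freshest matching templates.
        configs.set_release(openstack_release='juno')
        configs.write_all()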
=== added file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,625 @@
1#!/usr/bin/python
2
3# Common python helper functions used for OpenStack charms.
4from collections import OrderedDict
5from functools import wraps
6
7import subprocess
8import json
9import os
10import socket
11import sys
12
13import six
14import yaml
15
16from charmhelpers.core.hookenv import (
17 config,
18 log as juju_log,
19 charm_dir,
20 INFO,
21 relation_ids,
22 relation_set
23)
24
25from charmhelpers.contrib.storage.linux.lvm import (
26 deactivate_lvm_volume_group,
27 is_lvm_physical_volume,
28 remove_lvm_physical_volume,
29)
30
31from charmhelpers.contrib.network.ip import (
32 get_ipv6_addr
33)
34
35from charmhelpers.core.host import lsb_release, mounts, umount
36from charmhelpers.fetch import apt_install, apt_cache, install_remote
37from charmhelpers.contrib.python.packages import pip_install
38from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
39from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
40
41CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
42CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
43
44DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
45 'restricted main multiverse universe')
46
47
48UBUNTU_OPENSTACK_RELEASE = OrderedDict([
49 ('oneiric', 'diablo'),
50 ('precise', 'essex'),
51 ('quantal', 'folsom'),
52 ('raring', 'grizzly'),
53 ('saucy', 'havana'),
54 ('trusty', 'icehouse'),
55 ('utopic', 'juno'),
56 ('vivid', 'kilo'),
57])
58
59
60OPENSTACK_CODENAMES = OrderedDict([
61 ('2011.2', 'diablo'),
62 ('2012.1', 'essex'),
63 ('2012.2', 'folsom'),
64 ('2013.1', 'grizzly'),
65 ('2013.2', 'havana'),
66 ('2014.1', 'icehouse'),
67 ('2014.2', 'juno'),
68 ('2015.1', 'kilo'),
69])
70
71# The ugly duckling
72SWIFT_CODENAMES = OrderedDict([
73 ('1.4.3', 'diablo'),
74 ('1.4.8', 'essex'),
75 ('1.7.4', 'folsom'),
76 ('1.8.0', 'grizzly'),
77 ('1.7.7', 'grizzly'),
78 ('1.7.6', 'grizzly'),
79 ('1.10.0', 'havana'),
80 ('1.9.1', 'havana'),
81 ('1.9.0', 'havana'),
82 ('1.13.1', 'icehouse'),
83 ('1.13.0', 'icehouse'),
84 ('1.12.0', 'icehouse'),
85 ('1.11.0', 'icehouse'),
86 ('2.0.0', 'juno'),
87 ('2.1.0', 'juno'),
88 ('2.2.0', 'juno'),
89 ('2.2.1', 'kilo'),
90])
91
92DEFAULT_LOOPBACK_SIZE = '5G'
93
94
95def error_out(msg):
96 juju_log("FATAL ERROR: %s" % msg, level='ERROR')
97 sys.exit(1)
98
99
100def get_os_codename_install_source(src):
101 '''Derive OpenStack release codename from a given installation source.'''
102 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
103 rel = ''
104 if src is None:
105 return rel
106 if src in ['distro', 'distro-proposed']:
107 try:
108 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
109 except KeyError:
110 e = 'Could not derive openstack release for '\
111 'this Ubuntu release: %s' % ubuntu_rel
112 error_out(e)
113 return rel
114
115 if src.startswith('cloud:'):
116 ca_rel = src.split(':')[1]
117 ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
118 return ca_rel
119
120 # Best guess match based on deb string provided
121 if src.startswith('deb') or src.startswith('ppa'):
122 for k, v in six.iteritems(OPENSTACK_CODENAMES):
123 if v in src:
124 return v
125
126
127def get_os_version_install_source(src):
128 codename = get_os_codename_install_source(src)
129 return get_os_version_codename(codename)
130
131
132def get_os_codename_version(vers):
133 '''Determine OpenStack codename from version number.'''
134 try:
135 return OPENSTACK_CODENAMES[vers]
136 except KeyError:
137 e = 'Could not determine OpenStack codename for version %s' % vers
138 error_out(e)
139
140
141def get_os_version_codename(codename):
142 '''Determine OpenStack version number from codename.'''
143 for k, v in six.iteritems(OPENSTACK_CODENAMES):
144 if v == codename:
145 return k
146 e = 'Could not derive OpenStack version for '\
147 'codename: %s' % codename
148 error_out(e)
149
150
151def get_os_codename_package(package, fatal=True):
152 '''Derive OpenStack release codename from an installed package.'''
153 import apt_pkg as apt
154
155 cache = apt_cache()
156
157 try:
158 pkg = cache[package]
159 except:
160 if not fatal:
161 return None
162 # the package is unknown to the current apt cache.
163 e = 'Could not determine version of package with no installation '\
164 'candidate: %s' % package
165 error_out(e)
166
167 if not pkg.current_ver:
168 if not fatal:
169 return None
170 # package is known, but no version is currently installed.
171 e = 'Could not determine version of uninstalled package: %s' % package
172 error_out(e)
173
174 vers = apt.upstream_version(pkg.current_ver.ver_str)
175
176 try:
177 if 'swift' in pkg.name:
178 swift_vers = vers[:5]
179 if swift_vers not in SWIFT_CODENAMES:
180 # Deal with 1.10.0 upward
181 swift_vers = vers[:6]
182 return SWIFT_CODENAMES[swift_vers]
183 else:
184 vers = vers[:6]
185 return OPENSTACK_CODENAMES[vers]
186 except KeyError:
187 e = 'Could not determine OpenStack codename for version %s' % vers
188 error_out(e)
189
190
191def get_os_version_package(pkg, fatal=True):
192 '''Derive OpenStack version number from an installed package.'''
193 codename = get_os_codename_package(pkg, fatal=fatal)
194
195 if not codename:
196 return None
197
198 if 'swift' in pkg:
199 vers_map = SWIFT_CODENAMES
200 else:
201 vers_map = OPENSTACK_CODENAMES
202
203 for version, cname in six.iteritems(vers_map):
204 if cname == codename:
205 return version
206 # e = "Could not determine OpenStack version for package: %s" % pkg
207 # error_out(e)
208
209
210os_rel = None
211
212
213def os_release(package, base='essex'):
214 '''
215 Returns OpenStack release codename from a cached global.
216 If the codename can not be determined from either an installed package or
217 the installation source, the earliest release supported by the charm should
218 be returned.
219 '''
220 global os_rel
221 if os_rel:
222 return os_rel
223 os_rel = (get_os_codename_package(package, fatal=False) or
224 get_os_codename_install_source(config('openstack-origin')) or
225 base)
226 return os_rel
227
228
229def import_key(keyid):
230 cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
231 "--recv-keys %s" % keyid
232 try:
233 subprocess.check_call(cmd.split(' '))
234 except subprocess.CalledProcessError:
235 error_out("Error importing repo key %s" % keyid)
236
237
238def configure_installation_source(rel):
239 '''Configure apt installation source.'''
240 if rel == 'distro':
241 return
242 elif rel == 'distro-proposed':
243 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
244 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
245 f.write(DISTRO_PROPOSED % ubuntu_rel)
246 elif rel[:4] == "ppa:":
247 src = rel
248 subprocess.check_call(["add-apt-repository", "-y", src])
249 elif rel[:3] == "deb":
250 l = len(rel.split('|'))
251 if l == 2:
252 src, key = rel.split('|')
253 juju_log("Importing PPA key from keyserver for %s" % src)
254 import_key(key)
255 elif l == 1:
256 src = rel
257 with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
258 f.write(src)
259 elif rel[:6] == 'cloud:':
260 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
261 rel = rel.split(':')[1]
262 u_rel = rel.split('-')[0]
263 ca_rel = rel.split('-')[1]
264
265 if u_rel != ubuntu_rel:
266 e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
267 'version (%s)' % (ca_rel, ubuntu_rel)
268 error_out(e)
269
270 if 'staging' in ca_rel:
271 # staging is just a regular PPA.
272 os_rel = ca_rel.split('/')[0]
273 ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
274 cmd = 'add-apt-repository -y %s' % ppa
275 subprocess.check_call(cmd.split(' '))
276 return
277
278 # map charm config options to actual archive pockets.
279 pockets = {
280 'folsom': 'precise-updates/folsom',
281 'folsom/updates': 'precise-updates/folsom',
282 'folsom/proposed': 'precise-proposed/folsom',
283 'grizzly': 'precise-updates/grizzly',
284 'grizzly/updates': 'precise-updates/grizzly',
285 'grizzly/proposed': 'precise-proposed/grizzly',
286 'havana': 'precise-updates/havana',
287 'havana/updates': 'precise-updates/havana',
288 'havana/proposed': 'precise-proposed/havana',
289 'icehouse': 'precise-updates/icehouse',
290 'icehouse/updates': 'precise-updates/icehouse',
291 'icehouse/proposed': 'precise-proposed/icehouse',
292 'juno': 'trusty-updates/juno',
293 'juno/updates': 'trusty-updates/juno',
294 'juno/proposed': 'trusty-proposed/juno',
295 'kilo': 'trusty-updates/kilo',
296 'kilo/updates': 'trusty-updates/kilo',
297 'kilo/proposed': 'trusty-proposed/kilo',
298 }
299
300 try:
301 pocket = pockets[ca_rel]
302 except KeyError:
303 e = 'Invalid Cloud Archive release specified: %s' % rel
304 error_out(e)
305
306 src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
307 apt_install('ubuntu-cloud-keyring', fatal=True)
308
309 with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
310 f.write(src)
311 else:
312 error_out("Invalid openstack-release specified: %s" % rel)
313
314
315def save_script_rc(script_path="scripts/scriptrc", **env_vars):
316 """
317 Write an rc file in the charm-delivered directory containing
318 exported environment variables provided by env_vars. Any charm scripts run
319 outside the juju hook environment can source this scriptrc to obtain
320 updated config information necessary to perform health checks or
321 service changes.
322 """
323 juju_rc_path = "%s/%s" % (charm_dir(), script_path)
324 if not os.path.exists(os.path.dirname(juju_rc_path)):
325 os.mkdir(os.path.dirname(juju_rc_path))
326 with open(juju_rc_path, 'wb') as rc_script:
327 rc_script.write(
328 "#!/bin/bash\n")
329 [rc_script.write('export %s=%s\n' % (u, p))
330 for u, p in six.iteritems(env_vars) if u != "script_path"]
331
332
333def openstack_upgrade_available(package):
334 """
335 Determines if an OpenStack upgrade is available from installation
336 source, based on version of installed package.
337
338 :param package: str: Name of installed package.
339
340 :returns: bool: Returns True if configured installation source offers
341 a newer version of package.
342
343 """
344
345 import apt_pkg as apt
346 src = config('openstack-origin')
347 cur_vers = get_os_version_package(package)
348 available_vers = get_os_version_install_source(src)
349 apt.init()
350 return apt.version_compare(available_vers, cur_vers) == 1
351
352
353def ensure_block_device(block_device):
354 '''
355 Confirm block_device, create as loopback if necessary.
356
357 :param block_device: str: Full path of block device to ensure.
358
359 :returns: str: Full path of ensured block device.
360 '''
361 _none = ['None', 'none', None]
362 if (block_device in _none):
363 error_out('ensure_block_device(): Missing required input: block_device=%s.'
364 % block_device)
365
366 if block_device.startswith('/dev/'):
367 bdev = block_device
368 elif block_device.startswith('/'):
369 _bd = block_device.split('|')
370 if len(_bd) == 2:
371 bdev, size = _bd
372 else:
373 bdev = block_device
374 size = DEFAULT_LOOPBACK_SIZE
375 bdev = ensure_loopback_device(bdev, size)
376 else:
377 bdev = '/dev/%s' % block_device
378
379 if not is_block_device(bdev):
380 error_out('Failed to locate valid block device at %s' % bdev)
381
382 return bdev
383
384
385def clean_storage(block_device):
386 '''
387 Ensures a block device is clean. That is:
388 - unmounted
389 - any lvm volume groups are deactivated
390 - any lvm physical device signatures removed
391 - partition table wiped
392
393 :param block_device: str: Full path to block device to clean.
394 '''
395 for mp, d in mounts():
396 if d == block_device:
397 juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
398 (d, mp), level=INFO)
399 umount(mp, persist=True)
400
401 if is_lvm_physical_volume(block_device):
402 deactivate_lvm_volume_group(block_device)
403 remove_lvm_physical_volume(block_device)
404 else:
405 zap_disk(block_device)
406
407
408def is_ip(address):
409 """
410 Returns True if address is a valid IPv4 address.
411 """
412 try:
413 # Test to see if already an IPv4 address
414 socket.inet_aton(address)
415 return True
416 except socket.error:
417 return False
418
419
420def ns_query(address):
421 try:
422 import dns.resolver
423 except ImportError:
424 apt_install('python-dnspython')
425 import dns.resolver
426
427 if isinstance(address, dns.name.Name):
428 rtype = 'PTR'
429 elif isinstance(address, six.string_types):
430 rtype = 'A'
431 else:
432 return None
433
434 answers = dns.resolver.query(address, rtype)
435 if answers:
436 return str(answers[0])
437 return None
438
439
440def get_host_ip(hostname):
441 """
442 Resolves the IP for a given hostname, or returns
443 the input if it is already an IP.
444 """
445 if is_ip(hostname):
446 return hostname
447
448 return ns_query(hostname)
449
450
451def get_hostname(address, fqdn=True):
452 """
453 Resolves hostname for given IP, or returns the input
454 if it is already a hostname.
455 """
456 if is_ip(address):
457 try:
458 import dns.reversename
459 except ImportError:
460 apt_install('python-dnspython')
461 import dns.reversename
462
463 rev = dns.reversename.from_address(address)
464 result = ns_query(rev)
465 if not result:
466 return None
467 else:
468 result = address
469
470 if fqdn:
471 # strip trailing .
472 if result.endswith('.'):
473 return result[:-1]
474 else:
475 return result
476 else:
477 return result.split('.')[0]
478
479
480def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
481 mm_map = {}
482 if os.path.isfile(mm_file):
483 with open(mm_file, 'r') as f:
484 mm_map = json.load(f)
485 return mm_map
486
487
488def sync_db_with_multi_ipv6_addresses(database, database_user,
489 relation_prefix=None):
490 hosts = get_ipv6_addr(dynamic_only=False)
491
492 kwargs = {'database': database,
493 'username': database_user,
494 'hostname': json.dumps(hosts)}
495
496 if relation_prefix:
497 for key in list(kwargs.keys()):
498 kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
499 del kwargs[key]
500
501 for rid in relation_ids('shared-db'):
502 relation_set(relation_id=rid, **kwargs)
503
504
505def os_requires_version(ostack_release, pkg):
506 """
507 Decorator for hook to specify minimum supported release
508 """
509 def wrap(f):
510 @wraps(f)
511 def wrapped_f(*args):
512 if os_release(pkg) < ostack_release:
513 raise Exception("This hook is not supported on releases"
514 " before %s" % ostack_release)
515 f(*args)
516 return wrapped_f
517 return wrap
518
519
520def git_install_requested():
521 """Returns true if openstack-origin-git is specified."""
522 return config('openstack-origin-git') != "None"
523
524
525requirements_dir = None
526
527
528def git_clone_and_install(file_name, core_project):
529 """Clone/install all OpenStack repos specified in yaml config file."""
530 global requirements_dir
531
532 if file_name == "None":
533 return
534
535 yaml_file = os.path.join(charm_dir(), file_name)
536
537 # clone/install the requirements project first
538 installed = _git_clone_and_install_subset(yaml_file,
539 whitelist=['requirements'])
540 if 'requirements' not in installed:
541 error_out('requirements git repository must be specified')
542
543 # clone/install all other projects except requirements and the core project
544 blacklist = ['requirements', core_project]
545 _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
546 update_requirements=True)
547
548 # clone/install the core project
549 whitelist = [core_project]
550 installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
551 update_requirements=True)
552 if core_project not in installed:
553 error_out('{} git repository must be specified'.format(core_project))
554
555
556def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
557 update_requirements=False):
558 """Clone/install subset of OpenStack repos specified in yaml config file."""
559 global requirements_dir
560 installed = []
561
562 with open(yaml_file, 'r') as fd:
563 projects = yaml.load(fd)
564 for proj, val in projects.items():
565 # The project subset is chosen based on the following 3 rules:
566 # 1) If project is in blacklist, we don't clone/install it, period.
567 # 2) If whitelist is empty, we clone/install everything else.
568 # 3) If whitelist is not empty, we clone/install everything in the
569 # whitelist.
570 if proj in blacklist:
571 continue
572 if whitelist and proj not in whitelist:
573 continue
574 repo = val['repository']
575 branch = val['branch']
576 repo_dir = _git_clone_and_install_single(repo, branch,
577 update_requirements)
578 if proj == 'requirements':
579 requirements_dir = repo_dir
580 installed.append(proj)
581 return installed
582
583
584def _git_clone_and_install_single(repo, branch, update_requirements=False):
585 """Clone and install a single git repository."""
586 dest_parent_dir = "/mnt/openstack-git/"
587 dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
588
589 if not os.path.exists(dest_parent_dir):
590 juju_log('Host dir not mounted at {}. '
591 'Creating directory there instead.'.format(dest_parent_dir))
592 os.mkdir(dest_parent_dir)
593
594 if not os.path.exists(dest_dir):
595 juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
596 repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
597 else:
598 repo_dir = dest_dir
599
600 if update_requirements:
601 if not requirements_dir:
602 error_out('requirements repo must be cloned before '
603 'updating from global requirements.')
604 _git_update_requirements(repo_dir, requirements_dir)
605
606 juju_log('Installing git repo from dir: {}'.format(repo_dir))
607 pip_install(repo_dir)
608
609 return repo_dir
610
611
612def _git_update_requirements(package_dir, reqs_dir):
613 """Update from global requirements.
614
615 Update an OpenStack git directory's requirements.txt and
616 test-requirements.txt from global-requirements.txt."""
617 orig_dir = os.getcwd()
618 os.chdir(reqs_dir)
619 cmd = "python update.py {}".format(package_dir)
620 try:
621 subprocess.check_call(cmd.split(' '))
622 except subprocess.CalledProcessError:
623 package = os.path.basename(package_dir)
624 error_out("Error updating {} from global-requirements.txt".format(package))
625 os.chdir(orig_dir)
0626
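A hedged sketch of how a charm's config-changed hook typically drives these helpers; the 'radosgw' package name is only an example:

    from charmhelpers.core.hookenv import config
    from charmhelpers.fetch import apt_install, apt_update
    from charmhelpers.contrib.openstack.utils import (
        configure_installation_source,
        openstack_upgrade_available,
    )

    def config_changed():
        # Point apt at the requested OpenStack origin and refresh the cache.
        configure_installation_source(config('openstack-origin'))
        apt_update(fatal=True)
        # If the new source carries a newer OpenStack version, upgrade.
        if openstack_upgrade_available('radosgw'):
            apt_install('radosgw', fatal=True)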
=== added directory 'hooks/charmhelpers/contrib/python'
=== added file 'hooks/charmhelpers/contrib/python/__init__.py'
=== added file 'hooks/charmhelpers/contrib/python/packages.py'
--- hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/python/packages.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,77 @@
1#!/usr/bin/env python
2# coding: utf-8
3
4__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
5
6from charmhelpers.fetch import apt_install, apt_update
7from charmhelpers.core.hookenv import log
8
9try:
10 from pip import main as pip_execute
11except ImportError:
12 apt_update()
13 apt_install('python-pip')
14 from pip import main as pip_execute
15
16
17def parse_options(given, available):
18 """Given a set of options, check if available"""
19 for key, value in sorted(given.items()):
20 if key in available:
21 yield "--{0}={1}".format(key, value)
22
23
24def pip_install_requirements(requirements, **options):
25 """Install a requirements file """
26 command = ["install"]
27
28 available_options = ('proxy', 'src', 'log', )
29 for option in parse_options(options, available_options):
30 command.append(option)
31
32 command.append("-r {0}".format(requirements))
33 log("Installing from file: {} with options: {}".format(requirements,
34 command))
35 pip_execute(command)
36
37
38def pip_install(package, fatal=False, **options):
39 """Install a python package"""
40 command = ["install"]
41
42 available_options = ('proxy', 'src', 'log', "index-url", )
43 for option in parse_options(options, available_options):
44 command.append(option)
45
46 if isinstance(package, list):
47 command.extend(package)
48 else:
49 command.append(package)
50
51 log("Installing {} package with options: {}".format(package,
52 command))
53 pip_execute(command)
54
55
56def pip_uninstall(package, **options):
57 """Uninstall a python package"""
58 command = ["uninstall", "-q", "-y"]
59
60 available_options = ('proxy', 'log', )
61 for option in parse_options(options, available_options):
62 command.append(option)
63
64 if isinstance(package, list):
65 command.extend(package)
66 else:
67 command.append(package)
68
69 log("Uninstalling {} package with options: {}".format(package,
70 command))
71 pip_execute(command)
72
73
74def pip_list():
75 """Returns the list of current python installed packages
76 """
77 return pip_execute(["list"])
078
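
A hedged usage sketch of the new pip helpers; the package names and proxy URL are illustrative, and charmhelpers must be importable, as it is from within a hook. pip_install() turns keyword options into flags via parse_options(), so the first call runs roughly `pip install --proxy=http://proxy.example.com:3128 python-swiftclient`.

    from charmhelpers.contrib.python.packages import pip_install, pip_uninstall

    pip_install('python-swiftclient', proxy='http://proxy.example.com:3128')
    pip_install(['jinja2', 'six'])        # a list installs several packages in one call
    pip_uninstall('python-swiftclient')
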
=== added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,428 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import os
12import shutil
13import json
14import time
15
16from subprocess import (
17 check_call,
18 check_output,
19 CalledProcessError,
20)
21from charmhelpers.core.hookenv import (
22 relation_get,
23 relation_ids,
24 related_units,
25 log,
26 DEBUG,
27 INFO,
28 WARNING,
29 ERROR,
30)
31from charmhelpers.core.host import (
32 mount,
33 mounts,
34 service_start,
35 service_stop,
36 service_running,
37 umount,
38)
39from charmhelpers.fetch import (
40 apt_install,
41)
42
43KEYRING = '/etc/ceph/ceph.client.{}.keyring'
44KEYFILE = '/etc/ceph/ceph.client.{}.key'
45
46CEPH_CONF = """[global]
47 auth supported = {auth}
48 keyring = {keyring}
49 mon host = {mon_hosts}
50 log to syslog = {use_syslog}
51 err to syslog = {use_syslog}
52 clog to syslog = {use_syslog}
53"""
54
55
56def install():
57 """Basic Ceph client installation."""
58 ceph_dir = "/etc/ceph"
59 if not os.path.exists(ceph_dir):
60 os.mkdir(ceph_dir)
61
62 apt_install('ceph-common', fatal=True)
63
64
65def rbd_exists(service, pool, rbd_img):
66 """Check to see if a RADOS block device exists."""
67 try:
68 out = check_output(['rbd', 'list', '--id',
69 service, '--pool', pool]).decode('UTF-8')
70 except CalledProcessError:
71 return False
72
73 return rbd_img in out
74
75
76def create_rbd_image(service, pool, image, sizemb):
77 """Create a new RADOS block device."""
78 cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
79 '--pool', pool]
80 check_call(cmd)
81
82
83def pool_exists(service, name):
84 """Check to see if a RADOS pool already exists."""
85 try:
86 out = check_output(['rados', '--id', service,
87 'lspools']).decode('UTF-8')
88 except CalledProcessError:
89 return False
90
91 return name in out
92
93
94def get_osds(service):
95 """Return a list of all Ceph Object Storage Daemons currently in the
96 cluster.
97 """
98 version = ceph_version()
99 if version and version >= '0.56':
100 return json.loads(check_output(['ceph', '--id', service,
101 'osd', 'ls',
102 '--format=json']).decode('UTF-8'))
103
104 return None
105
106
107def create_pool(service, name, replicas=3):
108 """Create a new RADOS pool."""
109 if pool_exists(service, name):
110 log("Ceph pool {} already exists, skipping creation".format(name),
111 level=WARNING)
112 return
113
114 # Calculate the number of placement groups based
115 # on upstream recommended best practices.
116 osds = get_osds(service)
117 if osds:
118 pgnum = (len(osds) * 100 // replicas)
119 else:
120 # NOTE(james-page): Default to 200 for older ceph versions
121 # which don't support OSD query from cli
122 pgnum = 200
123
124 cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
125 check_call(cmd)
126
127 cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
128 str(replicas)]
129 check_call(cmd)
130
131
132def delete_pool(service, name):
133 """Delete a RADOS pool from ceph."""
134 cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
135 '--yes-i-really-really-mean-it']
136 check_call(cmd)
137
138
139def _keyfile_path(service):
140 return KEYFILE.format(service)
141
142
143def _keyring_path(service):
144 return KEYRING.format(service)
145
146
147def create_keyring(service, key):
148 """Create a new Ceph keyring containing key."""
149 keyring = _keyring_path(service)
150 if os.path.exists(keyring):
151 log('Ceph keyring exists at %s.' % keyring, level=WARNING)
152 return
153
154 cmd = ['ceph-authtool', keyring, '--create-keyring',
155 '--name=client.{}'.format(service), '--add-key={}'.format(key)]
156 check_call(cmd)
157 log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
158
159
160def delete_keyring(service):
161 """Delete an existing Ceph keyring."""
162 keyring = _keyring_path(service)
163 if not os.path.exists(keyring):
164 log('Keyring does not exist at %s' % keyring, level=WARNING)
165 return
166
167 os.remove(keyring)
168 log('Deleted ring at %s.' % keyring, level=INFO)
169
170
171def create_key_file(service, key):
172 """Create a file containing key."""
173 keyfile = _keyfile_path(service)
174 if os.path.exists(keyfile):
175 log('Keyfile exists at %s.' % keyfile, level=WARNING)
176 return
177
178 with open(keyfile, 'w') as fd:
179 fd.write(key)
180
181 log('Created new keyfile at %s.' % keyfile, level=INFO)
182
183
184def get_ceph_nodes():
185 """Query named relation 'ceph' to determine current nodes."""
186 hosts = []
187 for r_id in relation_ids('ceph'):
188 for unit in related_units(r_id):
189 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
190
191 return hosts
192
193
194def configure(service, key, auth, use_syslog):
195 """Perform basic configuration of Ceph."""
196 create_keyring(service, key)
197 create_key_file(service, key)
198 hosts = get_ceph_nodes()
199 with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
200 ceph_conf.write(CEPH_CONF.format(auth=auth,
201 keyring=_keyring_path(service),
202 mon_hosts=",".join(map(str, hosts)),
203 use_syslog=use_syslog))
204 modprobe('rbd')
205
206
207def image_mapped(name):
208 """Determine whether a RADOS block device is mapped locally."""
209 try:
210 out = check_output(['rbd', 'showmapped']).decode('UTF-8')
211 except CalledProcessError:
212 return False
213
214 return name in out
215
216
217def map_block_storage(service, pool, image):
218 """Map a RADOS block device for local use."""
219 cmd = [
220 'rbd',
221 'map',
222 '{}/{}'.format(pool, image),
223 '--user',
224 service,
225 '--secret',
226 _keyfile_path(service),
227 ]
228 check_call(cmd)
229
230
231def filesystem_mounted(fs):
232    """Determine whether a filesystem is already mounted."""
233 return fs in [f for f, m in mounts()]
234
235
236def make_filesystem(blk_device, fstype='ext4', timeout=10):
237 """Make a new filesystem on the specified block device."""
238 count = 0
239 e_noent = os.errno.ENOENT
240 while not os.path.exists(blk_device):
241 if count >= timeout:
242 log('Gave up waiting on block device %s' % blk_device,
243 level=ERROR)
244 raise IOError(e_noent, os.strerror(e_noent), blk_device)
245
246 log('Waiting for block device %s to appear' % blk_device,
247 level=DEBUG)
248 count += 1
249 time.sleep(1)
250 else:
251 log('Formatting block device %s as filesystem %s.' %
252 (blk_device, fstype), level=INFO)
253 check_call(['mkfs', '-t', fstype, blk_device])
254
255
256def place_data_on_block_device(blk_device, data_src_dst):
257 """Migrate data in data_src_dst to blk_device and then remount."""
258 # mount block device into /mnt
259 mount(blk_device, '/mnt')
260 # copy data to /mnt
261 copy_files(data_src_dst, '/mnt')
262 # umount block device
263 umount('/mnt')
264 # Grab user/group ID's from original source
265 _dir = os.stat(data_src_dst)
266 uid = _dir.st_uid
267 gid = _dir.st_gid
268 # re-mount where the data should originally be
269 # TODO: persist is currently a NO-OP in core.host
270 mount(blk_device, data_src_dst, persist=True)
271 # ensure original ownership of new mount.
272 os.chown(data_src_dst, uid, gid)
273
274
275# TODO: re-use
276def modprobe(module):
277 """Load a kernel module and configure for auto-load on reboot."""
278 log('Loading kernel module', level=INFO)
279 cmd = ['modprobe', module]
280 check_call(cmd)
281 with open('/etc/modules', 'r+') as modules:
282 if module not in modules.read():
283 modules.write(module)
284
285
286def copy_files(src, dst, symlinks=False, ignore=None):
287 """Copy files from src to dst."""
288 for item in os.listdir(src):
289 s = os.path.join(src, item)
290 d = os.path.join(dst, item)
291 if os.path.isdir(s):
292 shutil.copytree(s, d, symlinks, ignore)
293 else:
294 shutil.copy2(s, d)
295
296
297def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
298 blk_device, fstype, system_services=[],
299 replicas=3):
300 """NOTE: This function must only be called from a single service unit for
301    the same rbd_img, otherwise data loss will occur.
302
303 Ensures given pool and RBD image exists, is mapped to a block device,
304 and the device is formatted and mounted at the given mount_point.
305
306 If formatting a device for the first time, data existing at mount_point
307 will be migrated to the RBD device before being re-mounted.
308
309 All services listed in system_services will be stopped prior to data
310 migration and restarted when complete.
311 """
312 # Ensure pool, RBD image, RBD mappings are in place.
313 if not pool_exists(service, pool):
314 log('Creating new pool {}.'.format(pool), level=INFO)
315 create_pool(service, pool, replicas=replicas)
316
317 if not rbd_exists(service, pool, rbd_img):
318 log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
319 create_rbd_image(service, pool, rbd_img, sizemb)
320
321 if not image_mapped(rbd_img):
322 log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
323 level=INFO)
324 map_block_storage(service, pool, rbd_img)
325
326 # make file system
327 # TODO: What happens if for whatever reason this is run again and
328 # the data is already in the rbd device and/or is mounted??
329 # When it is mounted already, it will fail to make the fs
330 # XXX: This is really sketchy! Need to at least add an fstab entry
331    # otherwise this hook will blow away existing data if it's executed
332 # after a reboot.
333 if not filesystem_mounted(mount_point):
334 make_filesystem(blk_device, fstype)
335
336 for svc in system_services:
337 if service_running(svc):
338 log('Stopping services {} prior to migrating data.'
339 .format(svc), level=DEBUG)
340 service_stop(svc)
341
342 place_data_on_block_device(blk_device, mount_point)
343
344 for svc in system_services:
345 log('Starting service {} after migrating data.'
346 .format(svc), level=DEBUG)
347 service_start(svc)
348
349
350def ensure_ceph_keyring(service, user=None, group=None):
351 """Ensures a ceph keyring is created for a named service and optionally
352 ensures user and group ownership.
353
354 Returns False if no ceph key is available in relation state.
355 """
356 key = None
357 for rid in relation_ids('ceph'):
358 for unit in related_units(rid):
359 key = relation_get('key', rid=rid, unit=unit)
360 if key:
361 break
362
363 if not key:
364 return False
365
366 create_keyring(service=service, key=key)
367 keyring = _keyring_path(service)
368 if user and group:
369 check_call(['chown', '%s.%s' % (user, group), keyring])
370
371 return True
372
373
374def ceph_version():
375 """Retrieve the local version of ceph."""
376 if os.path.exists('/usr/bin/ceph'):
377 cmd = ['ceph', '-v']
378 output = check_output(cmd).decode('US-ASCII')
379 output = output.split()
380 if len(output) > 3:
381 return output[2]
382 else:
383 return None
384 else:
385 return None
386
387
388class CephBrokerRq(object):
389 """Ceph broker request.
390
391 Multiple operations can be added to a request and sent to the Ceph broker
392 to be executed.
393
394 Request is json-encoded for sending over the wire.
395
396 The API is versioned and defaults to version 1.
397 """
398 def __init__(self, api_version=1):
399 self.api_version = api_version
400 self.ops = []
401
402 def add_op_create_pool(self, name, replica_count=3):
403 self.ops.append({'op': 'create-pool', 'name': name,
404 'replicas': replica_count})
405
406 @property
407 def request(self):
408 return json.dumps({'api-version': self.api_version, 'ops': self.ops})
409
410
411class CephBrokerRsp(object):
412 """Ceph broker response.
413
414 Response is json-decoded and contents provided as methods/properties.
415
416 The API is versioned and defaults to version 1.
417 """
418 def __init__(self, encoded_rsp):
419 self.api_version = None
420 self.rsp = json.loads(encoded_rsp)
421
422 @property
423 def exit_code(self):
424 return self.rsp.get('exit-code')
425
426 @property
427 def exit_msg(self):
428 return self.rsp.get('stderr')
0429
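
Two behaviours in this file are worth noting: create_pool() sizes a new pool at roughly 100 placement groups per OSD divided by the replica count, falling back to 200 when the OSD list cannot be queried, and the CephBrokerRq/CephBrokerRsp classes wrap the JSON sent to the ceph-mon units over the relation. A short sketch of that wire format follows; the pool name is illustrative.

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq, CephBrokerRsp

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='demo-pool', replica_count=3)
    print(rq.request)
    # e.g. {"api-version": 1, "ops": [{"op": "create-pool", "name": "demo-pool", "replicas": 3}]}

    # The reply is JSON as well; a zero exit-code means every op succeeded.
    rsp = CephBrokerRsp('{"exit-code": 0, "stderr": null}')
    assert rsp.exit_code == 0
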
=== added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
--- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,62 @@
1import os
2import re
3from subprocess import (
4 check_call,
5 check_output,
6)
7
8import six
9
10
11##################################################
12# loopback device helpers.
13##################################################
14def loopback_devices():
15 '''
16 Parse through 'losetup -a' output to determine currently mapped
17 loopback devices. Output is expected to look like:
18
19 /dev/loop0: [0807]:961814 (/tmp/my.img)
20
21 :returns: dict: a dict mapping {loopback_dev: backing_file}
22 '''
23 loopbacks = {}
24 cmd = ['losetup', '-a']
25 devs = [d.strip().split(' ') for d in
26 check_output(cmd).splitlines() if d != '']
27 for dev, _, f in devs:
28 loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
29 return loopbacks
30
31
32def create_loopback(file_path):
33 '''
34 Create a loopback device for a given backing file.
35
36 :returns: str: Full path to new loopback device (eg, /dev/loop0)
37 '''
38 file_path = os.path.abspath(file_path)
39 check_call(['losetup', '--find', file_path])
40 for d, f in six.iteritems(loopback_devices()):
41 if f == file_path:
42 return d
43
44
45def ensure_loopback_device(path, size):
46 '''
47 Ensure a loopback device exists for a given backing file path and size.
48    If a loopback device is not already mapped to the file, a new one will be created.
49
50 TODO: Confirm size of found loopback device.
51
52 :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
53 '''
54 for d, f in six.iteritems(loopback_devices()):
55 if f == path:
56 return d
57
58 if not os.path.exists(path):
59 cmd = ['truncate', '--size', size, path]
60 check_call(cmd)
61
62 return create_loopback(path)
063
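
A hedged sketch of ensure_loopback_device(); the path and size are illustrative, and actually running it requires root plus the losetup and truncate tools.

    from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device

    # Returns the /dev/loopN already backing this file, or creates the file with
    # `truncate --size 5G` and maps a new loop device to it.
    device = ensure_loopback_device('/srv/demo/backing.img', '5G')
    print(device)  # e.g. '/dev/loop0'
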
=== added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
--- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,89 @@
1from subprocess import (
2 CalledProcessError,
3 check_call,
4 check_output,
5 Popen,
6 PIPE,
7)
8
9
10##################################################
11# LVM helpers.
12##################################################
13def deactivate_lvm_volume_group(block_device):
14 '''
15    Deactivate any volume group associated with an LVM physical volume.
16
17 :param block_device: str: Full path to LVM physical volume
18 '''
19 vg = list_lvm_volume_group(block_device)
20 if vg:
21 cmd = ['vgchange', '-an', vg]
22 check_call(cmd)
23
24
25def is_lvm_physical_volume(block_device):
26 '''
27 Determine whether a block device is initialized as an LVM PV.
28
29 :param block_device: str: Full path of block device to inspect.
30
31 :returns: boolean: True if block device is a PV, False if not.
32 '''
33 try:
34 check_output(['pvdisplay', block_device])
35 return True
36 except CalledProcessError:
37 return False
38
39
40def remove_lvm_physical_volume(block_device):
41 '''
42 Remove LVM PV signatures from a given block device.
43
44 :param block_device: str: Full path of block device to scrub.
45 '''
46 p = Popen(['pvremove', '-ff', block_device],
47 stdin=PIPE)
48 p.communicate(input='y\n')
49
50
51def list_lvm_volume_group(block_device):
52 '''
53 List LVM volume group associated with a given block device.
54
55 Assumes block device is a valid LVM PV.
56
57 :param block_device: str: Full path of block device to inspect.
58
59 :returns: str: Name of volume group associated with block device or None
60 '''
61 vg = None
62 pvd = check_output(['pvdisplay', block_device]).splitlines()
63 for l in pvd:
64 l = l.decode('UTF-8')
65 if l.strip().startswith('VG Name'):
66 vg = ' '.join(l.strip().split()[2:])
67 return vg
68
69
70def create_lvm_physical_volume(block_device):
71 '''
72 Initialize a block device as an LVM physical volume.
73
74 :param block_device: str: Full path of block device to initialize.
75
76 '''
77 check_call(['pvcreate', block_device])
78
79
80def create_lvm_volume_group(volume_group, block_device):
81 '''
82 Create an LVM volume group backed by a given block device.
83
84 Assumes block device has already been initialized as an LVM PV.
85
86 :param volume_group: str: Name of volume group to create.
87 :block_device: str: Full path of PV-initialized block device.
88 '''
89 check_call(['vgcreate', volume_group, block_device])
090
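
A hedged sketch of the intended call order for the LVM helpers; the device and volume group names are illustrative, and the commands need root plus the lvm2 tools installed.

    from charmhelpers.contrib.storage.linux.lvm import (
        is_lvm_physical_volume,
        create_lvm_physical_volume,
        create_lvm_volume_group,
        list_lvm_volume_group,
    )

    device = '/dev/vdb'
    if not is_lvm_physical_volume(device):
        create_lvm_physical_volume(device)          # pvcreate /dev/vdb
        create_lvm_volume_group('demo-vg', device)  # vgcreate demo-vg /dev/vdb
    print(list_lvm_volume_group(device))            # e.g. 'demo-vg'
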
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-09-17 14:11:53 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2015-01-15 16:18:44 +0000
@@ -30,7 +30,8 @@
30 # sometimes sgdisk exits non-zero; this is OK, dd will clean up30 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
31 call(['sgdisk', '--zap-all', '--mbrtogpt',31 call(['sgdisk', '--zap-all', '--mbrtogpt',
32 '--clear', block_device])32 '--clear', block_device])
33 dev_end = check_output(['blockdev', '--getsz', block_device])33 dev_end = check_output(['blockdev', '--getsz',
34 block_device]).decode('UTF-8')
34 gpt_end = int(dev_end.split()[0]) - 10035 gpt_end = int(dev_end.split()[0]) - 100
35 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),36 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
36 'bs=1M', 'count=1'])37 'bs=1M', 'count=1'])
@@ -47,7 +48,7 @@
47 it doesn't.48 it doesn't.
48 '''49 '''
49 is_partition = bool(re.search(r".*[0-9]+\b", device))50 is_partition = bool(re.search(r".*[0-9]+\b", device))
50 out = check_output(['mount'])51 out = check_output(['mount']).decode('UTF-8')
51 if is_partition:52 if is_partition:
52 return bool(re.search(device + r"\b", out))53 return bool(re.search(device + r"\b", out))
53 return bool(re.search(device + r"[0-9]+\b", out))54 return bool(re.search(device + r"[0-9]+\b", out))
5455
=== added file 'hooks/charmhelpers/core/decorators.py'
--- hooks/charmhelpers/core/decorators.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/decorators.py 2015-01-15 16:18:44 +0000
@@ -0,0 +1,41 @@
1#
2# Copyright 2014 Canonical Ltd.
3#
4# Authors:
5# Edward Hope-Morley <opentastic@gmail.com>
6#
7
8import time
9
10from charmhelpers.core.hookenv import (
11 log,
12 INFO,
13)
14
15
16def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
17 """If the decorated function raises exception exc_type, allow num_retries
18    retry attempts before raising the exception.
19 """
20 def _retry_on_exception_inner_1(f):
21 def _retry_on_exception_inner_2(*args, **kwargs):
22 retries = num_retries
23 multiplier = 1
24 while True:
25 try:
26 return f(*args, **kwargs)
27 except exc_type:
28 if not retries:
29 raise
30
31 delay = base_delay * multiplier
32 multiplier += 1
33 log("Retrying '%s' %d more times (delay=%s)" %
34 (f.__name__, retries, delay), level=INFO)
35 retries -= 1
36 if delay:
37 time.sleep(delay)
38
39 return _retry_on_exception_inner_2
40
41 return _retry_on_exception_inner_1
042
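
A hedged sketch of retry_on_exception(): the wrapped call is retried num_retries times with a linearly growing delay (base_delay, then twice, then three times it) before the exception is finally re-raised. The service name below is illustrative.

    from subprocess import CalledProcessError, check_call

    from charmhelpers.core.decorators import retry_on_exception

    @retry_on_exception(num_retries=3, base_delay=5, exc_type=CalledProcessError)
    def restart_service():
        # Retried up to three times, sleeping 5s, 10s and 15s between attempts.
        check_call(['service', 'apache2', 'restart'])
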
=== modified file 'hooks/charmhelpers/core/fstab.py'
--- hooks/charmhelpers/core/fstab.py 2014-07-24 09:43:27 +0000
+++ hooks/charmhelpers/core/fstab.py 2015-01-15 16:18:44 +0000
@@ -3,10 +3,11 @@
33
4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
55
6import io
6import os7import os
78
89
9class Fstab(file):10class Fstab(io.FileIO):
10 """This class extends file in order to implement a file reader/writer11 """This class extends file in order to implement a file reader/writer
11 for file `/etc/fstab`12 for file `/etc/fstab`
12 """13 """
@@ -24,8 +25,8 @@
24 options = "defaults"25 options = "defaults"
2526
26 self.options = options27 self.options = options
27 self.d = d28 self.d = int(d)
28 self.p = p29 self.p = int(p)
2930
30 def __eq__(self, o):31 def __eq__(self, o):
31 return str(self) == str(o)32 return str(self) == str(o)
@@ -45,7 +46,7 @@
45 self._path = path46 self._path = path
46 else:47 else:
47 self._path = self.DEFAULT_PATH48 self._path = self.DEFAULT_PATH
48 file.__init__(self, self._path, 'r+')49 super(Fstab, self).__init__(self._path, 'rb+')
4950
50 def _hydrate_entry(self, line):51 def _hydrate_entry(self, line):
51 # NOTE: use split with no arguments to split on any52 # NOTE: use split with no arguments to split on any
@@ -58,8 +59,9 @@
58 def entries(self):59 def entries(self):
59 self.seek(0)60 self.seek(0)
60 for line in self.readlines():61 for line in self.readlines():
62 line = line.decode('us-ascii')
61 try:63 try:
62 if not line.startswith("#"):64 if line.strip() and not line.startswith("#"):
63 yield self._hydrate_entry(line)65 yield self._hydrate_entry(line)
64 except ValueError:66 except ValueError:
65 pass67 pass
@@ -75,14 +77,14 @@
75 if self.get_entry_by_attr('device', entry.device):77 if self.get_entry_by_attr('device', entry.device):
76 return False78 return False
7779
78 self.write(str(entry) + '\n')80 self.write((str(entry) + '\n').encode('us-ascii'))
79 self.truncate()81 self.truncate()
80 return entry82 return entry
8183
82 def remove_entry(self, entry):84 def remove_entry(self, entry):
83 self.seek(0)85 self.seek(0)
8486
85 lines = self.readlines()87 lines = [l.decode('us-ascii') for l in self.readlines()]
8688
87 found = False89 found = False
88 for index, line in enumerate(lines):90 for index, line in enumerate(lines):
@@ -97,7 +99,7 @@
97 lines.remove(line)99 lines.remove(line)
98100
99 self.seek(0)101 self.seek(0)
100 self.write(''.join(lines))102 self.write(''.join(lines).encode('us-ascii'))
101 self.truncate()103 self.truncate()
102 return True104 return True
103105
104106
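
With the port from the Python 2 file type to io.FileIO, Fstab now reads and writes bytes and decodes each line as us-ascii, so the same code behaves identically under Python 2 and 3. A minimal read-only sketch (opening /etc/fstab read/write still requires root):

    from charmhelpers.core.fstab import Fstab

    fstab = Fstab()                  # defaults to /etc/fstab, opened 'rb+'
    for entry in fstab.entries:      # blank lines and comments are skipped
        print(entry.device, str(entry))
    fstab.close()
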
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-10-21 07:28:36 +0000
+++ hooks/charmhelpers/core/hookenv.py 2015-01-15 16:18:44 +0000
@@ -9,9 +9,14 @@
9import yaml9import yaml
10import subprocess10import subprocess
11import sys11import sys
12import UserDict
13from subprocess import CalledProcessError12from subprocess import CalledProcessError
1413
14import six
15if not six.PY3:
16 from UserDict import UserDict
17else:
18 from collections import UserDict
19
15CRITICAL = "CRITICAL"20CRITICAL = "CRITICAL"
16ERROR = "ERROR"21ERROR = "ERROR"
17WARNING = "WARNING"22WARNING = "WARNING"
@@ -63,16 +68,18 @@
63 command = ['juju-log']68 command = ['juju-log']
64 if level:69 if level:
65 command += ['-l', level]70 command += ['-l', level]
71 if not isinstance(message, six.string_types):
72 message = repr(message)
66 command += [message]73 command += [message]
67 subprocess.call(command)74 subprocess.call(command)
6875
6976
70class Serializable(UserDict.IterableUserDict):77class Serializable(UserDict):
71 """Wrapper, an object that can be serialized to yaml or json"""78 """Wrapper, an object that can be serialized to yaml or json"""
7279
73 def __init__(self, obj):80 def __init__(self, obj):
74 # wrap the object81 # wrap the object
75 UserDict.IterableUserDict.__init__(self)82 UserDict.__init__(self)
76 self.data = obj83 self.data = obj
7784
78 def __getattr__(self, attr):85 def __getattr__(self, attr):
@@ -218,7 +225,7 @@
218 prev_keys = []225 prev_keys = []
219 if self._prev_dict is not None:226 if self._prev_dict is not None:
220 prev_keys = self._prev_dict.keys()227 prev_keys = self._prev_dict.keys()
221 return list(set(prev_keys + dict.keys(self)))228 return list(set(prev_keys + list(dict.keys(self))))
222229
223 def load_previous(self, path=None):230 def load_previous(self, path=None):
224 """Load previous copy of config from disk.231 """Load previous copy of config from disk.
@@ -269,7 +276,7 @@
269276
270 """277 """
271 if self._prev_dict:278 if self._prev_dict:
272 for k, v in self._prev_dict.iteritems():279 for k, v in six.iteritems(self._prev_dict):
273 if k not in self:280 if k not in self:
274 self[k] = v281 self[k] = v
275 with open(self.path, 'w') as f:282 with open(self.path, 'w') as f:
@@ -284,7 +291,8 @@
284 config_cmd_line.append(scope)291 config_cmd_line.append(scope)
285 config_cmd_line.append('--format=json')292 config_cmd_line.append('--format=json')
286 try:293 try:
287 config_data = json.loads(subprocess.check_output(config_cmd_line))294 config_data = json.loads(
295 subprocess.check_output(config_cmd_line).decode('UTF-8'))
288 if scope is not None:296 if scope is not None:
289 return config_data297 return config_data
290 return Config(config_data)298 return Config(config_data)
@@ -303,10 +311,10 @@
303 if unit:311 if unit:
304 _args.append(unit)312 _args.append(unit)
305 try:313 try:
306 return json.loads(subprocess.check_output(_args))314 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
307 except ValueError:315 except ValueError:
308 return None316 return None
309 except CalledProcessError, e:317 except CalledProcessError as e:
310 if e.returncode == 2:318 if e.returncode == 2:
311 return None319 return None
312 raise320 raise
@@ -318,7 +326,7 @@
318 relation_cmd_line = ['relation-set']326 relation_cmd_line = ['relation-set']
319 if relation_id is not None:327 if relation_id is not None:
320 relation_cmd_line.extend(('-r', relation_id))328 relation_cmd_line.extend(('-r', relation_id))
321 for k, v in (relation_settings.items() + kwargs.items()):329 for k, v in (list(relation_settings.items()) + list(kwargs.items())):
322 if v is None:330 if v is None:
323 relation_cmd_line.append('{}='.format(k))331 relation_cmd_line.append('{}='.format(k))
324 else:332 else:
@@ -335,7 +343,8 @@
335 relid_cmd_line = ['relation-ids', '--format=json']343 relid_cmd_line = ['relation-ids', '--format=json']
336 if reltype is not None:344 if reltype is not None:
337 relid_cmd_line.append(reltype)345 relid_cmd_line.append(reltype)
338 return json.loads(subprocess.check_output(relid_cmd_line)) or []346 return json.loads(
347 subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
339 return []348 return []
340349
341350
@@ -346,7 +355,8 @@
346 units_cmd_line = ['relation-list', '--format=json']355 units_cmd_line = ['relation-list', '--format=json']
347 if relid is not None:356 if relid is not None:
348 units_cmd_line.extend(('-r', relid))357 units_cmd_line.extend(('-r', relid))
349 return json.loads(subprocess.check_output(units_cmd_line)) or []358 return json.loads(
359 subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
350360
351361
352@cached362@cached
@@ -386,21 +396,31 @@
386396
387397
388@cached398@cached
399def metadata():
400 """Get the current charm metadata.yaml contents as a python object"""
401 with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
402 return yaml.safe_load(md)
403
404
405@cached
389def relation_types():406def relation_types():
390 """Get a list of relation types supported by this charm"""407 """Get a list of relation types supported by this charm"""
391 charmdir = os.environ.get('CHARM_DIR', '')
392 mdf = open(os.path.join(charmdir, 'metadata.yaml'))
393 md = yaml.safe_load(mdf)
394 rel_types = []408 rel_types = []
409 md = metadata()
395 for key in ('provides', 'requires', 'peers'):410 for key in ('provides', 'requires', 'peers'):
396 section = md.get(key)411 section = md.get(key)
397 if section:412 if section:
398 rel_types.extend(section.keys())413 rel_types.extend(section.keys())
399 mdf.close()
400 return rel_types414 return rel_types
401415
402416
403@cached417@cached
418def charm_name():
419 """Get the name of the current charm as is specified on metadata.yaml"""
420 return metadata().get('name')
421
422
423@cached
404def relations():424def relations():
405 """Get a nested dictionary of relation data for all related units"""425 """Get a nested dictionary of relation data for all related units"""
406 rels = {}426 rels = {}
@@ -455,7 +475,7 @@
455 """Get the unit ID for the remote unit"""475 """Get the unit ID for the remote unit"""
456 _args = ['unit-get', '--format=json', attribute]476 _args = ['unit-get', '--format=json', attribute]
457 try:477 try:
458 return json.loads(subprocess.check_output(_args))478 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
459 except ValueError:479 except ValueError:
460 return None480 return None
461481
462482
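
A short sketch of the new metadata() and charm_name() helpers, which read the charm's own metadata.yaml via charm_dir() so relation_types() no longer opens the file itself. This is only meaningful inside a hook execution, where the Juju environment variables are set.

    from charmhelpers.core.hookenv import charm_name, metadata, relation_types

    md = metadata()            # parsed metadata.yaml as a dict
    print(charm_name())        # the 'name' field of metadata.yaml
    print(relation_types())    # keys of the provides/requires/peers sections
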
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-10-21 07:28:36 +0000
+++ hooks/charmhelpers/core/host.py 2015-01-15 16:18:44 +0000
@@ -14,11 +14,12 @@
14import subprocess14import subprocess
15import hashlib15import hashlib
16from contextlib import contextmanager16from contextlib import contextmanager
17
18from collections import OrderedDict17from collections import OrderedDict
1918
20from hookenv import log19import six
21from fstab import Fstab20
21from .hookenv import log
22from .fstab import Fstab
2223
2324
24def service_start(service_name):25def service_start(service_name):
@@ -54,7 +55,9 @@
54def service_running(service):55def service_running(service):
55 """Determine whether a system service is running"""56 """Determine whether a system service is running"""
56 try:57 try:
57 output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)58 output = subprocess.check_output(
59 ['service', service, 'status'],
60 stderr=subprocess.STDOUT).decode('UTF-8')
58 except subprocess.CalledProcessError:61 except subprocess.CalledProcessError:
59 return False62 return False
60 else:63 else:
@@ -67,7 +70,9 @@
67def service_available(service_name):70def service_available(service_name):
68 """Determine whether a system service is available"""71 """Determine whether a system service is available"""
69 try:72 try:
70 subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)73 subprocess.check_output(
74 ['service', service_name, 'status'],
75 stderr=subprocess.STDOUT).decode('UTF-8')
71 except subprocess.CalledProcessError as e:76 except subprocess.CalledProcessError as e:
72 return 'unrecognized service' not in e.output77 return 'unrecognized service' not in e.output
73 else:78 else:
@@ -96,6 +101,26 @@
96 return user_info101 return user_info
97102
98103
104def add_group(group_name, system_group=False):
105 """Add a group to the system"""
106 try:
107 group_info = grp.getgrnam(group_name)
108 log('group {0} already exists!'.format(group_name))
109 except KeyError:
110 log('creating group {0}'.format(group_name))
111 cmd = ['addgroup']
112 if system_group:
113 cmd.append('--system')
114 else:
115 cmd.extend([
116 '--group',
117 ])
118 cmd.append(group_name)
119 subprocess.check_call(cmd)
120 group_info = grp.getgrnam(group_name)
121 return group_info
122
123
99def add_user_to_group(username, group):124def add_user_to_group(username, group):
100 """Add a user to a group"""125 """Add a user to a group"""
101 cmd = [126 cmd = [
@@ -115,7 +140,7 @@
115 cmd.append(from_path)140 cmd.append(from_path)
116 cmd.append(to_path)141 cmd.append(to_path)
117 log(" ".join(cmd))142 log(" ".join(cmd))
118 return subprocess.check_output(cmd).strip()143 return subprocess.check_output(cmd).decode('UTF-8').strip()
119144
120145
121def symlink(source, destination):146def symlink(source, destination):
@@ -130,23 +155,26 @@
130 subprocess.check_call(cmd)155 subprocess.check_call(cmd)
131156
132157
133def mkdir(path, owner='root', group='root', perms=0555, force=False):158def mkdir(path, owner='root', group='root', perms=0o555, force=False):
134 """Create a directory"""159 """Create a directory"""
135 log("Making dir {} {}:{} {:o}".format(path, owner, group,160 log("Making dir {} {}:{} {:o}".format(path, owner, group,
136 perms))161 perms))
137 uid = pwd.getpwnam(owner).pw_uid162 uid = pwd.getpwnam(owner).pw_uid
138 gid = grp.getgrnam(group).gr_gid163 gid = grp.getgrnam(group).gr_gid
139 realpath = os.path.abspath(path)164 realpath = os.path.abspath(path)
140 if os.path.exists(realpath):165 path_exists = os.path.exists(realpath)
141 if force and not os.path.isdir(realpath):166 if path_exists and force:
167 if not os.path.isdir(realpath):
142 log("Removing non-directory file {} prior to mkdir()".format(path))168 log("Removing non-directory file {} prior to mkdir()".format(path))
143 os.unlink(realpath)169 os.unlink(realpath)
144 else:170 os.makedirs(realpath, perms)
171 os.chown(realpath, uid, gid)
172 elif not path_exists:
145 os.makedirs(realpath, perms)173 os.makedirs(realpath, perms)
146 os.chown(realpath, uid, gid)174 os.chown(realpath, uid, gid)
147175
148176
149def write_file(path, content, owner='root', group='root', perms=0444):177def write_file(path, content, owner='root', group='root', perms=0o444):
150 """Create or overwrite a file with the contents of a string"""178 """Create or overwrite a file with the contents of a string"""
151 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))179 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
152 uid = pwd.getpwnam(owner).pw_uid180 uid = pwd.getpwnam(owner).pw_uid
@@ -177,7 +205,7 @@
177 cmd_args.extend([device, mountpoint])205 cmd_args.extend([device, mountpoint])
178 try:206 try:
179 subprocess.check_output(cmd_args)207 subprocess.check_output(cmd_args)
180 except subprocess.CalledProcessError, e:208 except subprocess.CalledProcessError as e:
181 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))209 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
182 return False210 return False
183211
@@ -191,7 +219,7 @@
191 cmd_args = ['umount', mountpoint]219 cmd_args = ['umount', mountpoint]
192 try:220 try:
193 subprocess.check_output(cmd_args)221 subprocess.check_output(cmd_args)
194 except subprocess.CalledProcessError, e:222 except subprocess.CalledProcessError as e:
195 log('Error unmounting {}\n{}'.format(mountpoint, e.output))223 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
196 return False224 return False
197225
@@ -218,8 +246,8 @@
218 """246 """
219 if os.path.exists(path):247 if os.path.exists(path):
220 h = getattr(hashlib, hash_type)()248 h = getattr(hashlib, hash_type)()
221 with open(path, 'r') as source:249 with open(path, 'rb') as source:
222 h.update(source.read()) # IGNORE:E1101 - it does have update250 h.update(source.read())
223 return h.hexdigest()251 return h.hexdigest()
224 else:252 else:
225 return None253 return None
@@ -297,7 +325,7 @@
297 if length is None:325 if length is None:
298 length = random.choice(range(35, 45))326 length = random.choice(range(35, 45))
299 alphanumeric_chars = [327 alphanumeric_chars = [
300 l for l in (string.letters + string.digits)328 l for l in (string.ascii_letters + string.digits)
301 if l not in 'l0QD1vAEIOUaeiou']329 if l not in 'l0QD1vAEIOUaeiou']
302 random_chars = [330 random_chars = [
303 random.choice(alphanumeric_chars) for _ in range(length)]331 random.choice(alphanumeric_chars) for _ in range(length)]
@@ -306,14 +334,14 @@
306334
307def list_nics(nic_type):335def list_nics(nic_type):
308 '''Return a list of nics of given type(s)'''336 '''Return a list of nics of given type(s)'''
309 if isinstance(nic_type, basestring):337 if isinstance(nic_type, six.string_types):
310 int_types = [nic_type]338 int_types = [nic_type]
311 else:339 else:
312 int_types = nic_type340 int_types = nic_type
313 interfaces = []341 interfaces = []
314 for int_type in int_types:342 for int_type in int_types:
315 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']343 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
316 ip_output = subprocess.check_output(cmd).split('\n')344 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
317 ip_output = (line for line in ip_output if line)345 ip_output = (line for line in ip_output if line)
318 for line in ip_output:346 for line in ip_output:
319 if line.split()[1].startswith(int_type):347 if line.split()[1].startswith(int_type):
@@ -335,7 +363,7 @@
335363
336def get_nic_mtu(nic):364def get_nic_mtu(nic):
337 cmd = ['ip', 'addr', 'show', nic]365 cmd = ['ip', 'addr', 'show', nic]
338 ip_output = subprocess.check_output(cmd).split('\n')366 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
339 mtu = ""367 mtu = ""
340 for line in ip_output:368 for line in ip_output:
341 words = line.split()369 words = line.split()
@@ -346,7 +374,7 @@
346374
347def get_nic_hwaddr(nic):375def get_nic_hwaddr(nic):
348 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]376 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
349 ip_output = subprocess.check_output(cmd)377 ip_output = subprocess.check_output(cmd).decode('UTF-8')
350 hwaddr = ""378 hwaddr = ""
351 words = ip_output.split()379 words = ip_output.split()
352 if 'link/ether' in words:380 if 'link/ether' in words:
@@ -363,8 +391,8 @@
363391
364 '''392 '''
365 import apt_pkg393 import apt_pkg
366 from charmhelpers.fetch import apt_cache
367 if not pkgcache:394 if not pkgcache:
395 from charmhelpers.fetch import apt_cache
368 pkgcache = apt_cache()396 pkgcache = apt_cache()
369 pkg = pkgcache[package]397 pkg = pkgcache[package]
370 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)398 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
371399
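
A hedged sketch of the new add_group() helper alongside the existing add_user_to_group() and mkdir(); the group name and path are illustrative. Note that permissions throughout this file are now written as Python 3 compatible octals (0o755 rather than 0755).

    from charmhelpers.core.host import add_group, add_user_to_group, mkdir

    add_group('demo', system_group=True)      # runs: addgroup --system demo
    add_user_to_group('www-data', 'demo')
    mkdir('/var/lib/demo', owner='www-data', group='demo', perms=0o750)
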
=== modified file 'hooks/charmhelpers/core/services/__init__.py'
--- hooks/charmhelpers/core/services/__init__.py 2014-09-17 14:11:53 +0000
+++ hooks/charmhelpers/core/services/__init__.py 2015-01-15 16:18:44 +0000
@@ -1,2 +1,2 @@
1from .base import *1from .base import * # NOQA
2from .helpers import *2from .helpers import * # NOQA
33
=== modified file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 2014-09-27 17:33:59 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2015-01-15 16:18:44 +0000
@@ -196,7 +196,7 @@
196 if not os.path.isabs(file_name):196 if not os.path.isabs(file_name):
197 file_name = os.path.join(hookenv.charm_dir(), file_name)197 file_name = os.path.join(hookenv.charm_dir(), file_name)
198 with open(file_name, 'w') as file_stream:198 with open(file_name, 'w') as file_stream:
199 os.fchmod(file_stream.fileno(), 0600)199 os.fchmod(file_stream.fileno(), 0o600)
200 yaml.dump(config_data, file_stream)200 yaml.dump(config_data, file_stream)
201201
202 def read_context(self, file_name):202 def read_context(self, file_name):
@@ -211,15 +211,19 @@
211211
212class TemplateCallback(ManagerCallback):212class TemplateCallback(ManagerCallback):
213 """213 """
214 Callback class that will render a Jinja2 template, for use as a ready action.214 Callback class that will render a Jinja2 template, for use as a ready
215215 action.
216 :param str source: The template source file, relative to `$CHARM_DIR/templates`216
217 :param str source: The template source file, relative to
218 `$CHARM_DIR/templates`
219
217 :param str target: The target to write the rendered template to220 :param str target: The target to write the rendered template to
218 :param str owner: The owner of the rendered file221 :param str owner: The owner of the rendered file
219 :param str group: The group of the rendered file222 :param str group: The group of the rendered file
220 :param int perms: The permissions of the rendered file223 :param int perms: The permissions of the rendered file
221 """224 """
222 def __init__(self, source, target, owner='root', group='root', perms=0444):225 def __init__(self, source, target,
226 owner='root', group='root', perms=0o444):
223 self.source = source227 self.source = source
224 self.target = target228 self.target = target
225 self.owner = owner229 self.owner = owner
226230
=== modified file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 2014-09-17 14:11:53 +0000
+++ hooks/charmhelpers/core/templating.py 2015-01-15 16:18:44 +0000
@@ -4,7 +4,8 @@
4from charmhelpers.core import hookenv4from charmhelpers.core import hookenv
55
66
7def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):7def render(source, target, context, owner='root', group='root',
8 perms=0o444, templates_dir=None):
8 """9 """
9 Render a template.10 Render a template.
1011
@@ -47,5 +48,5 @@
47 level=hookenv.ERROR)48 level=hookenv.ERROR)
48 raise e49 raise e
49 content = template.render(context)50 content = template.render(context)
50 host.mkdir(os.path.dirname(target))51 host.mkdir(os.path.dirname(target), owner, group)
51 host.write_file(target, content, owner, group, perms)52 host.write_file(target, content, owner, group, perms)
5253
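
A hedged sketch of render() with the reworked signature; the template and target paths are illustrative. The parent directory of the target is now created with the requested owner and group rather than root:root.

    from charmhelpers.core.templating import render

    render(source='demo.conf', target='/etc/demo/demo.conf',
           context={'listen_port': 8080},
           owner='root', group='root', perms=0o644)
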
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-10-21 07:28:36 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2015-01-15 16:18:44 +0000
@@ -5,10 +5,6 @@
5from charmhelpers.core.host import (5from charmhelpers.core.host import (
6 lsb_release6 lsb_release
7)7)
8from urlparse import (
9 urlparse,
10 urlunparse,
11)
12import subprocess8import subprocess
13from charmhelpers.core.hookenv import (9from charmhelpers.core.hookenv import (
14 config,10 config,
@@ -16,6 +12,12 @@
16)12)
17import os13import os
1814
15import six
16if six.PY3:
17 from urllib.parse import urlparse, urlunparse
18else:
19 from urlparse import urlparse, urlunparse
20
1921
20CLOUD_ARCHIVE = """# Ubuntu Cloud Archive22CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
21deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main23deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@@ -62,9 +64,16 @@
62 'trusty-juno/updates': 'trusty-updates/juno',64 'trusty-juno/updates': 'trusty-updates/juno',
63 'trusty-updates/juno': 'trusty-updates/juno',65 'trusty-updates/juno': 'trusty-updates/juno',
64 'juno/proposed': 'trusty-proposed/juno',66 'juno/proposed': 'trusty-proposed/juno',
65 'juno/proposed': 'trusty-proposed/juno',
66 'trusty-juno/proposed': 'trusty-proposed/juno',67 'trusty-juno/proposed': 'trusty-proposed/juno',
67 'trusty-proposed/juno': 'trusty-proposed/juno',68 'trusty-proposed/juno': 'trusty-proposed/juno',
69 # Kilo
70 'kilo': 'trusty-updates/kilo',
71 'trusty-kilo': 'trusty-updates/kilo',
72 'trusty-kilo/updates': 'trusty-updates/kilo',
73 'trusty-updates/kilo': 'trusty-updates/kilo',
74 'kilo/proposed': 'trusty-proposed/kilo',
75 'trusty-kilo/proposed': 'trusty-proposed/kilo',
76 'trusty-proposed/kilo': 'trusty-proposed/kilo',
68}77}
6978
70# The order of this list is very important. Handlers should be listed in from79# The order of this list is very important. Handlers should be listed in from
@@ -149,7 +158,7 @@
149 cmd = ['apt-get', '--assume-yes']158 cmd = ['apt-get', '--assume-yes']
150 cmd.extend(options)159 cmd.extend(options)
151 cmd.append('install')160 cmd.append('install')
152 if isinstance(packages, basestring):161 if isinstance(packages, six.string_types):
153 cmd.append(packages)162 cmd.append(packages)
154 else:163 else:
155 cmd.extend(packages)164 cmd.extend(packages)
@@ -182,7 +191,7 @@
182def apt_purge(packages, fatal=False):191def apt_purge(packages, fatal=False):
183 """Purge one or more packages"""192 """Purge one or more packages"""
184 cmd = ['apt-get', '--assume-yes', 'purge']193 cmd = ['apt-get', '--assume-yes', 'purge']
185 if isinstance(packages, basestring):194 if isinstance(packages, six.string_types):
186 cmd.append(packages)195 cmd.append(packages)
187 else:196 else:
188 cmd.extend(packages)197 cmd.extend(packages)
@@ -193,7 +202,7 @@
193def apt_hold(packages, fatal=False):202def apt_hold(packages, fatal=False):
194 """Hold one or more packages"""203 """Hold one or more packages"""
195 cmd = ['apt-mark', 'hold']204 cmd = ['apt-mark', 'hold']
196 if isinstance(packages, basestring):205 if isinstance(packages, six.string_types):
197 cmd.append(packages)206 cmd.append(packages)
198 else:207 else:
199 cmd.extend(packages)208 cmd.extend(packages)
@@ -256,11 +265,11 @@
256 elif source == 'distro':265 elif source == 'distro':
257 pass266 pass
258 else:267 else:
259 raise SourceConfigError("Unknown source: {!r}".format(source))268 log("Unknown source: {!r}".format(source))
260269
261 if key:270 if key:
262 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:271 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
263 with NamedTemporaryFile() as key_file:272 with NamedTemporaryFile('w+') as key_file:
264 key_file.write(key)273 key_file.write(key)
265 key_file.flush()274 key_file.flush()
266 key_file.seek(0)275 key_file.seek(0)
@@ -297,14 +306,14 @@
297 sources = safe_load((config(sources_var) or '').strip()) or []306 sources = safe_load((config(sources_var) or '').strip()) or []
298 keys = safe_load((config(keys_var) or '').strip()) or None307 keys = safe_load((config(keys_var) or '').strip()) or None
299308
300 if isinstance(sources, basestring):309 if isinstance(sources, six.string_types):
301 sources = [sources]310 sources = [sources]
302311
303 if keys is None:312 if keys is None:
304 for source in sources:313 for source in sources:
305 add_source(source, None)314 add_source(source, None)
306 else:315 else:
307 if isinstance(keys, basestring):316 if isinstance(keys, six.string_types):
308 keys = [keys]317 keys = [keys]
309318
310 if len(sources) != len(keys):319 if len(sources) != len(keys):
@@ -401,7 +410,7 @@
401 while result is None or result == APT_NO_LOCK:410 while result is None or result == APT_NO_LOCK:
402 try:411 try:
403 result = subprocess.check_call(cmd, env=env)412 result = subprocess.check_call(cmd, env=env)
404 except subprocess.CalledProcessError, e:413 except subprocess.CalledProcessError as e:
405 retry_count = retry_count + 1414 retry_count = retry_count + 1
406 if retry_count > APT_NO_LOCK_RETRY_COUNT:415 if retry_count > APT_NO_LOCK_RETRY_COUNT:
407 raise416 raise
408417
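
With the new Kilo entries in CLOUD_ARCHIVE_POCKETS, the usual cloud-archive spellings resolve to the matching pockets, and unrecognised sources are now logged instead of raising SourceConfigError. A hedged sketch, assuming the module's usual 'cloud:' prefix handling, which is not shown in this hunk:

    from charmhelpers.fetch import add_source, apt_update

    add_source('cloud:trusty-kilo')       # maps to trusty-updates/kilo
    add_source('cloud:kilo/proposed')     # maps to trusty-proposed/kilo
    apt_update(fatal=True)
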
=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
--- hooks/charmhelpers/fetch/archiveurl.py 2014-09-27 17:33:59 +0000
+++ hooks/charmhelpers/fetch/archiveurl.py 2015-01-15 16:18:44 +0000
@@ -1,8 +1,23 @@
1import os1import os
2import urllib2
3from urllib import urlretrieve
4import urlparse
5import hashlib2import hashlib
3import re
4
5import six
6if six.PY3:
7 from urllib.request import (
8 build_opener, install_opener, urlopen, urlretrieve,
9 HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
10 )
11 from urllib.parse import urlparse, urlunparse, parse_qs
12 from urllib.error import URLError
13else:
14 from urllib import urlretrieve
15 from urllib2 import (
16 build_opener, install_opener, urlopen,
17 HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
18 URLError
19 )
The diff has been truncated for viewing.
