Merge lp:~corey.bryant/charms/trusty/nova-compute-vmware/contrib.python.packages into lp:~openstack-charmers/charms/trusty/nova-compute-vmware/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 107
Proposed branch: lp:~corey.bryant/charms/trusty/nova-compute-vmware/contrib.python.packages
Merge into: lp:~openstack-charmers/charms/trusty/nova-compute-vmware/next
Diff against target: 3400 lines (+1118/-524)
27 files modified
charm-helpers.yaml (+1/-0)
hooks/charmhelpers/__init__.py (+22/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+16/-7)
hooks/charmhelpers/contrib/network/ip.py (+52/-48)
hooks/charmhelpers/contrib/network/ufw.py (+189/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
hooks/charmhelpers/contrib/openstack/context.py (+292/-232)
hooks/charmhelpers/contrib/openstack/ip.py (+41/-27)
hooks/charmhelpers/contrib/openstack/neutron.py (+20/-4)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+2/-2)
hooks/charmhelpers/contrib/openstack/templating.py (+5/-5)
hooks/charmhelpers/contrib/openstack/utils.py (+122/-13)
hooks/charmhelpers/contrib/python/packages.py (+77/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+83/-97)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+4/-4)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+1/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/fstab.py (+10/-8)
hooks/charmhelpers/core/hookenv.py (+36/-16)
hooks/charmhelpers/core/host.py (+43/-18)
hooks/charmhelpers/core/services/helpers.py (+9/-5)
hooks/charmhelpers/core/templating.py (+2/-1)
hooks/charmhelpers/fetch/__init__.py (+13/-11)
hooks/charmhelpers/fetch/archiveurl.py (+53/-16)
hooks/charmhelpers/fetch/bzrurl.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+12/-5)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/nova-compute-vmware/contrib.python.packages
Reviewer: OpenStack Charmers (status: Pending)
Review via email: mp+244330@code.launchpad.net

Preview Diff

=== modified file 'charm-helpers.yaml'
--- charm-helpers.yaml 2014-10-23 17:30:13 +0000
+++ charm-helpers.yaml 2014-12-11 17:56:59 +0000
@@ -10,3 +10,4 @@
     - cluster
     - payload.execd
     - contrib.network
+    - contrib.python.packages
=== added file 'hooks/charmhelpers/__init__.py'
--- hooks/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/__init__.py 2014-12-11 17:56:59 +0000
@@ -0,0 +1,22 @@
+# Bootstrap charm-helpers, installing its dependencies if necessary using
+# only standard libraries.
+import subprocess
+import sys
+
+try:
+    import six  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
+    import six  # flake8: noqa
+
+try:
+    import yaml  # flake8: noqa
+except ImportError:
+    if sys.version_info.major == 2:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
+    else:
+        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
+    import yaml  # flake8: noqa
=== removed file 'hooks/charmhelpers/__init__.py'
=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-12-11 17:56:59 +0000
@@ -13,9 +13,10 @@
 
 import subprocess
 import os
-
 from socket import gethostname as get_unit_hostname
 
+import six
+
 from charmhelpers.core.hookenv import (
     log,
     relation_ids,
@@ -77,7 +78,7 @@
         "show", resource
     ]
     try:
-        status = subprocess.check_output(cmd)
+        status = subprocess.check_output(cmd).decode('UTF-8')
     except subprocess.CalledProcessError:
         return False
     else:
@@ -150,34 +151,42 @@
     return False
 
 
-def determine_api_port(public_port):
+def determine_api_port(public_port, singlenode_mode=False):
     '''
     Determine correct API server listening port based on
     existence of HTTPS reverse proxy and/or haproxy.
 
     public_port: int: standard public port for given service
 
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
     returns: int: the correct listening port for the API service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
     if https():
         i += 1
     return public_port - (i * 10)
 
 
-def determine_apache_port(public_port):
+def determine_apache_port(public_port, singlenode_mode=False):
     '''
     Description: Determine correct apache listening port based on public IP +
     state of the cluster.
 
     public_port: int: standard public port for given service
 
+    singlenode_mode: boolean: Shuffle ports when only a single unit is present
+
    returns: int: the correct listening port for the HAProxy service
     '''
     i = 0
-    if len(peer_units()) > 0 or is_clustered():
+    if singlenode_mode:
+        i += 1
+    elif len(peer_units()) > 0 or is_clustered():
         i += 1
     return public_port - (i * 10)
 
@@ -197,7 +206,7 @@
     for setting in settings:
         conf[setting] = config_get(setting)
     missing = []
-    [missing.append(s) for s, v in conf.iteritems() if v is None]
+    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
     if missing:
         log('Insufficient config data to configure hacluster.', level=ERROR)
         raise HAIncompleteConfig
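
For context on the singlenode_mode change above: each frontend layer shifts the listening port down by 10 (haproxy for both helpers, plus an HTTPS reverse proxy for determine_api_port), and singlenode_mode now forces the haproxy shift even when the unit has no peers. A minimal standalone sketch of that arithmetic; the clustered/https flags below stand in for the charm's is_clustered()/https() lookups, which need a live hook environment:

    def api_port(public_port, clustered=False, https=False,
                 singlenode_mode=False):
        # mirrors the logic of determine_api_port() above, minus hook tools
        i = 0
        if singlenode_mode or clustered:
            i += 1  # haproxy sits in front and keeps the public port
        if https:
            i += 1  # apache SSL termination takes another 10-port slot
        return public_port - (i * 10)

    # e.g. nova-api (8774) on a single https-enabled unit: the API service
    # ends up on 8754, apache on 8764, leaving 8774 free for haproxy.
    print(api_port(8774, https=True, singlenode_mode=True))  # 8754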
=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2014-12-11 17:56:59 +0000
@@ -1,14 +1,12 @@
1import glob1import glob
2import re2import re
3import subprocess3import subprocess
4import sys
54
6from functools import partial5from functools import partial
76
8from charmhelpers.core.hookenv import unit_get7from charmhelpers.core.hookenv import unit_get
9from charmhelpers.fetch import apt_install8from charmhelpers.fetch import apt_install
10from charmhelpers.core.hookenv import (9from charmhelpers.core.hookenv import (
11 ERROR,
12 log10 log
13)11)
1412
@@ -33,31 +31,28 @@
33 network)31 network)
3432
3533
34def no_ip_found_error_out(network):
35 errmsg = ("No IP address found in network: %s" % network)
36 raise ValueError(errmsg)
37
38
36def get_address_in_network(network, fallback=None, fatal=False):39def get_address_in_network(network, fallback=None, fatal=False):
37 """40 """Get an IPv4 or IPv6 address within the network from the host.
38 Get an IPv4 or IPv6 address within the network from the host.
3941
40 :param network (str): CIDR presentation format. For example,42 :param network (str): CIDR presentation format. For example,
41 '192.168.1.0/24'.43 '192.168.1.0/24'.
42 :param fallback (str): If no address is found, return fallback.44 :param fallback (str): If no address is found, return fallback.
43 :param fatal (boolean): If no address is found, fallback is not45 :param fatal (boolean): If no address is found, fallback is not
44 set and fatal is True then exit(1).46 set and fatal is True then exit(1).
45
46 """47 """
47
48 def not_found_error_out():
49 log("No IP address found in network: %s" % network,
50 level=ERROR)
51 sys.exit(1)
52
53 if network is None:48 if network is None:
54 if fallback is not None:49 if fallback is not None:
55 return fallback50 return fallback
51
52 if fatal:
53 no_ip_found_error_out(network)
56 else:54 else:
57 if fatal:55 return None
58 not_found_error_out()
59 else:
60 return None
6156
62 _validate_cidr(network)57 _validate_cidr(network)
63 network = netaddr.IPNetwork(network)58 network = netaddr.IPNetwork(network)
@@ -69,6 +64,7 @@
69 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))64 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
70 if cidr in network:65 if cidr in network:
71 return str(cidr.ip)66 return str(cidr.ip)
67
72 if network.version == 6 and netifaces.AF_INET6 in addresses:68 if network.version == 6 and netifaces.AF_INET6 in addresses:
73 for addr in addresses[netifaces.AF_INET6]:69 for addr in addresses[netifaces.AF_INET6]:
74 if not addr['addr'].startswith('fe80'):70 if not addr['addr'].startswith('fe80'):
@@ -81,20 +77,20 @@
81 return fallback77 return fallback
8278
83 if fatal:79 if fatal:
84 not_found_error_out()80 no_ip_found_error_out(network)
8581
86 return None82 return None
8783
8884
89def is_ipv6(address):85def is_ipv6(address):
90 '''Determine whether provided address is IPv6 or not'''86 """Determine whether provided address is IPv6 or not."""
91 try:87 try:
92 address = netaddr.IPAddress(address)88 address = netaddr.IPAddress(address)
93 except netaddr.AddrFormatError:89 except netaddr.AddrFormatError:
94 # probably a hostname - so not an address at all!90 # probably a hostname - so not an address at all!
95 return False91 return False
96 else:92
97 return address.version == 693 return address.version == 6
9894
9995
100def is_address_in_network(network, address):96def is_address_in_network(network, address):
@@ -112,11 +108,13 @@
112 except (netaddr.core.AddrFormatError, ValueError):108 except (netaddr.core.AddrFormatError, ValueError):
113 raise ValueError("Network (%s) is not in CIDR presentation format" %109 raise ValueError("Network (%s) is not in CIDR presentation format" %
114 network)110 network)
111
115 try:112 try:
116 address = netaddr.IPAddress(address)113 address = netaddr.IPAddress(address)
117 except (netaddr.core.AddrFormatError, ValueError):114 except (netaddr.core.AddrFormatError, ValueError):
118 raise ValueError("Address (%s) is not in correct presentation format" %115 raise ValueError("Address (%s) is not in correct presentation format" %
119 address)116 address)
117
120 if address in network:118 if address in network:
121 return True119 return True
122 else:120 else:
@@ -146,6 +144,7 @@
146 return iface144 return iface
147 else:145 else:
148 return addresses[netifaces.AF_INET][0][key]146 return addresses[netifaces.AF_INET][0][key]
147
149 if address.version == 6 and netifaces.AF_INET6 in addresses:148 if address.version == 6 and netifaces.AF_INET6 in addresses:
150 for addr in addresses[netifaces.AF_INET6]:149 for addr in addresses[netifaces.AF_INET6]:
151 if not addr['addr'].startswith('fe80'):150 if not addr['addr'].startswith('fe80'):
@@ -159,40 +158,42 @@
159 return str(cidr).split('/')[1]158 return str(cidr).split('/')[1]
160 else:159 else:
161 return addr[key]160 return addr[key]
161
162 return None162 return None
163163
164164
165get_iface_for_address = partial(_get_for_address, key='iface')165get_iface_for_address = partial(_get_for_address, key='iface')
166166
167
167get_netmask_for_address = partial(_get_for_address, key='netmask')168get_netmask_for_address = partial(_get_for_address, key='netmask')
168169
169170
170def format_ipv6_addr(address):171def format_ipv6_addr(address):
171 """172 """If address is IPv6, wrap it in '[]' otherwise return None.
172 IPv6 needs to be wrapped with [] in url link to parse correctly.173
174 This is required by most configuration files when specifying IPv6
175 addresses.
173 """176 """
174 if is_ipv6(address):177 if is_ipv6(address):
175 address = "[%s]" % address178 return "[%s]" % address
176 else:
177 address = None
178179
179 return address180 return None
180181
181182
182def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,183def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
183 fatal=True, exc_list=None):184 fatal=True, exc_list=None):
184 """185 """Return the assigned IP address for a given interface, if any."""
185 Return the assigned IP address for a given interface, if any, or [].
186 """
187 # Extract nic if passed /dev/ethX186 # Extract nic if passed /dev/ethX
188 if '/' in iface:187 if '/' in iface:
189 iface = iface.split('/')[-1]188 iface = iface.split('/')[-1]
189
190 if not exc_list:190 if not exc_list:
191 exc_list = []191 exc_list = []
192
192 try:193 try:
193 inet_num = getattr(netifaces, inet_type)194 inet_num = getattr(netifaces, inet_type)
194 except AttributeError:195 except AttributeError:
195 raise Exception('Unknown inet type ' + str(inet_type))196 raise Exception("Unknown inet type '%s'" % str(inet_type))
196197
197 interfaces = netifaces.interfaces()198 interfaces = netifaces.interfaces()
198 if inc_aliases:199 if inc_aliases:
@@ -200,15 +201,18 @@
200 for _iface in interfaces:201 for _iface in interfaces:
201 if iface == _iface or _iface.split(':')[0] == iface:202 if iface == _iface or _iface.split(':')[0] == iface:
202 ifaces.append(_iface)203 ifaces.append(_iface)
204
203 if fatal and not ifaces:205 if fatal and not ifaces:
204 raise Exception("Invalid interface '%s'" % iface)206 raise Exception("Invalid interface '%s'" % iface)
207
205 ifaces.sort()208 ifaces.sort()
206 else:209 else:
207 if iface not in interfaces:210 if iface not in interfaces:
208 if fatal:211 if fatal:
209 raise Exception("%s not found " % (iface))212 raise Exception("Interface '%s' not found " % (iface))
210 else:213 else:
211 return []214 return []
215
212 else:216 else:
213 ifaces = [iface]217 ifaces = [iface]
214218
@@ -219,10 +223,13 @@
219 for entry in net_info[inet_num]:223 for entry in net_info[inet_num]:
220 if 'addr' in entry and entry['addr'] not in exc_list:224 if 'addr' in entry and entry['addr'] not in exc_list:
221 addresses.append(entry['addr'])225 addresses.append(entry['addr'])
226
222 if fatal and not addresses:227 if fatal and not addresses:
223 raise Exception("Interface '%s' doesn't have any %s addresses." %228 raise Exception("Interface '%s' doesn't have any %s addresses." %
224 (iface, inet_type))229 (iface, inet_type))
225 return addresses230
231 return sorted(addresses)
232
226233
227get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')234get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
228235
@@ -239,6 +246,7 @@
239 raw = re.match(ll_key, _addr)246 raw = re.match(ll_key, _addr)
240 if raw:247 if raw:
241 _addr = raw.group(1)248 _addr = raw.group(1)
249
242 if _addr == addr:250 if _addr == addr:
243 log("Address '%s' is configured on iface '%s'" %251 log("Address '%s' is configured on iface '%s'" %
244 (addr, iface))252 (addr, iface))
@@ -249,8 +257,9 @@
249257
250258
251def sniff_iface(f):259def sniff_iface(f):
252 """If no iface provided, inject net iface inferred from unit private260 """Ensure decorated function is called with a value for iface.
253 address.261
262 If no iface provided, inject net iface inferred from unit private address.
254 """263 """
255 def iface_sniffer(*args, **kwargs):264 def iface_sniffer(*args, **kwargs):
256 if not kwargs.get('iface', None):265 if not kwargs.get('iface', None):
@@ -293,7 +302,7 @@
293 if global_addrs:302 if global_addrs:
294 # Make sure any found global addresses are not temporary303 # Make sure any found global addresses are not temporary
295 cmd = ['ip', 'addr', 'show', iface]304 cmd = ['ip', 'addr', 'show', iface]
296 out = subprocess.check_output(cmd)305 out = subprocess.check_output(cmd).decode('UTF-8')
297 if dynamic_only:306 if dynamic_only:
298 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")307 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
299 else:308 else:
@@ -315,33 +324,28 @@
315 return addrs324 return addrs
316325
317 if fatal:326 if fatal:
318 raise Exception("Interface '%s' doesn't have a scope global "327 raise Exception("Interface '%s' does not have a scope global "
319 "non-temporary ipv6 address." % iface)328 "non-temporary ipv6 address." % iface)
320329
321 return []330 return []
322331
323332
324def get_bridges(vnic_dir='/sys/devices/virtual/net'):333def get_bridges(vnic_dir='/sys/devices/virtual/net'):
325 """334 """Return a list of bridges on the system."""
326 Return a list of bridges on the system or []335 b_regex = "%s/*/bridge" % vnic_dir
327 """336 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
328 b_rgex = vnic_dir + '/*/bridge'
329 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
330337
331338
332def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):339def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
333 """340 """Return a list of nics comprising a given bridge on the system."""
334 Return a list of nics comprising a given bridge on the system or []341 brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
335 """342 return [x.split('/')[-1] for x in glob.glob(brif_regex)]
336 brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
337 return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
338343
339344
340def is_bridge_member(nic):345def is_bridge_member(nic):
341 """346 """Check if a given nic is a member of a bridge."""
342 Check if a given nic is a member of a bridge
343 """
344 for bridge in get_bridges():347 for bridge in get_bridges():
345 if nic in get_bridge_nics(bridge):348 if nic in get_bridge_nics(bridge):
346 return True349 return True
350
347 return False351 return False
348352
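
A short usage sketch for the reworked ip.py helpers above: get_address_in_network() now raises ValueError via no_ip_found_error_out() instead of calling sys.exit(1), and format_ipv6_addr() returns the bracketed address or None. This assumes charmhelpers and the netaddr/netifaces libraries it relies on are importable on the unit:

    from charmhelpers.contrib.network.ip import (
        format_ipv6_addr,
        get_address_in_network,
    )

    # Callers keep the "formatted or original" idiom for config files:
    addr = '2001:db8::1'
    print(format_ipv6_addr(addr) or addr)    # [2001:db8::1]
    print(format_ipv6_addr('192.0.2.10'))    # None, not an IPv6 address

    # With a fallback set, a network with no matching local address degrades
    # gracefully; with fatal=True it now raises ValueError instead of
    # exiting the hook.
    print(get_address_in_network('192.0.2.0/24', fallback='127.0.0.1'))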
=== added file 'hooks/charmhelpers/contrib/network/ufw.py'
--- hooks/charmhelpers/contrib/network/ufw.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/network/ufw.py 2014-12-11 17:56:59 +0000
@@ -0,0 +1,189 @@
+"""
+This module contains helpers to add and remove ufw rules.
+
+Examples:
+
+- open SSH port for subnet 10.0.3.0/24:
+
+  >>> from charmhelpers.contrib.network import ufw
+  >>> ufw.enable()
+  >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
+
+- open service by name as defined in /etc/services:
+
+  >>> from charmhelpers.contrib.network import ufw
+  >>> ufw.enable()
+  >>> ufw.service('ssh', 'open')
+
+- close service by port number:
+
+  >>> from charmhelpers.contrib.network import ufw
+  >>> ufw.enable()
+  >>> ufw.service('4949', 'close')  # munin
+"""
+
+__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
+
+import re
+import os
+import subprocess
+from charmhelpers.core import hookenv
+
+
+def is_enabled():
+    """
+    Check if `ufw` is enabled
+
+    :returns: True if ufw is enabled
+    """
+    output = subprocess.check_output(['ufw', 'status'],
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall(r'^Status: active\n', output, re.M)
+
+    return len(m) >= 1
+
+
+def enable():
+    """
+    Enable ufw
+
+    :returns: True if ufw is successfully enabled
+    """
+    if is_enabled():
+        return True
+
+    output = subprocess.check_output(['ufw', 'enable'],
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall is active and enabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be enabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw enabled", level='INFO')
+        return True
+
+
+def disable():
+    """
+    Disable ufw
+
+    :returns: True if ufw is successfully disabled
+    """
+    if not is_enabled():
+        return True
+
+    output = subprocess.check_output(['ufw', 'disable'],
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall(r'^Firewall stopped and disabled on system startup\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be disabled", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw disabled", level='INFO')
+        return True
+
+
+def modify_access(src, dst='any', port=None, proto=None, action='allow'):
+    """
+    Grant access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destiny of the connection, if the machine has multiple IPs and
+                connections to only one of those have to accepted this is the
+                field has to be set.
+    :param port: destiny port
+    :param proto: protocol (tcp or udp)
+    :param action: `allow` or `delete`
+    """
+    if not is_enabled():
+        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
+        return
+
+    if action == 'delete':
+        cmd = ['ufw', 'delete', 'allow']
+    else:
+        cmd = ['ufw', action]
+
+    if src is not None:
+        cmd += ['from', src]
+
+    if dst is not None:
+        cmd += ['to', dst]
+
+    if port is not None:
+        cmd += ['port', port]
+
+    if proto is not None:
+        cmd += ['proto', proto]
+
+    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+    (stdout, stderr) = p.communicate()
+
+    hookenv.log(stdout, level='INFO')
+
+    if p.returncode != 0:
+        hookenv.log(stderr, level='ERROR')
+        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
+                                                              p.returncode),
+                    level='ERROR')
+
+
+def grant_access(src, dst='any', port=None, proto=None):
+    """
+    Grant access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destiny of the connection, if the machine has multiple IPs and
+                connections to only one of those have to accepted this is the
+                field has to be set.
+    :param port: destiny port
+    :param proto: protocol (tcp or udp)
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='allow')
+
+
+def revoke_access(src, dst='any', port=None, proto=None):
+    """
+    Revoke access to an address or subnet
+
+    :param src: address (e.g. 192.168.1.234) or subnet
+                (e.g. 192.168.1.0/24).
+    :param dst: destiny of the connection, if the machine has multiple IPs and
+                connections to only one of those have to accepted this is the
+                field has to be set.
+    :param port: destiny port
+    :param proto: protocol (tcp or udp)
+    """
+    return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
+
+
+def service(name, action):
+    """
+    Open/close access to a service
+
+    :param name: could be a service name defined in `/etc/services` or a port
+                 number.
+    :param action: `open` or `close`
+    """
+    if action == 'open':
+        subprocess.check_output(['ufw', 'allow', name])
+    elif action == 'close':
+        subprocess.check_output(['ufw', 'delete', 'allow', name])
+    else:
+        raise Exception(("'{}' not supported, use 'allow' "
+                         "or 'delete'").format(action))
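
To complement the doctest examples in the new module's docstring, here is a minimal sketch of a charm opening a port for one subnet and dropping the rule again later. The subnet and port numbers are illustrative, and the code assumes it runs as root on a host with the ufw package installed:

    from charmhelpers.contrib.network import ufw

    if ufw.enable():
        # allow AMQP from a management subnet only
        ufw.grant_access(src='10.0.3.0/24', dst='any', port='5672',
                         proto='tcp')

        # ...and remove the same rule when the relation departs
        ufw.revoke_access(src='10.0.3.0/24', dst='any', port='5672',
                          proto='tcp')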
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-12-11 17:56:59 +0000
@@ -1,3 +1,4 @@
+import six
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
@@ -69,7 +70,7 @@
 
     def _configure_services(self, configs):
         """Configure all of the services."""
-        for service, config in configs.iteritems():
+        for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-12-11 17:56:59 +0000
@@ -7,6 +7,8 @@
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
 
+import six
+
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
 )
@@ -60,7 +62,7 @@
            expected service catalog endpoints.
         """
         self.log.debug('actual: {}'.format(repr(actual)))
-        for k, v in expected.iteritems():
+        for k, v in six.iteritems(expected):
             if k in actual:
                 ret = self._validate_dict_data(expected[k][0], actual[k][0])
                 if ret:
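
The amulet changes above are part of the same Python 3 groundwork as the rest of this branch: dict.iteritems() does not exist on Python 3, so iteration goes through six. A small standalone illustration of the substituted pattern (the service names below are made up):

    import six

    configs = {'keystone': {'admin-password': 'openstack'},
               'nova-cloud-controller': {'network-manager': 'Neutron'}}

    # six.iteritems() behaves like dict.iteritems() on Python 2 and like
    # dict.items() on Python 3, so the same loop runs on both.
    for service, config in six.iteritems(configs):
        print(service, sorted(config))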
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-12-11 17:56:59 +0000
@@ -1,18 +1,15 @@
1import json1import json
2import os2import os
3import time3import time
4
5from base64 import b64decode4from base64 import b64decode
5from subprocess import check_call
66
7from subprocess import (7import six
8 check_call
9)
108
11from charmhelpers.fetch import (9from charmhelpers.fetch import (
12 apt_install,10 apt_install,
13 filter_installed_packages,11 filter_installed_packages,
14)12)
15
16from charmhelpers.core.hookenv import (13from charmhelpers.core.hookenv import (
17 config,14 config,
18 is_relation_made,15 is_relation_made,
@@ -24,44 +21,40 @@
24 relation_set,21 relation_set,
25 unit_get,22 unit_get,
26 unit_private_ip,23 unit_private_ip,
24 DEBUG,
25 INFO,
26 WARNING,
27 ERROR,27 ERROR,
28 DEBUG
29)28)
30
31from charmhelpers.core.host import (29from charmhelpers.core.host import (
32 mkdir,30 mkdir,
33 write_file31 write_file,
34)32)
35
36from charmhelpers.contrib.hahelpers.cluster import (33from charmhelpers.contrib.hahelpers.cluster import (
37 determine_apache_port,34 determine_apache_port,
38 determine_api_port,35 determine_api_port,
39 https,36 https,
40 is_clustered37 is_clustered,
41)38)
42
43from charmhelpers.contrib.hahelpers.apache import (39from charmhelpers.contrib.hahelpers.apache import (
44 get_cert,40 get_cert,
45 get_ca_cert,41 get_ca_cert,
46 install_ca_cert,42 install_ca_cert,
47)43)
48
49from charmhelpers.contrib.openstack.neutron import (44from charmhelpers.contrib.openstack.neutron import (
50 neutron_plugin_attribute,45 neutron_plugin_attribute,
51)46)
52
53from charmhelpers.contrib.network.ip import (47from charmhelpers.contrib.network.ip import (
54 get_address_in_network,48 get_address_in_network,
55 get_ipv6_addr,49 get_ipv6_addr,
56 get_netmask_for_address,50 get_netmask_for_address,
57 format_ipv6_addr,51 format_ipv6_addr,
58 is_address_in_network52 is_address_in_network,
59)53)
54from charmhelpers.contrib.openstack.utils import get_host_ip
6055
61from charmhelpers.contrib.openstack.utils import (
62 get_host_ip,
63)
64CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'56CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
57ADDRESS_TYPES = ['admin', 'internal', 'public']
6558
6659
67class OSContextError(Exception):60class OSContextError(Exception):
@@ -69,7 +62,7 @@
6962
7063
71def ensure_packages(packages):64def ensure_packages(packages):
72 '''Install but do not upgrade required plugin packages'''65 """Install but do not upgrade required plugin packages."""
73 required = filter_installed_packages(packages)66 required = filter_installed_packages(packages)
74 if required:67 if required:
75 apt_install(required, fatal=True)68 apt_install(required, fatal=True)
@@ -77,20 +70,27 @@
7770
78def context_complete(ctxt):71def context_complete(ctxt):
79 _missing = []72 _missing = []
80 for k, v in ctxt.iteritems():73 for k, v in six.iteritems(ctxt):
81 if v is None or v == '':74 if v is None or v == '':
82 _missing.append(k)75 _missing.append(k)
76
83 if _missing:77 if _missing:
84 log('Missing required data: %s' % ' '.join(_missing), level='INFO')78 log('Missing required data: %s' % ' '.join(_missing), level=INFO)
85 return False79 return False
80
86 return True81 return True
8782
8883
89def config_flags_parser(config_flags):84def config_flags_parser(config_flags):
85 """Parses config flags string into dict.
86
87 The provided config_flags string may be a list of comma-separated values
88 which themselves may be comma-separated list of values.
89 """
90 if config_flags.find('==') >= 0:90 if config_flags.find('==') >= 0:
91 log("config_flags is not in expected format (key=value)",91 log("config_flags is not in expected format (key=value)", level=ERROR)
92 level=ERROR)
93 raise OSContextError92 raise OSContextError
93
94 # strip the following from each value.94 # strip the following from each value.
95 post_strippers = ' ,'95 post_strippers = ' ,'
96 # we strip any leading/trailing '=' or ' ' from the string then96 # we strip any leading/trailing '=' or ' ' from the string then
@@ -98,7 +98,7 @@
98 split = config_flags.strip(' =').split('=')98 split = config_flags.strip(' =').split('=')
99 limit = len(split)99 limit = len(split)
100 flags = {}100 flags = {}
101 for i in xrange(0, limit - 1):101 for i in range(0, limit - 1):
102 current = split[i]102 current = split[i]
103 next = split[i + 1]103 next = split[i + 1]
104 vindex = next.rfind(',')104 vindex = next.rfind(',')
@@ -113,17 +113,18 @@
113 # if this not the first entry, expect an embedded key.113 # if this not the first entry, expect an embedded key.
114 index = current.rfind(',')114 index = current.rfind(',')
115 if index < 0:115 if index < 0:
116 log("invalid config value(s) at index %s" % (i),116 log("Invalid config value(s) at index %s" % (i), level=ERROR)
117 level=ERROR)
118 raise OSContextError117 raise OSContextError
119 key = current[index + 1:]118 key = current[index + 1:]
120119
121 # Add to collection.120 # Add to collection.
122 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)121 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
122
123 return flags123 return flags
124124
125125
126class OSContextGenerator(object):126class OSContextGenerator(object):
127 """Base class for all context generators."""
127 interfaces = []128 interfaces = []
128129
129 def __call__(self):130 def __call__(self):
@@ -135,11 +136,11 @@
135136
136 def __init__(self,137 def __init__(self,
137 database=None, user=None, relation_prefix=None, ssl_dir=None):138 database=None, user=None, relation_prefix=None, ssl_dir=None):
138 '''139 """Allows inspecting relation for settings prefixed with
139 Allows inspecting relation for settings prefixed with relation_prefix.140 relation_prefix. This is useful for parsing access for multiple
140 This is useful for parsing access for multiple databases returned via141 databases returned via the shared-db interface (eg, nova_password,
141 the shared-db interface (eg, nova_password, quantum_password)142 quantum_password)
142 '''143 """
143 self.relation_prefix = relation_prefix144 self.relation_prefix = relation_prefix
144 self.database = database145 self.database = database
145 self.user = user146 self.user = user
@@ -149,9 +150,8 @@
149 self.database = self.database or config('database')150 self.database = self.database or config('database')
150 self.user = self.user or config('database-user')151 self.user = self.user or config('database-user')
151 if None in [self.database, self.user]:152 if None in [self.database, self.user]:
152 log('Could not generate shared_db context. '153 log("Could not generate shared_db context. Missing required charm "
153 'Missing required charm config options. '154 "config options. (database name and user)", level=ERROR)
154 '(database name and user)')
155 raise OSContextError155 raise OSContextError
156156
157 ctxt = {}157 ctxt = {}
@@ -204,23 +204,24 @@
204 def __call__(self):204 def __call__(self):
205 self.database = self.database or config('database')205 self.database = self.database or config('database')
206 if self.database is None:206 if self.database is None:
207 log('Could not generate postgresql_db context. '207 log('Could not generate postgresql_db context. Missing required '
208 'Missing required charm config options. '208 'charm config options. (database name)', level=ERROR)
209 '(database name)')
210 raise OSContextError209 raise OSContextError
210
211 ctxt = {}211 ctxt = {}
212
213 for rid in relation_ids(self.interfaces[0]):212 for rid in relation_ids(self.interfaces[0]):
214 for unit in related_units(rid):213 for unit in related_units(rid):
215 ctxt = {214 rel_host = relation_get('host', rid=rid, unit=unit)
216 'database_host': relation_get('host', rid=rid, unit=unit),215 rel_user = relation_get('user', rid=rid, unit=unit)
217 'database': self.database,216 rel_passwd = relation_get('password', rid=rid, unit=unit)
218 'database_user': relation_get('user', rid=rid, unit=unit),217 ctxt = {'database_host': rel_host,
219 'database_password': relation_get('password', rid=rid, unit=unit),218 'database': self.database,
220 'database_type': 'postgresql',219 'database_user': rel_user,
221 }220 'database_password': rel_passwd,
221 'database_type': 'postgresql'}
222 if context_complete(ctxt):222 if context_complete(ctxt):
223 return ctxt223 return ctxt
224
224 return {}225 return {}
225226
226227
@@ -229,23 +230,29 @@
229 ca_path = os.path.join(ssl_dir, 'db-client.ca')230 ca_path = os.path.join(ssl_dir, 'db-client.ca')
230 with open(ca_path, 'w') as fh:231 with open(ca_path, 'w') as fh:
231 fh.write(b64decode(rdata['ssl_ca']))232 fh.write(b64decode(rdata['ssl_ca']))
233
232 ctxt['database_ssl_ca'] = ca_path234 ctxt['database_ssl_ca'] = ca_path
233 elif 'ssl_ca' in rdata:235 elif 'ssl_ca' in rdata:
234 log("Charm not setup for ssl support but ssl ca found")236 log("Charm not setup for ssl support but ssl ca found", level=INFO)
235 return ctxt237 return ctxt
238
236 if 'ssl_cert' in rdata:239 if 'ssl_cert' in rdata:
237 cert_path = os.path.join(240 cert_path = os.path.join(
238 ssl_dir, 'db-client.cert')241 ssl_dir, 'db-client.cert')
239 if not os.path.exists(cert_path):242 if not os.path.exists(cert_path):
240 log("Waiting 1m for ssl client cert validity")243 log("Waiting 1m for ssl client cert validity", level=INFO)
241 time.sleep(60)244 time.sleep(60)
245
242 with open(cert_path, 'w') as fh:246 with open(cert_path, 'w') as fh:
243 fh.write(b64decode(rdata['ssl_cert']))247 fh.write(b64decode(rdata['ssl_cert']))
248
244 ctxt['database_ssl_cert'] = cert_path249 ctxt['database_ssl_cert'] = cert_path
245 key_path = os.path.join(ssl_dir, 'db-client.key')250 key_path = os.path.join(ssl_dir, 'db-client.key')
246 with open(key_path, 'w') as fh:251 with open(key_path, 'w') as fh:
247 fh.write(b64decode(rdata['ssl_key']))252 fh.write(b64decode(rdata['ssl_key']))
253
248 ctxt['database_ssl_key'] = key_path254 ctxt['database_ssl_key'] = key_path
255
249 return ctxt256 return ctxt
250257
251258
@@ -253,9 +260,8 @@
253 interfaces = ['identity-service']260 interfaces = ['identity-service']
254261
255 def __call__(self):262 def __call__(self):
256 log('Generating template context for identity-service')263 log('Generating template context for identity-service', level=DEBUG)
257 ctxt = {}264 ctxt = {}
258
259 for rid in relation_ids('identity-service'):265 for rid in relation_ids('identity-service'):
260 for unit in related_units(rid):266 for unit in related_units(rid):
261 rdata = relation_get(rid=rid, unit=unit)267 rdata = relation_get(rid=rid, unit=unit)
@@ -263,26 +269,24 @@
263 serv_host = format_ipv6_addr(serv_host) or serv_host269 serv_host = format_ipv6_addr(serv_host) or serv_host
264 auth_host = rdata.get('auth_host')270 auth_host = rdata.get('auth_host')
265 auth_host = format_ipv6_addr(auth_host) or auth_host271 auth_host = format_ipv6_addr(auth_host) or auth_host
266272 svc_protocol = rdata.get('service_protocol') or 'http'
267 ctxt = {273 auth_protocol = rdata.get('auth_protocol') or 'http'
268 'service_port': rdata.get('service_port'),274 ctxt = {'service_port': rdata.get('service_port'),
269 'service_host': serv_host,275 'service_host': serv_host,
270 'auth_host': auth_host,276 'auth_host': auth_host,
271 'auth_port': rdata.get('auth_port'),277 'auth_port': rdata.get('auth_port'),
272 'admin_tenant_name': rdata.get('service_tenant'),278 'admin_tenant_name': rdata.get('service_tenant'),
273 'admin_user': rdata.get('service_username'),279 'admin_user': rdata.get('service_username'),
274 'admin_password': rdata.get('service_password'),280 'admin_password': rdata.get('service_password'),
275 'service_protocol':281 'service_protocol': svc_protocol,
276 rdata.get('service_protocol') or 'http',282 'auth_protocol': auth_protocol}
277 'auth_protocol':
278 rdata.get('auth_protocol') or 'http',
279 }
280 if context_complete(ctxt):283 if context_complete(ctxt):
281 # NOTE(jamespage) this is required for >= icehouse284 # NOTE(jamespage) this is required for >= icehouse
282 # so a missing value just indicates keystone needs285 # so a missing value just indicates keystone needs
283 # upgrading286 # upgrading
284 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')287 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
285 return ctxt288 return ctxt
289
286 return {}290 return {}
287291
288292
@@ -295,21 +299,23 @@
295 self.interfaces = [rel_name]299 self.interfaces = [rel_name]
296300
297 def __call__(self):301 def __call__(self):
298 log('Generating template context for amqp')302 log('Generating template context for amqp', level=DEBUG)
299 conf = config()303 conf = config()
300 user_setting = 'rabbit-user'
301 vhost_setting = 'rabbit-vhost'
302 if self.relation_prefix:304 if self.relation_prefix:
303 user_setting = self.relation_prefix + '-rabbit-user'305 user_setting = '%s-rabbit-user' % (self.relation_prefix)
304 vhost_setting = self.relation_prefix + '-rabbit-vhost'306 vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
307 else:
308 user_setting = 'rabbit-user'
309 vhost_setting = 'rabbit-vhost'
305310
306 try:311 try:
307 username = conf[user_setting]312 username = conf[user_setting]
308 vhost = conf[vhost_setting]313 vhost = conf[vhost_setting]
309 except KeyError as e:314 except KeyError as e:
310 log('Could not generate shared_db context. '315 log('Could not generate shared_db context. Missing required charm '
311 'Missing required charm config options: %s.' % e)316 'config options: %s.' % e, level=ERROR)
312 raise OSContextError317 raise OSContextError
318
313 ctxt = {}319 ctxt = {}
314 for rid in relation_ids(self.rel_name):320 for rid in relation_ids(self.rel_name):
315 ha_vip_only = False321 ha_vip_only = False
@@ -323,6 +329,7 @@
323 host = relation_get('private-address', rid=rid, unit=unit)329 host = relation_get('private-address', rid=rid, unit=unit)
324 host = format_ipv6_addr(host) or host330 host = format_ipv6_addr(host) or host
325 ctxt['rabbitmq_host'] = host331 ctxt['rabbitmq_host'] = host
332
326 ctxt.update({333 ctxt.update({
327 'rabbitmq_user': username,334 'rabbitmq_user': username,
328 'rabbitmq_password': relation_get('password', rid=rid,335 'rabbitmq_password': relation_get('password', rid=rid,
@@ -333,6 +340,7 @@
333 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)340 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
334 if ssl_port:341 if ssl_port:
335 ctxt['rabbit_ssl_port'] = ssl_port342 ctxt['rabbit_ssl_port'] = ssl_port
343
336 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)344 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
337 if ssl_ca:345 if ssl_ca:
338 ctxt['rabbit_ssl_ca'] = ssl_ca346 ctxt['rabbit_ssl_ca'] = ssl_ca
@@ -346,41 +354,45 @@
346 if context_complete(ctxt):354 if context_complete(ctxt):
347 if 'rabbit_ssl_ca' in ctxt:355 if 'rabbit_ssl_ca' in ctxt:
348 if not self.ssl_dir:356 if not self.ssl_dir:
349 log(("Charm not setup for ssl support "357 log("Charm not setup for ssl support but ssl ca "
350 "but ssl ca found"))358 "found", level=INFO)
351 break359 break
360
352 ca_path = os.path.join(361 ca_path = os.path.join(
353 self.ssl_dir, 'rabbit-client-ca.pem')362 self.ssl_dir, 'rabbit-client-ca.pem')
354 with open(ca_path, 'w') as fh:363 with open(ca_path, 'w') as fh:
355 fh.write(b64decode(ctxt['rabbit_ssl_ca']))364 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
356 ctxt['rabbit_ssl_ca'] = ca_path365 ctxt['rabbit_ssl_ca'] = ca_path
366
357 # Sufficient information found = break out!367 # Sufficient information found = break out!
358 break368 break
369
359 # Used for active/active rabbitmq >= grizzly370 # Used for active/active rabbitmq >= grizzly
360 if ('clustered' not in ctxt or ha_vip_only) \371 if (('clustered' not in ctxt or ha_vip_only) and
361 and len(related_units(rid)) > 1:372 len(related_units(rid)) > 1):
362 rabbitmq_hosts = []373 rabbitmq_hosts = []
363 for unit in related_units(rid):374 for unit in related_units(rid):
364 host = relation_get('private-address', rid=rid, unit=unit)375 host = relation_get('private-address', rid=rid, unit=unit)
365 host = format_ipv6_addr(host) or host376 host = format_ipv6_addr(host) or host
366 rabbitmq_hosts.append(host)377 rabbitmq_hosts.append(host)
367 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)378
379 ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
380
368 if not context_complete(ctxt):381 if not context_complete(ctxt):
369 return {}382 return {}
370 else:383
371 return ctxt384 return ctxt
372385
373386
374class CephContext(OSContextGenerator):387class CephContext(OSContextGenerator):
388 """Generates context for /etc/ceph/ceph.conf templates."""
375 interfaces = ['ceph']389 interfaces = ['ceph']
376390
377 def __call__(self):391 def __call__(self):
378 '''This generates context for /etc/ceph/ceph.conf templates'''
379 if not relation_ids('ceph'):392 if not relation_ids('ceph'):
380 return {}393 return {}
381394
382 log('Generating template context for ceph')395 log('Generating template context for ceph', level=DEBUG)
383
384 mon_hosts = []396 mon_hosts = []
385 auth = None397 auth = None
386 key = None398 key = None
@@ -389,18 +401,18 @@
389 for unit in related_units(rid):401 for unit in related_units(rid):
390 auth = relation_get('auth', rid=rid, unit=unit)402 auth = relation_get('auth', rid=rid, unit=unit)
391 key = relation_get('key', rid=rid, unit=unit)403 key = relation_get('key', rid=rid, unit=unit)
392 ceph_addr = \404 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
393 relation_get('ceph-public-address', rid=rid, unit=unit) or \405 unit=unit)
394 relation_get('private-address', rid=rid, unit=unit)406 unit_priv_addr = relation_get('private-address', rid=rid,
407 unit=unit)
408 ceph_addr = ceph_pub_addr or unit_priv_addr
395 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr409 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
396 mon_hosts.append(ceph_addr)410 mon_hosts.append(ceph_addr)
397411
398 ctxt = {412 ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
399 'mon_hosts': ' '.join(mon_hosts),413 'auth': auth,
400 'auth': auth,414 'key': key,
401 'key': key,415 'use_syslog': use_syslog}
402 'use_syslog': use_syslog
403 }
404416
405 if not os.path.isdir('/etc/ceph'):417 if not os.path.isdir('/etc/ceph'):
406 os.mkdir('/etc/ceph')418 os.mkdir('/etc/ceph')
@@ -409,79 +421,68 @@
409 return {}421 return {}
410422
411 ensure_packages(['ceph-common'])423 ensure_packages(['ceph-common'])
412
413 return ctxt424 return ctxt
414425
415426
416ADDRESS_TYPES = ['admin', 'internal', 'public']
417
418
419class HAProxyContext(OSContextGenerator):427class HAProxyContext(OSContextGenerator):
428 """Provides half a context for the haproxy template, which describes
429 all peers to be included in the cluster. Each charm needs to include
430 its own context generator that describes the port mapping.
431 """
420 interfaces = ['cluster']432 interfaces = ['cluster']
421433
434 def __init__(self, singlenode_mode=False):
435 self.singlenode_mode = singlenode_mode
436
422 def __call__(self):437 def __call__(self):
423 '''438 if not relation_ids('cluster') and not self.singlenode_mode:
424 Builds half a context for the haproxy template, which describes
425 all peers to be included in the cluster. Each charm needs to include
426 its own context generator that describes the port mapping.
427 '''
428 if not relation_ids('cluster'):
429 return {}439 return {}
430440
431 l_unit = local_unit().replace('/', '-')
432
433 if config('prefer-ipv6'):441 if config('prefer-ipv6'):
434 addr = get_ipv6_addr(exc_list=[config('vip')])[0]442 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
435 else:443 else:
436 addr = get_host_ip(unit_get('private-address'))444 addr = get_host_ip(unit_get('private-address'))
437445
446 l_unit = local_unit().replace('/', '-')
438 cluster_hosts = {}447 cluster_hosts = {}
439448
440 # NOTE(jamespage): build out map of configured network endpoints449 # NOTE(jamespage): build out map of configured network endpoints
441 # and associated backends450 # and associated backends
442 for addr_type in ADDRESS_TYPES:451 for addr_type in ADDRESS_TYPES:
443 laddr = get_address_in_network(452 cfg_opt = 'os-{}-network'.format(addr_type)
444 config('os-{}-network'.format(addr_type)))453 laddr = get_address_in_network(config(cfg_opt))
445 if laddr:454 if laddr:
446 cluster_hosts[laddr] = {}455 netmask = get_netmask_for_address(laddr)
447 cluster_hosts[laddr]['network'] = "{}/{}".format(456 cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
448 laddr,457 netmask),
449 get_netmask_for_address(laddr)458 'backends': {l_unit: laddr}}
450 )
451 cluster_hosts[laddr]['backends'] = {}
452 cluster_hosts[laddr]['backends'][l_unit] = laddr
453 for rid in relation_ids('cluster'):459 for rid in relation_ids('cluster'):
454 for unit in related_units(rid):460 for unit in related_units(rid):
455 _unit = unit.replace('/', '-')
456 _laddr = relation_get('{}-address'.format(addr_type),461 _laddr = relation_get('{}-address'.format(addr_type),
457 rid=rid, unit=unit)462 rid=rid, unit=unit)
458 if _laddr:463 if _laddr:
464 _unit = unit.replace('/', '-')
459 cluster_hosts[laddr]['backends'][_unit] = _laddr465 cluster_hosts[laddr]['backends'][_unit] = _laddr
460466
461 # NOTE(jamespage) no split configurations found, just use467 # NOTE(jamespage) no split configurations found, just use
462 # private addresses468 # private addresses
463 if not cluster_hosts:469 if not cluster_hosts:
464 cluster_hosts[addr] = {}470 netmask = get_netmask_for_address(addr)
465 cluster_hosts[addr]['network'] = "{}/{}".format(471 cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
466 addr,472 'backends': {l_unit: addr}}
467 get_netmask_for_address(addr)
468 )
469 cluster_hosts[addr]['backends'] = {}
470 cluster_hosts[addr]['backends'][l_unit] = addr
471 for rid in relation_ids('cluster'):473 for rid in relation_ids('cluster'):
472 for unit in related_units(rid):474 for unit in related_units(rid):
473 _unit = unit.replace('/', '-')
474 _laddr = relation_get('private-address',475 _laddr = relation_get('private-address',
475 rid=rid, unit=unit)476 rid=rid, unit=unit)
476 if _laddr:477 if _laddr:
478 _unit = unit.replace('/', '-')
477 cluster_hosts[addr]['backends'][_unit] = _laddr479 cluster_hosts[addr]['backends'][_unit] = _laddr
478480
479 ctxt = {481 ctxt = {'frontends': cluster_hosts}
480 'frontends': cluster_hosts,
481 }
482482
483 if config('haproxy-server-timeout'):483 if config('haproxy-server-timeout'):
484 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')484 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
485
485 if config('haproxy-client-timeout'):486 if config('haproxy-client-timeout'):
486 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')487 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
487488
@@ -495,13 +496,18 @@
495 ctxt['stat_port'] = ':8888'496 ctxt['stat_port'] = ':8888'
496497
497 for frontend in cluster_hosts:498 for frontend in cluster_hosts:
498 if len(cluster_hosts[frontend]['backends']) > 1:499 if (len(cluster_hosts[frontend]['backends']) > 1 or
500 self.singlenode_mode):
499 # Enable haproxy when we have enough peers.501 # Enable haproxy when we have enough peers.
500 log('Ensuring haproxy enabled in /etc/default/haproxy.')502 log('Ensuring haproxy enabled in /etc/default/haproxy.',
503 level=DEBUG)
501 with open('/etc/default/haproxy', 'w') as out:504 with open('/etc/default/haproxy', 'w') as out:
502 out.write('ENABLED=1\n')505 out.write('ENABLED=1\n')
506
503 return ctxt507 return ctxt
504 log('HAProxy context is incomplete, this unit has no peers.')508
509 log('HAProxy context is incomplete, this unit has no peers.',
510 level=INFO)
505 return {}511 return {}
506512
507513
@@ -509,29 +515,28 @@
509 interfaces = ['image-service']515 interfaces = ['image-service']
510516
511 def __call__(self):517 def __call__(self):
512 '''518 """Obtains the glance API server from the image-service relation.
513 Obtains the glance API server from the image-service relation. Useful519 Useful in nova and cinder (currently).
514 in nova and cinder (currently).520 """
515 '''521 log('Generating template context for image-service.', level=DEBUG)
516 log('Generating template context for image-service.')
517 rids = relation_ids('image-service')522 rids = relation_ids('image-service')
518 if not rids:523 if not rids:
519 return {}524 return {}
525
520 for rid in rids:526 for rid in rids:
521 for unit in related_units(rid):527 for unit in related_units(rid):
522 api_server = relation_get('glance-api-server',528 api_server = relation_get('glance-api-server',
523 rid=rid, unit=unit)529 rid=rid, unit=unit)
524 if api_server:530 if api_server:
525 return {'glance_api_servers': api_server}531 return {'glance_api_servers': api_server}
526 log('ImageService context is incomplete. '532
527 'Missing required relation data.')533 log("ImageService context is incomplete. Missing required relation "
534 "data.", level=INFO)
528 return {}535 return {}
529536
530537
531class ApacheSSLContext(OSContextGenerator):538class ApacheSSLContext(OSContextGenerator):
532539 """Generates a context for an apache vhost configuration that configures
533 """
534 Generates a context for an apache vhost configuration that configures
535 HTTPS reverse proxying for one or many endpoints. Generated context540 HTTPS reverse proxying for one or many endpoints. Generated context
536 looks something like::541 looks something like::
537542
@@ -565,6 +570,7 @@
565 else:570 else:
566 cert_filename = 'cert'571 cert_filename = 'cert'
567 key_filename = 'key'572 key_filename = 'key'
573
568 write_file(path=os.path.join(ssl_dir, cert_filename),574 write_file(path=os.path.join(ssl_dir, cert_filename),
569 content=b64decode(cert))575 content=b64decode(cert))
570 write_file(path=os.path.join(ssl_dir, key_filename),576 write_file(path=os.path.join(ssl_dir, key_filename),
@@ -576,7 +582,8 @@
576 install_ca_cert(b64decode(ca_cert))582 install_ca_cert(b64decode(ca_cert))
577583
578 def canonical_names(self):584 def canonical_names(self):
579 '''Figure out which canonical names clients will access this service'''585 """Figure out which canonical names clients will access this service.
586 """
580 cns = []587 cns = []
581 for r_id in relation_ids('identity-service'):588 for r_id in relation_ids('identity-service'):
582 for unit in related_units(r_id):589 for unit in related_units(r_id):
@@ -584,55 +591,80 @@
584 for k in rdata:591 for k in rdata:
585 if k.startswith('ssl_key_'):592 if k.startswith('ssl_key_'):
586 cns.append(k.lstrip('ssl_key_'))593 cns.append(k.lstrip('ssl_key_'))
587 return list(set(cns))594
595 return sorted(list(set(cns)))
596
597 def get_network_addresses(self):
598 """For each network configured, return corresponding address and vip
599 (if available).
600
601 Returns a list of tuples of the form:
602
603 [(address_in_net_a, vip_in_net_a),
604 (address_in_net_b, vip_in_net_b),
605 ...]
606
607 or, if no vip(s) available:
608
609 [(address_in_net_a, address_in_net_a),
610 (address_in_net_b, address_in_net_b),
611 ...]
612 """
613 addresses = []
614 if config('vip'):
615 vips = config('vip').split()
616 else:
617 vips = []
618
619 for net_type in ['os-internal-network', 'os-admin-network',
620 'os-public-network']:
621 addr = get_address_in_network(config(net_type),
622 unit_get('private-address'))
623 if len(vips) > 1 and is_clustered():
624 if not config(net_type):
625 log("Multiple networks configured but net_type "
626 "is None (%s)." % net_type, level=WARNING)
627 continue
628
629 for vip in vips:
630 if is_address_in_network(config(net_type), vip):
631 addresses.append((addr, vip))
632 break
633
634 elif is_clustered() and config('vip'):
635 addresses.append((addr, config('vip')))
636 else:
637 addresses.append((addr, addr))
638
639 return sorted(addresses)
588640
589 def __call__(self):641 def __call__(self):
590 if isinstance(self.external_ports, basestring):642 if isinstance(self.external_ports, six.string_types):
591 self.external_ports = [self.external_ports]643 self.external_ports = [self.external_ports]
592 if (not self.external_ports or not https()):644
645 if not self.external_ports or not https():
593 return {}646 return {}
594647
595 self.configure_ca()648 self.configure_ca()
596 self.enable_modules()649 self.enable_modules()
597650
598 ctxt = {651 ctxt = {'namespace': self.service_namespace,
599 'namespace': self.service_namespace,652 'endpoints': [],
600 'endpoints': [],653 'ext_ports': []}
601 'ext_ports': []
602 }
603654
604 for cn in self.canonical_names():655 for cn in self.canonical_names():
605 self.configure_cert(cn)656 self.configure_cert(cn)
606657
607 addresses = []658 addresses = self.get_network_addresses()
608 vips = []659 for address, endpoint in sorted(set(addresses)):
609 if config('vip'):
610 vips = config('vip').split()
611
612 for network_type in ['os-internal-network',
613 'os-admin-network',
614 'os-public-network']:
615 address = get_address_in_network(config(network_type),
616 unit_get('private-address'))
617 if len(vips) > 0 and is_clustered():
618 for vip in vips:
619 if is_address_in_network(config(network_type),
620 vip):
621 addresses.append((address, vip))
622 break
623 elif is_clustered():
624 addresses.append((address, config('vip')))
625 else:
626 addresses.append((address, address))
627
628 for address, endpoint in set(addresses):
629 for api_port in self.external_ports:660 for api_port in self.external_ports:
630 ext_port = determine_apache_port(api_port)661 ext_port = determine_apache_port(api_port)
631 int_port = determine_api_port(api_port)662 int_port = determine_api_port(api_port)
632 portmap = (address, endpoint, int(ext_port), int(int_port))663 portmap = (address, endpoint, int(ext_port), int(int_port))
633 ctxt['endpoints'].append(portmap)664 ctxt['endpoints'].append(portmap)
634 ctxt['ext_ports'].append(int(ext_port))665 ctxt['ext_ports'].append(int(ext_port))
635 ctxt['ext_ports'] = list(set(ctxt['ext_ports']))666
667 ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
636 return ctxt668 return ctxt
637669
638670
@@ -649,21 +681,23 @@
649681
650 @property682 @property
651 def packages(self):683 def packages(self):
652 return neutron_plugin_attribute(684 return neutron_plugin_attribute(self.plugin, 'packages',
653 self.plugin, 'packages', self.network_manager)685 self.network_manager)
654686
655 @property687 @property
656 def neutron_security_groups(self):688 def neutron_security_groups(self):
657 return None689 return None
658690
659 def _ensure_packages(self):691 def _ensure_packages(self):
660 [ensure_packages(pkgs) for pkgs in self.packages]692 for pkgs in self.packages:
693 ensure_packages(pkgs)
661694
662 def _save_flag_file(self):695 def _save_flag_file(self):
663 if self.network_manager == 'quantum':696 if self.network_manager == 'quantum':
664 _file = '/etc/nova/quantum_plugin.conf'697 _file = '/etc/nova/quantum_plugin.conf'
665 else:698 else:
666 _file = '/etc/nova/neutron_plugin.conf'699 _file = '/etc/nova/neutron_plugin.conf'
700
667 with open(_file, 'wb') as out:701 with open(_file, 'wb') as out:
668 out.write(self.plugin + '\n')702 out.write(self.plugin + '\n')
669703
@@ -672,13 +706,11 @@
672 self.network_manager)706 self.network_manager)
673 config = neutron_plugin_attribute(self.plugin, 'config',707 config = neutron_plugin_attribute(self.plugin, 'config',
674 self.network_manager)708 self.network_manager)
675 ovs_ctxt = {709 ovs_ctxt = {'core_plugin': driver,
676 'core_plugin': driver,710 'neutron_plugin': 'ovs',
677 'neutron_plugin': 'ovs',711 'neutron_security_groups': self.neutron_security_groups,
678 'neutron_security_groups': self.neutron_security_groups,712 'local_ip': unit_private_ip(),
679 'local_ip': unit_private_ip(),713 'config': config}
680 'config': config
681 }
682714
683 return ovs_ctxt715 return ovs_ctxt
684716
@@ -687,13 +719,11 @@
687 self.network_manager)719 self.network_manager)
688 config = neutron_plugin_attribute(self.plugin, 'config',720 config = neutron_plugin_attribute(self.plugin, 'config',
689 self.network_manager)721 self.network_manager)
690 nvp_ctxt = {722 nvp_ctxt = {'core_plugin': driver,
691 'core_plugin': driver,723 'neutron_plugin': 'nvp',
692 'neutron_plugin': 'nvp',724 'neutron_security_groups': self.neutron_security_groups,
693 'neutron_security_groups': self.neutron_security_groups,725 'local_ip': unit_private_ip(),
694 'local_ip': unit_private_ip(),726 'config': config}
695 'config': config
696 }
697727
698 return nvp_ctxt728 return nvp_ctxt
699729
@@ -702,35 +732,50 @@
702 self.network_manager)732 self.network_manager)
703 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',733 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
704 self.network_manager)734 self.network_manager)
705 n1kv_ctxt = {735 n1kv_user_config_flags = config('n1kv-config-flags')
706 'core_plugin': driver,736 restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
707 'neutron_plugin': 'n1kv',737 n1kv_ctxt = {'core_plugin': driver,
708 'neutron_security_groups': self.neutron_security_groups,738 'neutron_plugin': 'n1kv',
709 'local_ip': unit_private_ip(),739 'neutron_security_groups': self.neutron_security_groups,
710 'config': n1kv_config,740 'local_ip': unit_private_ip(),
711 'vsm_ip': config('n1kv-vsm-ip'),741 'config': n1kv_config,
712 'vsm_username': config('n1kv-vsm-username'),742 'vsm_ip': config('n1kv-vsm-ip'),
713 'vsm_password': config('n1kv-vsm-password'),743 'vsm_username': config('n1kv-vsm-username'),
714 'restrict_policy_profiles': config(744 'vsm_password': config('n1kv-vsm-password'),
715 'n1kv_restrict_policy_profiles'),745 'restrict_policy_profiles': restrict_policy_profiles}
716 }746
747 if n1kv_user_config_flags:
748 flags = config_flags_parser(n1kv_user_config_flags)
749 n1kv_ctxt['user_config_flags'] = flags
717750
718 return n1kv_ctxt751 return n1kv_ctxt
719752
753 def calico_ctxt(self):
754 driver = neutron_plugin_attribute(self.plugin, 'driver',
755 self.network_manager)
756 config = neutron_plugin_attribute(self.plugin, 'config',
757 self.network_manager)
758 calico_ctxt = {'core_plugin': driver,
759 'neutron_plugin': 'Calico',
760 'neutron_security_groups': self.neutron_security_groups,
761 'local_ip': unit_private_ip(),
762 'config': config}
763
764 return calico_ctxt
765
720 def neutron_ctxt(self):766 def neutron_ctxt(self):
721 if https():767 if https():
722 proto = 'https'768 proto = 'https'
723 else:769 else:
724 proto = 'http'770 proto = 'http'
771
725 if is_clustered():772 if is_clustered():
726 host = config('vip')773 host = config('vip')
727 else:774 else:
728 host = unit_get('private-address')775 host = unit_get('private-address')
729 url = '%s://%s:%s' % (proto, host, '9696')776
730 ctxt = {777 ctxt = {'network_manager': self.network_manager,
731 'network_manager': self.network_manager,778 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
732 'neutron_url': url,
733 }
734 return ctxt779 return ctxt
735780
736 def __call__(self):781 def __call__(self):
@@ -750,6 +795,8 @@
750 ctxt.update(self.nvp_ctxt())795 ctxt.update(self.nvp_ctxt())
751 elif self.plugin == 'n1kv':796 elif self.plugin == 'n1kv':
752 ctxt.update(self.n1kv_ctxt())797 ctxt.update(self.n1kv_ctxt())
798 elif self.plugin == 'Calico':
799 ctxt.update(self.calico_ctxt())
753800
754 alchemy_flags = config('neutron-alchemy-flags')801 alchemy_flags = config('neutron-alchemy-flags')
755 if alchemy_flags:802 if alchemy_flags:
@@ -761,23 +808,40 @@
761808
762809
763class OSConfigFlagContext(OSContextGenerator):810class OSConfigFlagContext(OSContextGenerator):
764811 """Provides support for user-defined config flags.
765 """812
766 Responsible for adding user-defined config-flags in charm config to a813 Users can define a comma-separated list of key=value pairs
767 template context.814 in the charm configuration and apply them at any point in
815 any file by using a template flag.
816
817 Sometimes users might want config flags inserted within a
818 specific section, so this class allows users to specify the
819 template flag name, allowing for multiple template flags
820 (sections) within the same context.
768821
769 NOTE: the value of config-flags may be a comma-separated list of822 NOTE: the value of config-flags may be a comma-separated list of
770 key=value pairs and some Openstack config files support823 key=value pairs and some Openstack config files support
771 comma-separated lists as values.824 comma-separated lists as values.
772 """825 """
773826
827 def __init__(self, charm_flag='config-flags',
828 template_flag='user_config_flags'):
829 """
830 :param charm_flag: config flags in charm configuration.
831 :param template_flag: insert point for user-defined flags in template
832 file.
833 """
834 super(OSConfigFlagContext, self).__init__()
835 self._charm_flag = charm_flag
836 self._template_flag = template_flag
837
774 def __call__(self):838 def __call__(self):
775 config_flags = config('config-flags')839 config_flags = config(self._charm_flag)
776 if not config_flags:840 if not config_flags:
777 return {}841 return {}
778842
779 flags = config_flags_parser(config_flags)843 return {self._template_flag:
780 return {'user_config_flags': flags}844 config_flags_parser(config_flags)}
781845
782846
783class SubordinateConfigContext(OSContextGenerator):847class SubordinateConfigContext(OSContextGenerator):
@@ -821,7 +885,6 @@
821 },885 },
822 }886 }
823 }887 }
824
825 """888 """
826889
827 def __init__(self, service, config_file, interface):890 def __init__(self, service, config_file, interface):
@@ -851,26 +914,28 @@
851914
852 if self.service not in sub_config:915 if self.service not in sub_config:
853 log('Found subordinate_config on %s but it contained'916 log('Found subordinate_config on %s but it contained'
854 'nothing for %s service' % (rid, self.service))917 'nothing for %s service' % (rid, self.service),
918 level=INFO)
855 continue919 continue
856920
857 sub_config = sub_config[self.service]921 sub_config = sub_config[self.service]
858 if self.config_file not in sub_config:922 if self.config_file not in sub_config:
859 log('Found subordinate_config on %s but it contained'923 log('Found subordinate_config on %s but it contained'
860 'nothing for %s' % (rid, self.config_file))924 'nothing for %s' % (rid, self.config_file),
925 level=INFO)
861 continue926 continue
862927
863 sub_config = sub_config[self.config_file]928 sub_config = sub_config[self.config_file]
864 for k, v in sub_config.iteritems():929 for k, v in six.iteritems(sub_config):
865 if k == 'sections':930 if k == 'sections':
866 for section, config_dict in v.iteritems():931 for section, config_dict in six.iteritems(v):
867 log("adding section '%s'" % (section))932 log("adding section '%s'" % (section),
933 level=DEBUG)
868 ctxt[k][section] = config_dict934 ctxt[k][section] = config_dict
869 else:935 else:
870 ctxt[k] = v936 ctxt[k] = v
871937
872 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)938 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
873
874 return ctxt939 return ctxt
875940
876941
@@ -882,15 +947,14 @@
882 False if config('debug') is None else config('debug')947 False if config('debug') is None else config('debug')
883 ctxt['verbose'] = \948 ctxt['verbose'] = \
884 False if config('verbose') is None else config('verbose')949 False if config('verbose') is None else config('verbose')
950
885 return ctxt951 return ctxt
886952
887953
888class SyslogContext(OSContextGenerator):954class SyslogContext(OSContextGenerator):
889955
890 def __call__(self):956 def __call__(self):
891 ctxt = {957 ctxt = {'use_syslog': config('use-syslog')}
892 'use_syslog': config('use-syslog')
893 }
894 return ctxt958 return ctxt
895959
896960
@@ -898,13 +962,9 @@
898962
899 def __call__(self):963 def __call__(self):
900 if config('prefer-ipv6'):964 if config('prefer-ipv6'):
901 return {965 return {'bind_host': '::'}
902 'bind_host': '::'
903 }
904 else:966 else:
905 return {967 return {'bind_host': '0.0.0.0'}
906 'bind_host': '0.0.0.0'
907 }
908968
909969
910class WorkerConfigContext(OSContextGenerator):970class WorkerConfigContext(OSContextGenerator):
@@ -916,13 +976,12 @@
916 except ImportError:976 except ImportError:
917 apt_install('python-psutil', fatal=True)977 apt_install('python-psutil', fatal=True)
918 from psutil import NUM_CPUS978 from psutil import NUM_CPUS
979
919 return NUM_CPUS980 return NUM_CPUS
920981
921 def __call__(self):982 def __call__(self):
922 multiplier = config('worker-multiplier') or 1983 multiplier = config('worker-multiplier') or 0
923 ctxt = {984 ctxt = {"workers": self.num_cpus * multiplier}
924 "workers": self.num_cpus * multiplier
925 }
926 return ctxt985 return ctxt
927986
928987
@@ -936,22 +995,23 @@
936 for unit in related_units(rid):995 for unit in related_units(rid):
937 ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)996 ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
938 ctxt['zmq_host'] = relation_get('host', unit, rid)997 ctxt['zmq_host'] = relation_get('host', unit, rid)
998
939 return ctxt999 return ctxt
9401000
9411001
942class NotificationDriverContext(OSContextGenerator):1002class NotificationDriverContext(OSContextGenerator):
9431003
944 def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'):1004 def __init__(self, zmq_relation='zeromq-configuration',
1005 amqp_relation='amqp'):
945 """1006 """
946 :param zmq_relation : Name of Zeromq relation to check1007 :param zmq_relation: Name of Zeromq relation to check
947 """1008 """
948 self.zmq_relation = zmq_relation1009 self.zmq_relation = zmq_relation
949 self.amqp_relation = amqp_relation1010 self.amqp_relation = amqp_relation
9501011
951 def __call__(self):1012 def __call__(self):
952 ctxt = {1013 ctxt = {'notifications': 'False'}
953 'notifications': 'False',
954 }
955 if is_relation_made(self.amqp_relation):1014 if is_relation_made(self.amqp_relation):
956 ctxt['notifications'] = "True"1015 ctxt['notifications'] = "True"
1016
957 return ctxt1017 return ctxt
9581018
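To show how the parameterised OSConfigFlagContext above is meant to be used, here is a minimal sketch; it assumes a normal Juju hook environment (so config() works), and the 'database-config-flags' option name is hypothetical.

    from charmhelpers.contrib.openstack.context import OSConfigFlagContext

    # Default behaviour, unchanged: read the 'config-flags' charm option and
    # expose it to templates as 'user_config_flags'.
    flags_ctxt = OSConfigFlagContext()()

    # New behaviour: point the context at a different charm option and render
    # it under its own template flag, e.g. to fill a dedicated config section.
    db_ctxt = OSConfigFlagContext(charm_flag='database-config-flags',
                                  template_flag='database_config_flags')()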
=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-12-11 17:56:59 +0000
@@ -2,21 +2,19 @@
2 config,2 config,
3 unit_get,3 unit_get,
4)4)
5
6from charmhelpers.contrib.network.ip import (5from charmhelpers.contrib.network.ip import (
7 get_address_in_network,6 get_address_in_network,
8 is_address_in_network,7 is_address_in_network,
9 is_ipv6,8 is_ipv6,
10 get_ipv6_addr,9 get_ipv6_addr,
11)10)
12
13from charmhelpers.contrib.hahelpers.cluster import is_clustered11from charmhelpers.contrib.hahelpers.cluster import is_clustered
1412
15PUBLIC = 'public'13PUBLIC = 'public'
16INTERNAL = 'int'14INTERNAL = 'int'
17ADMIN = 'admin'15ADMIN = 'admin'
1816
19_address_map = {17ADDRESS_MAP = {
20 PUBLIC: {18 PUBLIC: {
21 'config': 'os-public-network',19 'config': 'os-public-network',
22 'fallback': 'public-address'20 'fallback': 'public-address'
@@ -33,16 +31,14 @@
3331
3432
35def canonical_url(configs, endpoint_type=PUBLIC):33def canonical_url(configs, endpoint_type=PUBLIC):
36 '''34 """Returns the correct HTTP URL to this host given the state of HTTPS
37 Returns the correct HTTP URL to this host given the state of HTTPS
38 configuration, hacluster and charm configuration.35 configuration, hacluster and charm configuration.
3936
40 :configs OSTemplateRenderer: A config tempating object to inspect for37 :param configs: OSTemplateRenderer config templating object to inspect
41 a complete https context.38 for a complete https context.
42 :endpoint_type str: The endpoint type to resolve.39 :param endpoint_type: str endpoint type to resolve.
4340 :param returns: str base URL for services on the current service unit.
44 :returns str: Base URL for services on the current service unit.41 """
45 '''
46 scheme = 'http'42 scheme = 'http'
47 if 'https' in configs.complete_contexts():43 if 'https' in configs.complete_contexts():
48 scheme = 'https'44 scheme = 'https'
@@ -53,27 +49,45 @@
5349
5450
55def resolve_address(endpoint_type=PUBLIC):51def resolve_address(endpoint_type=PUBLIC):
52 """Return unit address depending on net config.
53
54 If unit is clustered with vip(s) and has net splits defined, return vip on
55 correct network. If clustered with no nets defined, return primary vip.
56
57 If not clustered, return unit address ensuring address is on configured net
58 split if one is configured.
59
60 :param endpoint_type: Network endpoint type
61 """
56 resolved_address = None62 resolved_address = None
57 if is_clustered():63 vips = config('vip')
58 if config(_address_map[endpoint_type]['config']) is None:64 if vips:
59 # Assume vip is simple and pass back directly65 vips = vips.split()
60 resolved_address = config('vip')66
67 net_type = ADDRESS_MAP[endpoint_type]['config']
68 net_addr = config(net_type)
69 net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
70 clustered = is_clustered()
71 if clustered:
72 if not net_addr:
73 # If no net-splits defined, we expect a single vip
74 resolved_address = vips[0]
61 else:75 else:
62 for vip in config('vip').split():76 for vip in vips:
63 if is_address_in_network(77 if is_address_in_network(net_addr, vip):
64 config(_address_map[endpoint_type]['config']),
65 vip):
66 resolved_address = vip78 resolved_address = vip
79 break
67 else:80 else:
68 if config('prefer-ipv6'):81 if config('prefer-ipv6'):
69 fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]82 fallback_addr = get_ipv6_addr(exc_list=vips)[0]
70 else:83 else:
71 fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])84 fallback_addr = unit_get(net_fallback)
72 resolved_address = get_address_in_network(85
73 config(_address_map[endpoint_type]['config']), fallback_addr)86 resolved_address = get_address_in_network(net_addr, fallback_addr)
7487
75 if resolved_address is None:88 if resolved_address is None:
76 raise ValueError('Unable to resolve a suitable IP address'89 raise ValueError("Unable to resolve a suitable IP address based on "
77 ' based on charm state and configuration')90 "charm state and configuration. (net_type=%s, "
78 else:91 "clustered=%s)" % (net_type, clustered))
79 return resolved_address92
93 return resolved_address
8094
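A short usage sketch for the reworked resolve_address() above, assuming a Juju hook environment with the usual os-*-network and vip options.

    from charmhelpers.contrib.openstack.ip import (
        resolve_address,
        INTERNAL,
        PUBLIC,
    )

    # VIP on the matching network split if clustered, otherwise the unit's
    # address on that split (falling back to the plain unit address).
    internal_addr = resolve_address(endpoint_type=INTERNAL)
    public_addr = resolve_address(endpoint_type=PUBLIC)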
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-12-11 17:56:59 +0000
@@ -14,7 +14,7 @@
14def headers_package():14def headers_package():
15 """Ensures correct linux-headers for running kernel are installed,15 """Ensures correct linux-headers for running kernel are installed,
16 for building DKMS package"""16 for building DKMS package"""
17 kver = check_output(['uname', '-r']).strip()17 kver = check_output(['uname', '-r']).decode('UTF-8').strip()
18 return 'linux-headers-%s' % kver18 return 'linux-headers-%s' % kver
1919
20QUANTUM_CONF_DIR = '/etc/quantum'20QUANTUM_CONF_DIR = '/etc/quantum'
@@ -22,7 +22,7 @@
2222
23def kernel_version():23def kernel_version():
24 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """24 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
25 kver = check_output(['uname', '-r']).strip()25 kver = check_output(['uname', '-r']).decode('UTF-8').strip()
26 kver = kver.split('.')26 kver = kver.split('.')
27 return (int(kver[0]), int(kver[1]))27 return (int(kver[0]), int(kver[1]))
2828
@@ -138,10 +138,25 @@
138 relation_prefix='neutron',138 relation_prefix='neutron',
139 ssl_dir=NEUTRON_CONF_DIR)],139 ssl_dir=NEUTRON_CONF_DIR)],
140 'services': [],140 'services': [],
141 'packages': [['neutron-plugin-cisco']],141 'packages': [[headers_package()] + determine_dkms_package(),
142 ['neutron-plugin-cisco']],
142 'server_packages': ['neutron-server',143 'server_packages': ['neutron-server',
143 'neutron-plugin-cisco'],144 'neutron-plugin-cisco'],
144 'server_services': ['neutron-server']145 'server_services': ['neutron-server']
146 },
147 'Calico': {
148 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
149 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
150 'contexts': [
151 context.SharedDBContext(user=config('neutron-database-user'),
152 database=config('neutron-database'),
153 relation_prefix='neutron',
154 ssl_dir=NEUTRON_CONF_DIR)],
155 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
156 'packages': [[headers_package()] + determine_dkms_package(),
157 ['calico-compute', 'bird', 'neutron-dhcp-agent']],
158 'server_packages': ['neutron-server', 'calico-control'],
159 'server_services': ['neutron-server']
145 }160 }
146 }161 }
147 if release >= 'icehouse':162 if release >= 'icehouse':
@@ -162,7 +177,8 @@
162 elif manager == 'neutron':177 elif manager == 'neutron':
163 plugins = neutron_plugins()178 plugins = neutron_plugins()
164 else:179 else:
165 log('Error: Network manager does not support plugins.')180 log("Network manager '%s' does not support plugins." % (manager),
181 level=ERROR)
166 raise Exception182 raise Exception
167183
168 try:184 try:
169185
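The new 'Calico' entry is consumed through neutron_plugin_attribute(), the same way the contexts in context.py use it; a minimal sketch (hook environment assumed, since the plugin map reads charm config).

    from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

    # Package groups for the Calico plugin: the kernel-headers/DKMS group plus
    # ['calico-compute', 'bird', 'neutron-dhcp-agent'], per the map above.
    pkgs = neutron_plugin_attribute('Calico', 'packages', 'neutron')

    # ml2 config file that templates should target for this plugin.
    conf_file = neutron_plugin_attribute('Calico', 'config', 'neutron')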
=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-12-11 17:56:59 +0000
@@ -35,7 +35,7 @@
35 stats auth admin:password35 stats auth admin:password
3636
37{% if frontends -%}37{% if frontends -%}
38{% for service, ports in service_ports.iteritems() -%}38{% for service, ports in service_ports.items() -%}
39frontend tcp-in_{{ service }}39frontend tcp-in_{{ service }}
40 bind *:{{ ports[0] }}40 bind *:{{ ports[0] }}
41 bind :::{{ ports[0] }}41 bind :::{{ ports[0] }}
@@ -46,7 +46,7 @@
46{% for frontend in frontends -%}46{% for frontend in frontends -%}
47backend {{ service }}_{{ frontend }}47backend {{ service }}_{{ frontend }}
48 balance leastconn48 balance leastconn
49 {% for unit, address in frontends[frontend]['backends'].iteritems() -%}49 {% for unit, address in frontends[frontend]['backends'].items() -%}
50 server {{ unit }} {{ address }}:{{ ports[1] }} check50 server {{ unit }} {{ address }}:{{ ports[1] }} check
51 {% endfor %}51 {% endfor %}
52{% endfor -%}52{% endfor -%}
5353
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-12-11 17:56:59 +0000
@@ -1,13 +1,13 @@
1import os1import os
22
3import six
4
3from charmhelpers.fetch import apt_install5from charmhelpers.fetch import apt_install
4
5from charmhelpers.core.hookenv import (6from charmhelpers.core.hookenv import (
6 log,7 log,
7 ERROR,8 ERROR,
8 INFO9 INFO
9)10)
10
11from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES11from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
1212
13try:13try:
@@ -43,7 +43,7 @@
43 order by OpenStack release.43 order by OpenStack release.
44 """44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in OPENSTACK_CODENAMES.itervalues()]46 for rel in six.itervalues(OPENSTACK_CODENAMES)]
4747
48 if not os.path.isdir(templates_dir):48 if not os.path.isdir(templates_dir):
49 log('Templates directory not found @ %s.' % templates_dir,49 log('Templates directory not found @ %s.' % templates_dir,
@@ -258,7 +258,7 @@
258 """258 """
259 Write out all registered config files.259 Write out all registered config files.
260 """260 """
261 [self.write(k) for k in self.templates.iterkeys()]261 [self.write(k) for k in six.iterkeys(self.templates)]
262262
263 def set_release(self, openstack_release):263 def set_release(self, openstack_release):
264 """264 """
@@ -275,5 +275,5 @@
275 '''275 '''
276 interfaces = []276 interfaces = []
277 [interfaces.extend(i.complete_contexts())277 [interfaces.extend(i.complete_contexts())
278 for i in self.templates.itervalues()]278 for i in six.itervalues(self.templates)]
279 return interfaces279 return interfaces
280280
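The iterkeys/itervalues/iteritems replacements in this file (and across the branch) all follow the same six-based pattern, which behaves identically on python 2 and 3; for example:

    import six

    settings = {'workers': 4, 'use_syslog': True}

    # dict.iteritems() does not exist on python3; six.iteritems() picks the
    # right iteration method for the running interpreter.
    for key, value in six.iteritems(settings):
        print(key, value)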
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-12-11 17:56:59 +0000
@@ -10,11 +10,13 @@
10import socket10import socket
11import sys11import sys
1212
13import six
14import yaml
15
13from charmhelpers.core.hookenv import (16from charmhelpers.core.hookenv import (
14 config,17 config,
15 log as juju_log,18 log as juju_log,
16 charm_dir,19 charm_dir,
17 ERROR,
18 INFO,20 INFO,
19 relation_ids,21 relation_ids,
20 relation_set22 relation_set
@@ -31,7 +33,8 @@
31)33)
3234
33from charmhelpers.core.host import lsb_release, mounts, umount35from charmhelpers.core.host import lsb_release, mounts, umount
34from charmhelpers.fetch import apt_install, apt_cache36from charmhelpers.fetch import apt_install, apt_cache, install_remote
37from charmhelpers.contrib.python.packages import pip_install
35from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk38from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
36from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device39from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
3740
@@ -113,7 +116,7 @@
113116
114 # Best guess match based on deb string provided117 # Best guess match based on deb string provided
115 if src.startswith('deb') or src.startswith('ppa'):118 if src.startswith('deb') or src.startswith('ppa'):
116 for k, v in OPENSTACK_CODENAMES.iteritems():119 for k, v in six.iteritems(OPENSTACK_CODENAMES):
117 if v in src:120 if v in src:
118 return v121 return v
119122
@@ -134,7 +137,7 @@
134137
135def get_os_version_codename(codename):138def get_os_version_codename(codename):
136 '''Determine OpenStack version number from codename.'''139 '''Determine OpenStack version number from codename.'''
137 for k, v in OPENSTACK_CODENAMES.iteritems():140 for k, v in six.iteritems(OPENSTACK_CODENAMES):
138 if v == codename:141 if v == codename:
139 return k142 return k
140 e = 'Could not derive OpenStack version for '\143 e = 'Could not derive OpenStack version for '\
@@ -194,7 +197,7 @@
194 else:197 else:
195 vers_map = OPENSTACK_CODENAMES198 vers_map = OPENSTACK_CODENAMES
196199
197 for version, cname in vers_map.iteritems():200 for version, cname in six.iteritems(vers_map):
198 if cname == codename:201 if cname == codename:
199 return version202 return version
200 # e = "Could not determine OpenStack version for package: %s" % pkg203 # e = "Could not determine OpenStack version for package: %s" % pkg
@@ -318,7 +321,7 @@
318 rc_script.write(321 rc_script.write(
319 "#!/bin/bash\n")322 "#!/bin/bash\n")
320 [rc_script.write('export %s=%s\n' % (u, p))323 [rc_script.write('export %s=%s\n' % (u, p))
321 for u, p in env_vars.iteritems() if u != "script_path"]324 for u, p in six.iteritems(env_vars) if u != "script_path"]
322325
323326
324def openstack_upgrade_available(package):327def openstack_upgrade_available(package):
@@ -351,8 +354,8 @@
351 '''354 '''
352 _none = ['None', 'none', None]355 _none = ['None', 'none', None]
353 if (block_device in _none):356 if (block_device in _none):
354 error_out('prepare_storage(): Missing required input: '357 error_out('prepare_storage(): Missing required input: block_device=%s.'
355 'block_device=%s.' % block_device, level=ERROR)358 % block_device)
356359
357 if block_device.startswith('/dev/'):360 if block_device.startswith('/dev/'):
358 bdev = block_device361 bdev = block_device
@@ -368,8 +371,7 @@
368 bdev = '/dev/%s' % block_device371 bdev = '/dev/%s' % block_device
369372
370 if not is_block_device(bdev):373 if not is_block_device(bdev):
371 error_out('Failed to locate valid block device at %s' % bdev,374 error_out('Failed to locate valid block device at %s' % bdev)
372 level=ERROR)
373375
374 return bdev376 return bdev
375377
@@ -418,7 +420,7 @@
418420
419 if isinstance(address, dns.name.Name):421 if isinstance(address, dns.name.Name):
420 rtype = 'PTR'422 rtype = 'PTR'
421 elif isinstance(address, basestring):423 elif isinstance(address, six.string_types):
422 rtype = 'A'424 rtype = 'A'
423 else:425 else:
424 return None426 return None
@@ -486,8 +488,7 @@
486 'hostname': json.dumps(hosts)}488 'hostname': json.dumps(hosts)}
487489
488 if relation_prefix:490 if relation_prefix:
489 keys = kwargs.keys()491 for key in list(kwargs.keys()):
490 for key in keys:
491 kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]492 kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
492 del kwargs[key]493 del kwargs[key]
493494
@@ -508,3 +509,111 @@
508 f(*args)509 f(*args)
509 return wrapped_f510 return wrapped_f
510 return wrap511 return wrap
512
513
514def git_install_requested():
515 """Returns true if openstack-origin-git is specified."""
516 return config('openstack-origin-git') != "None"
517
518
519requirements_dir = None
520
521
522def git_clone_and_install(file_name, core_project):
523 """Clone/install all OpenStack repos specified in yaml config file."""
524 global requirements_dir
525
526 if file_name == "None":
527 return
528
529 yaml_file = os.path.join(charm_dir(), file_name)
530
531 # clone/install the requirements project first
532 installed = _git_clone_and_install_subset(yaml_file,
533 whitelist=['requirements'])
534 if 'requirements' not in installed:
535 error_out('requirements git repository must be specified')
536
537 # clone/install all other projects except requirements and the core project
538 blacklist = ['requirements', core_project]
539 _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
540 update_requirements=True)
541
542 # clone/install the core project
543 whitelist = [core_project]
544 installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
545 update_requirements=True)
546 if core_project not in installed:
547 error_out('{} git repository must be specified'.format(core_project))
548
549
550def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
551 update_requirements=False):
552 """Clone/install subset of OpenStack repos specified in yaml config file."""
553 global requirements_dir
554 installed = []
555
556 with open(yaml_file, 'r') as fd:
557 projects = yaml.load(fd)
558 for proj, val in projects.items():
559 # The project subset is chosen based on the following 3 rules:
560 # 1) If project is in blacklist, we don't clone/install it, period.
561 # 2) If whitelist is empty, we clone/install everything else.
562 # 3) If whitelist is not empty, we clone/install everything in the
563 # whitelist.
564 if proj in blacklist:
565 continue
566 if whitelist and proj not in whitelist:
567 continue
568 repo = val['repository']
569 branch = val['branch']
570 repo_dir = _git_clone_and_install_single(repo, branch,
571 update_requirements)
572 if proj == 'requirements':
573 requirements_dir = repo_dir
574 installed.append(proj)
575 return installed
576
577
578def _git_clone_and_install_single(repo, branch, update_requirements=False):
579 """Clone and install a single git repository."""
580 dest_parent_dir = "/mnt/openstack-git/"
581 dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
582
583 if not os.path.exists(dest_parent_dir):
584 juju_log('Host dir not mounted at {}. '
585 'Creating directory there instead.'.format(dest_parent_dir))
586 os.mkdir(dest_parent_dir)
587
588 if not os.path.exists(dest_dir):
589 juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
590 repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
591 else:
592 repo_dir = dest_dir
593
594 if update_requirements:
595 if not requirements_dir:
596 error_out('requirements repo must be cloned before '
597 'updating from global requirements.')
598 _git_update_requirements(repo_dir, requirements_dir)
599
600 juju_log('Installing git repo from dir: {}'.format(repo_dir))
601 pip_install(repo_dir)
602
603 return repo_dir
604
605
606def _git_update_requirements(package_dir, reqs_dir):
607 """Update from global requirements.
608
609 Update an OpenStack git directory's requirements.txt and
610 test-requirements.txt from global-requirements.txt."""
611 orig_dir = os.getcwd()
612 os.chdir(reqs_dir)
613 cmd = "python update.py {}".format(package_dir)
614 try:
615 subprocess.check_call(cmd.split(' '))
616 except subprocess.CalledProcessError:
617 package = os.path.basename(package_dir)
618 error_out("Error updating {} from global-requirements.txt".format(package))
619 os.chdir(orig_dir)
511620
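A sketch of how the new git install helpers might be driven from a charm's install hook; the use of the 'openstack-origin-git' option as the yaml file name and the yaml layout shown are assumptions inferred from the parsing code above, not something this branch defines.

    from charmhelpers.core.hookenv import config
    from charmhelpers.contrib.openstack.utils import (
        git_install_requested,
        git_clone_and_install,
    )

    # _git_clone_and_install_subset() expects the yaml file to map project
    # names to 'repository' and 'branch' keys, with a mandatory
    # 'requirements' entry, e.g. (repository URLs are examples):
    #
    #   requirements:
    #     repository: https://github.com/openstack/requirements
    #     branch: stable/icehouse
    #   nova:
    #     repository: https://github.com/openstack/nova
    #     branch: stable/icehouse
    if git_install_requested():
        git_clone_and_install(config('openstack-origin-git'),
                              core_project='nova')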
=== added directory 'hooks/charmhelpers/contrib/python'
=== added file 'hooks/charmhelpers/contrib/python/__init__.py'
=== added file 'hooks/charmhelpers/contrib/python/packages.py'
--- hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/python/packages.py 2014-12-11 17:56:59 +0000
@@ -0,0 +1,77 @@
1#!/usr/bin/env python
2# coding: utf-8
3
4__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
5
6from charmhelpers.fetch import apt_install, apt_update
7from charmhelpers.core.hookenv import log
8
9try:
10 from pip import main as pip_execute
11except ImportError:
12 apt_update()
13 apt_install('python-pip')
14 from pip import main as pip_execute
15
16
17def parse_options(given, available):
18 """Given a set of options, check if available"""
19 for key, value in sorted(given.items()):
20 if key in available:
21 yield "--{0}={1}".format(key, value)
22
23
24def pip_install_requirements(requirements, **options):
25 """Install a requirements file """
26 command = ["install"]
27
28 available_options = ('proxy', 'src', 'log', )
29 for option in parse_options(options, available_options):
30 command.append(option)
31
32 command.append("-r {0}".format(requirements))
33 log("Installing from file: {} with options: {}".format(requirements,
34 command))
35 pip_execute(command)
36
37
38def pip_install(package, fatal=False, **options):
39 """Install a python package"""
40 command = ["install"]
41
42 available_options = ('proxy', 'src', 'log', "index-url", )
43 for option in parse_options(options, available_options):
44 command.append(option)
45
46 if isinstance(package, list):
47 command.extend(package)
48 else:
49 command.append(package)
50
51 log("Installing {} package with options: {}".format(package,
52 command))
53 pip_execute(command)
54
55
56def pip_uninstall(package, **options):
57 """Uninstall a python package"""
58 command = ["uninstall", "-q", "-y"]
59
60 available_options = ('proxy', 'log', )
61 for option in parse_options(options, available_options):
62 command.append(option)
63
64 if isinstance(package, list):
65 command.extend(package)
66 else:
67 command.append(package)
68
69 log("Uninstalling {} package with options: {}".format(package,
70 command))
71 pip_execute(command)
72
73
74def pip_list():
75 """Returns the list of current python installed packages
76 """
77 return pip_execute(["list"])
078
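Usage sketch for the new pip helpers; the proxy URL and file path are examples, and keyword options outside each function's whitelist are silently dropped by parse_options().

    from charmhelpers.contrib.python.packages import (
        pip_install,
        pip_install_requirements,
        pip_uninstall,
    )

    # Single package, optionally via a proxy ('proxy' is a whitelisted option).
    pip_install('six', proxy='http://squid.internal:3128')

    # Everything listed in a requirements file.
    pip_install_requirements('/tmp/requirements.txt')

    # Lists are accepted as well as single package names.
    pip_uninstall(['six'])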
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-12-11 17:56:59 +0000
@@ -16,19 +16,18 @@
16from subprocess import (16from subprocess import (
17 check_call,17 check_call,
18 check_output,18 check_output,
19 CalledProcessError19 CalledProcessError,
20)20)
21
22from charmhelpers.core.hookenv import (21from charmhelpers.core.hookenv import (
23 relation_get,22 relation_get,
24 relation_ids,23 relation_ids,
25 related_units,24 related_units,
26 log,25 log,
26 DEBUG,
27 INFO,27 INFO,
28 WARNING,28 WARNING,
29 ERROR29 ERROR,
30)30)
31
32from charmhelpers.core.host import (31from charmhelpers.core.host import (
33 mount,32 mount,
34 mounts,33 mounts,
@@ -37,7 +36,6 @@
37 service_running,36 service_running,
38 umount,37 umount,
39)38)
40
41from charmhelpers.fetch import (39from charmhelpers.fetch import (
42 apt_install,40 apt_install,
43)41)
@@ -56,99 +54,85 @@
5654
5755
58def install():56def install():
59 ''' Basic Ceph client installation '''57 """Basic Ceph client installation."""
60 ceph_dir = "/etc/ceph"58 ceph_dir = "/etc/ceph"
61 if not os.path.exists(ceph_dir):59 if not os.path.exists(ceph_dir):
62 os.mkdir(ceph_dir)60 os.mkdir(ceph_dir)
61
63 apt_install('ceph-common', fatal=True)62 apt_install('ceph-common', fatal=True)
6463
6564
66def rbd_exists(service, pool, rbd_img):65def rbd_exists(service, pool, rbd_img):
67 ''' Check to see if a RADOS block device exists '''66 """Check to see if a RADOS block device exists."""
68 try:67 try:
69 out = check_output(['rbd', 'list', '--id', service,68 out = check_output(['rbd', 'list', '--id',
70 '--pool', pool])69 service, '--pool', pool]).decode('UTF-8')
71 except CalledProcessError:70 except CalledProcessError:
72 return False71 return False
73 else:72
74 return rbd_img in out73 return rbd_img in out
7574
7675
77def create_rbd_image(service, pool, image, sizemb):76def create_rbd_image(service, pool, image, sizemb):
78 ''' Create a new RADOS block device '''77 """Create a new RADOS block device."""
79 cmd = [78 cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
80 'rbd',79 '--pool', pool]
81 'create',
82 image,
83 '--size',
84 str(sizemb),
85 '--id',
86 service,
87 '--pool',
88 pool
89 ]
90 check_call(cmd)80 check_call(cmd)
9181
9282
93def pool_exists(service, name):83def pool_exists(service, name):
94 ''' Check to see if a RADOS pool already exists '''84 """Check to see if a RADOS pool already exists."""
95 try:85 try:
96 out = check_output(['rados', '--id', service, 'lspools'])86 out = check_output(['rados', '--id', service,
87 'lspools']).decode('UTF-8')
97 except CalledProcessError:88 except CalledProcessError:
98 return False89 return False
99 else:90
100 return name in out91 return name in out
10192
10293
103def get_osds(service):94def get_osds(service):
104 '''95 """Return a list of all Ceph Object Storage Daemons currently in the
105 Return a list of all Ceph Object Storage Daemons96 cluster.
106 currently in the cluster97 """
107 '''
108 version = ceph_version()98 version = ceph_version()
109 if version and version >= '0.56':99 if version and version >= '0.56':
110 return json.loads(check_output(['ceph', '--id', service,100 return json.loads(check_output(['ceph', '--id', service,
111 'osd', 'ls', '--format=json']))101 'osd', 'ls',
112 else:102 '--format=json']).decode('UTF-8'))
113 return None103
104 return None
114105
115106
116def create_pool(service, name, replicas=3):107def create_pool(service, name, replicas=3):
117 ''' Create a new RADOS pool '''108 """Create a new RADOS pool."""
118 if pool_exists(service, name):109 if pool_exists(service, name):
119 log("Ceph pool {} already exists, skipping creation".format(name),110 log("Ceph pool {} already exists, skipping creation".format(name),
120 level=WARNING)111 level=WARNING)
121 return112 return
113
122 # Calculate the number of placement groups based114 # Calculate the number of placement groups based
123 # on upstream recommended best practices.115 # on upstream recommended best practices.
124 osds = get_osds(service)116 osds = get_osds(service)
125 if osds:117 if osds:
126 pgnum = (len(osds) * 100 / replicas)118 pgnum = (len(osds) * 100 // replicas)
127 else:119 else:
128 # NOTE(james-page): Default to 200 for older ceph versions120 # NOTE(james-page): Default to 200 for older ceph versions
129 # which don't support OSD query from cli121 # which don't support OSD query from cli
130 pgnum = 200122 pgnum = 200
131 cmd = [123
132 'ceph', '--id', service,124 cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
133 'osd', 'pool', 'create',
134 name, str(pgnum)
135 ]
136 check_call(cmd)125 check_call(cmd)
137 cmd = [126
138 'ceph', '--id', service,127 cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
139 'osd', 'pool', 'set', name,128 str(replicas)]
140 'size', str(replicas)
141 ]
142 check_call(cmd)129 check_call(cmd)
143130
144131
145def delete_pool(service, name):132def delete_pool(service, name):
146 ''' Delete a RADOS pool from ceph '''133 """Delete a RADOS pool from ceph."""
147 cmd = [134 cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
148 'ceph', '--id', service,135 '--yes-i-really-really-mean-it']
149 'osd', 'pool', 'delete',
150 name, '--yes-i-really-really-mean-it'
151 ]
152 check_call(cmd)136 check_call(cmd)
153137
154138
@@ -161,44 +145,43 @@
161145
162146
163def create_keyring(service, key):147def create_keyring(service, key):
164 ''' Create a new Ceph keyring containing key'''148 """Create a new Ceph keyring containing key."""
165 keyring = _keyring_path(service)149 keyring = _keyring_path(service)
166 if os.path.exists(keyring):150 if os.path.exists(keyring):
167 log('ceph: Keyring exists at %s.' % keyring, level=WARNING)151 log('Ceph keyring exists at %s.' % keyring, level=WARNING)
168 return152 return
169 cmd = [153
170 'ceph-authtool',154 cmd = ['ceph-authtool', keyring, '--create-keyring',
171 keyring,155 '--name=client.{}'.format(service), '--add-key={}'.format(key)]
172 '--create-keyring',
173 '--name=client.{}'.format(service),
174 '--add-key={}'.format(key)
175 ]
176 check_call(cmd)156 check_call(cmd)
177 log('ceph: Created new ring at %s.' % keyring, level=INFO)157 log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
178158
179159
180def create_key_file(service, key):160def create_key_file(service, key):
181 ''' Create a file containing key '''161 """Create a file containing key."""
182 keyfile = _keyfile_path(service)162 keyfile = _keyfile_path(service)
183 if os.path.exists(keyfile):163 if os.path.exists(keyfile):
184 log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)164 log('Keyfile exists at %s.' % keyfile, level=WARNING)
185 return165 return
166
186 with open(keyfile, 'w') as fd:167 with open(keyfile, 'w') as fd:
187 fd.write(key)168 fd.write(key)
188 log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)169
170 log('Created new keyfile at %s.' % keyfile, level=INFO)
189171
190172
191def get_ceph_nodes():173def get_ceph_nodes():
192 ''' Query named relation 'ceph' to detemine current nodes '''174 """Query named relation 'ceph' to determine current nodes."""
193 hosts = []175 hosts = []
194 for r_id in relation_ids('ceph'):176 for r_id in relation_ids('ceph'):
195 for unit in related_units(r_id):177 for unit in related_units(r_id):
196 hosts.append(relation_get('private-address', unit=unit, rid=r_id))178 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
179
197 return hosts180 return hosts
198181
199182
200def configure(service, key, auth, use_syslog):183def configure(service, key, auth, use_syslog):
201 ''' Perform basic configuration of Ceph '''184 """Perform basic configuration of Ceph."""
202 create_keyring(service, key)185 create_keyring(service, key)
203 create_key_file(service, key)186 create_key_file(service, key)
204 hosts = get_ceph_nodes()187 hosts = get_ceph_nodes()
@@ -211,17 +194,17 @@
211194
212195
213def image_mapped(name):196def image_mapped(name):
214 ''' Determine whether a RADOS block device is mapped locally '''197 """Determine whether a RADOS block device is mapped locally."""
215 try:198 try:
216 out = check_output(['rbd', 'showmapped'])199 out = check_output(['rbd', 'showmapped']).decode('UTF-8')
217 except CalledProcessError:200 except CalledProcessError:
218 return False201 return False
219 else:202
220 return name in out203 return name in out
221204
222205
223def map_block_storage(service, pool, image):206def map_block_storage(service, pool, image):
224 ''' Map a RADOS block device for local use '''207 """Map a RADOS block device for local use."""
225 cmd = [208 cmd = [
226 'rbd',209 'rbd',
227 'map',210 'map',
@@ -235,31 +218,32 @@
235218
236219
237def filesystem_mounted(fs):220def filesystem_mounted(fs):
238 ''' Determine whether a filesytems is already mounted '''221 """Determine whether a filesystem is already mounted."""
239 return fs in [f for f, m in mounts()]222 return fs in [f for f, m in mounts()]
240223
241224
242def make_filesystem(blk_device, fstype='ext4', timeout=10):225def make_filesystem(blk_device, fstype='ext4', timeout=10):
243 ''' Make a new filesystem on the specified block device '''226 """Make a new filesystem on the specified block device."""
244 count = 0227 count = 0
245 e_noent = os.errno.ENOENT228 e_noent = os.errno.ENOENT
246 while not os.path.exists(blk_device):229 while not os.path.exists(blk_device):
247 if count >= timeout:230 if count >= timeout:
248 log('ceph: gave up waiting on block device %s' % blk_device,231 log('Gave up waiting on block device %s' % blk_device,
249 level=ERROR)232 level=ERROR)
250 raise IOError(e_noent, os.strerror(e_noent), blk_device)233 raise IOError(e_noent, os.strerror(e_noent), blk_device)
251 log('ceph: waiting for block device %s to appear' % blk_device,234
252 level=INFO)235 log('Waiting for block device %s to appear' % blk_device,
236 level=DEBUG)
253 count += 1237 count += 1
254 time.sleep(1)238 time.sleep(1)
255 else:239 else:
256 log('ceph: Formatting block device %s as filesystem %s.' %240 log('Formatting block device %s as filesystem %s.' %
257 (blk_device, fstype), level=INFO)241 (blk_device, fstype), level=INFO)
258 check_call(['mkfs', '-t', fstype, blk_device])242 check_call(['mkfs', '-t', fstype, blk_device])
259243
260244
261def place_data_on_block_device(blk_device, data_src_dst):245def place_data_on_block_device(blk_device, data_src_dst):
262 ''' Migrate data in data_src_dst to blk_device and then remount '''246 """Migrate data in data_src_dst to blk_device and then remount."""
263 # mount block device into /mnt247 # mount block device into /mnt
264 mount(blk_device, '/mnt')248 mount(blk_device, '/mnt')
265 # copy data to /mnt249 # copy data to /mnt
@@ -279,8 +263,8 @@
279263
280# TODO: re-use264# TODO: re-use
281def modprobe(module):265def modprobe(module):
282 ''' Load a kernel module and configure for auto-load on reboot '''266 """Load a kernel module and configure for auto-load on reboot."""
283 log('ceph: Loading kernel module', level=INFO)267 log('Loading kernel module', level=INFO)
284 cmd = ['modprobe', module]268 cmd = ['modprobe', module]
285 check_call(cmd)269 check_call(cmd)
286 with open('/etc/modules', 'r+') as modules:270 with open('/etc/modules', 'r+') as modules:
@@ -289,7 +273,7 @@
289273
290274
291def copy_files(src, dst, symlinks=False, ignore=None):275def copy_files(src, dst, symlinks=False, ignore=None):
292 ''' Copy files from src to dst '''276 """Copy files from src to dst."""
293 for item in os.listdir(src):277 for item in os.listdir(src):
294 s = os.path.join(src, item)278 s = os.path.join(src, item)
295 d = os.path.join(dst, item)279 d = os.path.join(dst, item)
@@ -302,8 +286,7 @@
302def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,286def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
303 blk_device, fstype, system_services=[],287 blk_device, fstype, system_services=[],
304 replicas=3):288 replicas=3):
305 """289 """NOTE: This function must only be called from a single service unit for
306 NOTE: This function must only be called from a single service unit for
307 the same rbd_img otherwise data loss will occur.290 the same rbd_img otherwise data loss will occur.
308291
309 Ensures given pool and RBD image exists, is mapped to a block device,292 Ensures given pool and RBD image exists, is mapped to a block device,
@@ -317,15 +300,16 @@
317 """300 """
318 # Ensure pool, RBD image, RBD mappings are in place.301 # Ensure pool, RBD image, RBD mappings are in place.
319 if not pool_exists(service, pool):302 if not pool_exists(service, pool):
320 log('ceph: Creating new pool {}.'.format(pool))303 log('Creating new pool {}.'.format(pool), level=INFO)
321 create_pool(service, pool, replicas=replicas)304 create_pool(service, pool, replicas=replicas)
322305
323 if not rbd_exists(service, pool, rbd_img):306 if not rbd_exists(service, pool, rbd_img):
324 log('ceph: Creating RBD image ({}).'.format(rbd_img))307 log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
325 create_rbd_image(service, pool, rbd_img, sizemb)308 create_rbd_image(service, pool, rbd_img, sizemb)
326309
327 if not image_mapped(rbd_img):310 if not image_mapped(rbd_img):
328 log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))311 log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
312 level=INFO)
329 map_block_storage(service, pool, rbd_img)313 map_block_storage(service, pool, rbd_img)
330314
331 # make file system315 # make file system
@@ -340,45 +324,47 @@
340324
341 for svc in system_services:325 for svc in system_services:
342 if service_running(svc):326 if service_running(svc):
343 log('ceph: Stopping services {} prior to migrating data.'327 log('Stopping services {} prior to migrating data.'
344 .format(svc))328 .format(svc), level=DEBUG)
345 service_stop(svc)329 service_stop(svc)
346330
347 place_data_on_block_device(blk_device, mount_point)331 place_data_on_block_device(blk_device, mount_point)
348332
349 for svc in system_services:333 for svc in system_services:
350 log('ceph: Starting service {} after migrating data.'334 log('Starting service {} after migrating data.'
351 .format(svc))335 .format(svc), level=DEBUG)
352 service_start(svc)336 service_start(svc)
353337
354338
355def ensure_ceph_keyring(service, user=None, group=None):339def ensure_ceph_keyring(service, user=None, group=None):
356 '''340 """Ensures a ceph keyring is created for a named service and optionally
357 Ensures a ceph keyring is created for a named service341 ensures user and group ownership.
358 and optionally ensures user and group ownership.
359342
360 Returns False if no ceph key is available in relation state.343 Returns False if no ceph key is available in relation state.
361 '''344 """
362 key = None345 key = None
363 for rid in relation_ids('ceph'):346 for rid in relation_ids('ceph'):
364 for unit in related_units(rid):347 for unit in related_units(rid):
365 key = relation_get('key', rid=rid, unit=unit)348 key = relation_get('key', rid=rid, unit=unit)
366 if key:349 if key:
367 break350 break
351
368 if not key:352 if not key:
369 return False353 return False
354
370 create_keyring(service=service, key=key)355 create_keyring(service=service, key=key)
371 keyring = _keyring_path(service)356 keyring = _keyring_path(service)
372 if user and group:357 if user and group:
373 check_call(['chown', '%s.%s' % (user, group), keyring])358 check_call(['chown', '%s.%s' % (user, group), keyring])
359
374 return True360 return True
375361
376362
377def ceph_version():363def ceph_version():
378 ''' Retrieve the local version of ceph '''364 """Retrieve the local version of ceph."""
379 if os.path.exists('/usr/bin/ceph'):365 if os.path.exists('/usr/bin/ceph'):
380 cmd = ['ceph', '-v']366 cmd = ['ceph', '-v']
381 output = check_output(cmd)367 output = check_output(cmd).decode('US-ASCII')
382 output = output.split()368 output = output.split()
383 if len(output) > 3:369 if len(output) > 3:
384 return output[2]370 return output[2]
385371
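As a reminder of how the tidied ceph helpers fit together, a sketch of a ceph-relation-changed style handler; the service, pool, image, device and mount point values are illustrative only.

    from charmhelpers.contrib.storage.linux.ceph import (
        ensure_ceph_keyring,
        ensure_ceph_storage,
    )

    # ensure_ceph_keyring() returns False until the ceph relation has
    # published a key for this service.
    if ensure_ceph_keyring(service='nova-compute', user='nova', group='nova'):
        # Create the pool/image if needed, map it, make a filesystem and
        # migrate the data directory onto it.
        ensure_ceph_storage(service='nova-compute', pool='nova',
                            rbd_img='nova', sizemb=1024,
                            mount_point='/var/lib/nova/instances',
                            blk_device='/dev/rbd1', fstype='ext4',
                            system_services=['nova-compute'])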
=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-08-12 21:48:24 +0000
+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2014-12-11 17:56:59 +0000
@@ -1,12 +1,12 @@
1
2import os1import os
3import re2import re
4
5from subprocess import (3from subprocess import (
6 check_call,4 check_call,
7 check_output,5 check_output,
8)6)
97
8import six
9
1010
11##################################################11##################################################
12# loopback device helpers.12# loopback device helpers.
@@ -37,7 +37,7 @@
37 '''37 '''
38 file_path = os.path.abspath(file_path)38 file_path = os.path.abspath(file_path)
39 check_call(['losetup', '--find', file_path])39 check_call(['losetup', '--find', file_path])
40 for d, f in loopback_devices().iteritems():40 for d, f in six.iteritems(loopback_devices()):
41 if f == file_path:41 if f == file_path:
42 return d42 return d
4343
@@ -51,7 +51,7 @@
5151
52 :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)52 :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
53 '''53 '''
54 for d, f in loopback_devices().iteritems():54 for d, f in six.iteritems(loopback_devices()):
55 if f == path:55 if f == path:
56 return d56 return d
5757
5858
=== modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-05-19 11:41:02 +0000
+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-12-11 17:56:59 +0000
@@ -61,6 +61,7 @@
61 vg = None61 vg = None
62 pvd = check_output(['pvdisplay', block_device]).splitlines()62 pvd = check_output(['pvdisplay', block_device]).splitlines()
63 for l in pvd:63 for l in pvd:
64 l = l.decode('UTF-8')
64 if l.strip().startswith('VG Name'):65 if l.strip().startswith('VG Name'):
65 vg = ' '.join(l.strip().split()[2:])66 vg = ' '.join(l.strip().split()[2:])
66 return vg67 return vg
6768
=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-12-11 17:56:59 +0000
@@ -30,7 +30,8 @@
30 # sometimes sgdisk exits non-zero; this is OK, dd will clean up30 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
31 call(['sgdisk', '--zap-all', '--mbrtogpt',31 call(['sgdisk', '--zap-all', '--mbrtogpt',
32 '--clear', block_device])32 '--clear', block_device])
33 dev_end = check_output(['blockdev', '--getsz', block_device])33 dev_end = check_output(['blockdev', '--getsz',
34 block_device]).decode('UTF-8')
34 gpt_end = int(dev_end.split()[0]) - 10035 gpt_end = int(dev_end.split()[0]) - 100
35 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),36 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
36 'bs=1M', 'count=1'])37 'bs=1M', 'count=1'])
@@ -47,7 +48,7 @@
47 it doesn't.48 it doesn't.
48 '''49 '''
49 is_partition = bool(re.search(r".*[0-9]+\b", device))50 is_partition = bool(re.search(r".*[0-9]+\b", device))
50 out = check_output(['mount'])51 out = check_output(['mount']).decode('UTF-8')
51 if is_partition:52 if is_partition:
52 return bool(re.search(device + r"\b", out))53 return bool(re.search(device + r"\b", out))
53 return bool(re.search(device + r"[0-9]+\b", out))54 return bool(re.search(device + r"[0-9]+\b", out))
5455
=== modified file 'hooks/charmhelpers/core/fstab.py'
--- hooks/charmhelpers/core/fstab.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/core/fstab.py 2014-12-11 17:56:59 +0000
@@ -3,10 +3,11 @@
33
4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
55
6import io
6import os7import os
78
89
9class Fstab(file):10class Fstab(io.FileIO):
10 """This class extends file in order to implement a file reader/writer11 """This class extends file in order to implement a file reader/writer
11 for file `/etc/fstab`12 for file `/etc/fstab`
12 """13 """
@@ -24,8 +25,8 @@
24 options = "defaults"25 options = "defaults"
2526
26 self.options = options27 self.options = options
27 self.d = d28 self.d = int(d)
28 self.p = p29 self.p = int(p)
2930
30 def __eq__(self, o):31 def __eq__(self, o):
31 return str(self) == str(o)32 return str(self) == str(o)
@@ -45,7 +46,7 @@
45 self._path = path46 self._path = path
46 else:47 else:
47 self._path = self.DEFAULT_PATH48 self._path = self.DEFAULT_PATH
48 file.__init__(self, self._path, 'r+')49 super(Fstab, self).__init__(self._path, 'rb+')
4950
50 def _hydrate_entry(self, line):51 def _hydrate_entry(self, line):
51 # NOTE: use split with no arguments to split on any52 # NOTE: use split with no arguments to split on any
@@ -58,8 +59,9 @@
58 def entries(self):59 def entries(self):
59 self.seek(0)60 self.seek(0)
60 for line in self.readlines():61 for line in self.readlines():
62 line = line.decode('us-ascii')
61 try:63 try:
62 if not line.startswith("#"):64 if line.strip() and not line.startswith("#"):
63 yield self._hydrate_entry(line)65 yield self._hydrate_entry(line)
64 except ValueError:66 except ValueError:
65 pass67 pass
@@ -75,14 +77,14 @@
75 if self.get_entry_by_attr('device', entry.device):77 if self.get_entry_by_attr('device', entry.device):
76 return False78 return False
7779
78 self.write(str(entry) + '\n')80 self.write((str(entry) + '\n').encode('us-ascii'))
79 self.truncate()81 self.truncate()
80 return entry82 return entry
8183
82 def remove_entry(self, entry):84 def remove_entry(self, entry):
83 self.seek(0)85 self.seek(0)
8486
85 lines = self.readlines()87 lines = [l.decode('us-ascii') for l in self.readlines()]
8688
87 found = False89 found = False
88 for index, line in enumerate(lines):90 for index, line in enumerate(lines):
@@ -97,7 +99,7 @@
97 lines.remove(line)99 lines.remove(line)
98100
99 self.seek(0)101 self.seek(0)
100 self.write(''.join(lines))102 self.write(''.join(lines).encode('us-ascii'))
101 self.truncate()103 self.truncate()
102 return True104 return True
103105
104106
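A minimal sketch of the ported Fstab class in use; it assumes root access to /etc/fstab (the constructor now opens it 'rb+' through io.FileIO) and that entries is exposed as a property, as in the unchanged parts of this file.

    from charmhelpers.core.fstab import Fstab

    # entries skips comments and, after this change, blank lines as well.
    fstab = Fstab()
    for entry in fstab.entries:
        print(str(entry))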
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-12-11 17:56:59 +0000
@@ -9,9 +9,14 @@
9import yaml9import yaml
10import subprocess10import subprocess
11import sys11import sys
12import UserDict
13from subprocess import CalledProcessError12from subprocess import CalledProcessError
1413
14import six
15if not six.PY3:
16 from UserDict import UserDict
17else:
18 from collections import UserDict
19
15CRITICAL = "CRITICAL"20CRITICAL = "CRITICAL"
16ERROR = "ERROR"21ERROR = "ERROR"
17WARNING = "WARNING"22WARNING = "WARNING"
@@ -63,16 +68,18 @@
63 command = ['juju-log']68 command = ['juju-log']
64 if level:69 if level:
65 command += ['-l', level]70 command += ['-l', level]
71 if not isinstance(message, six.string_types):
72 message = repr(message)
66 command += [message]73 command += [message]
67 subprocess.call(command)74 subprocess.call(command)
6875
6976
70class Serializable(UserDict.IterableUserDict):77class Serializable(UserDict):
71 """Wrapper, an object that can be serialized to yaml or json"""78 """Wrapper, an object that can be serialized to yaml or json"""
7279
73 def __init__(self, obj):80 def __init__(self, obj):
74 # wrap the object81 # wrap the object
75 UserDict.IterableUserDict.__init__(self)82 UserDict.__init__(self)
76 self.data = obj83 self.data = obj
7784
78 def __getattr__(self, attr):85 def __getattr__(self, attr):
@@ -218,7 +225,7 @@
218 prev_keys = []225 prev_keys = []
219 if self._prev_dict is not None:226 if self._prev_dict is not None:
220 prev_keys = self._prev_dict.keys()227 prev_keys = self._prev_dict.keys()
221 return list(set(prev_keys + dict.keys(self)))228 return list(set(prev_keys + list(dict.keys(self))))
222229
223 def load_previous(self, path=None):230 def load_previous(self, path=None):
224 """Load previous copy of config from disk.231 """Load previous copy of config from disk.
@@ -269,7 +276,7 @@
269276
270 """277 """
271 if self._prev_dict:278 if self._prev_dict:
272 for k, v in self._prev_dict.iteritems():279 for k, v in six.iteritems(self._prev_dict):
273 if k not in self:280 if k not in self:
274 self[k] = v281 self[k] = v
275 with open(self.path, 'w') as f:282 with open(self.path, 'w') as f:
@@ -284,7 +291,8 @@
284 config_cmd_line.append(scope)291 config_cmd_line.append(scope)
285 config_cmd_line.append('--format=json')292 config_cmd_line.append('--format=json')
286 try:293 try:
287 config_data = json.loads(subprocess.check_output(config_cmd_line))294 config_data = json.loads(
295 subprocess.check_output(config_cmd_line).decode('UTF-8'))
288 if scope is not None:296 if scope is not None:
289 return config_data297 return config_data
290 return Config(config_data)298 return Config(config_data)
@@ -303,10 +311,10 @@
303 if unit:311 if unit:
304 _args.append(unit)312 _args.append(unit)
305 try:313 try:
306 return json.loads(subprocess.check_output(_args))314 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
307 except ValueError:315 except ValueError:
308 return None316 return None
309 except CalledProcessError, e:317 except CalledProcessError as e:
310 if e.returncode == 2:318 if e.returncode == 2:
311 return None319 return None
312 raise320 raise
@@ -318,7 +326,7 @@
318 relation_cmd_line = ['relation-set']326 relation_cmd_line = ['relation-set']
319 if relation_id is not None:327 if relation_id is not None:
320 relation_cmd_line.extend(('-r', relation_id))328 relation_cmd_line.extend(('-r', relation_id))
321 for k, v in (relation_settings.items() + kwargs.items()):329 for k, v in (list(relation_settings.items()) + list(kwargs.items())):
322 if v is None:330 if v is None:
323 relation_cmd_line.append('{}='.format(k))331 relation_cmd_line.append('{}='.format(k))
324 else:332 else:
@@ -335,7 +343,8 @@
335 relid_cmd_line = ['relation-ids', '--format=json']343 relid_cmd_line = ['relation-ids', '--format=json']
336 if reltype is not None:344 if reltype is not None:
337 relid_cmd_line.append(reltype)345 relid_cmd_line.append(reltype)
338 return json.loads(subprocess.check_output(relid_cmd_line)) or []346 return json.loads(
347 subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
339 return []348 return []
340349
341350
@@ -346,7 +355,8 @@
346 units_cmd_line = ['relation-list', '--format=json']355 units_cmd_line = ['relation-list', '--format=json']
347 if relid is not None:356 if relid is not None:
348 units_cmd_line.extend(('-r', relid))357 units_cmd_line.extend(('-r', relid))
349 return json.loads(subprocess.check_output(units_cmd_line)) or []358 return json.loads(
359 subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
350360
351361
352@cached362@cached
@@ -386,21 +396,31 @@
386396
387397
388@cached398@cached
399def metadata():
400 """Get the current charm metadata.yaml contents as a python object"""
401 with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
402 return yaml.safe_load(md)
403
404
405@cached
389def relation_types():406def relation_types():
390 """Get a list of relation types supported by this charm"""407 """Get a list of relation types supported by this charm"""
391 charmdir = os.environ.get('CHARM_DIR', '')
392 mdf = open(os.path.join(charmdir, 'metadata.yaml'))
393 md = yaml.safe_load(mdf)
394 rel_types = []408 rel_types = []
409 md = metadata()
395 for key in ('provides', 'requires', 'peers'):410 for key in ('provides', 'requires', 'peers'):
396 section = md.get(key)411 section = md.get(key)
397 if section:412 if section:
398 rel_types.extend(section.keys())413 rel_types.extend(section.keys())
399 mdf.close()
400 return rel_types414 return rel_types
401415
402416
403@cached417@cached
418def charm_name():
419 """Get the name of the current charm as is specified on metadata.yaml"""
420 return metadata().get('name')
421
422
423@cached
404def relations():424def relations():
405 """Get a nested dictionary of relation data for all related units"""425 """Get a nested dictionary of relation data for all related units"""
406 rels = {}426 rels = {}
@@ -455,7 +475,7 @@
455 """Get the unit ID for the remote unit"""475 """Get the unit ID for the remote unit"""
456 _args = ['unit-get', '--format=json', attribute]476 _args = ['unit-get', '--format=json', attribute]
457 try:477 try:
458 return json.loads(subprocess.check_output(_args))478 return json.loads(subprocess.check_output(_args).decode('UTF-8'))
459 except ValueError:479 except ValueError:
460 return None480 return None
461481
462482
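
A recurring change in hookenv.py (and in host.py below) is appending .decode('UTF-8') to subprocess.check_output() before handing the result to json.loads(), since check_output() returns bytes on Python 3. A small sketch of the pattern, using 'echo' purely as a stand-in for a juju tool such as relation-get --format=json:

    import json
    import subprocess

    # check_output() returns bytes on Python 3; decode to text before
    # parsing, as config(), relation_get() and unit_get() now do.
    raw = subprocess.check_output(['echo', '{"private-address": "10.0.0.7"}'])
    data = json.loads(raw.decode('UTF-8'))
    print(data['private-address'])
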
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/core/host.py 2014-12-11 17:56:59 +0000
@@ -14,11 +14,12 @@
14import subprocess14import subprocess
15import hashlib15import hashlib
16from contextlib import contextmanager16from contextlib import contextmanager
17
18from collections import OrderedDict17from collections import OrderedDict
1918
20from hookenv import log19import six
21from fstab import Fstab20
21from .hookenv import log
22from .fstab import Fstab
2223
2324
24def service_start(service_name):25def service_start(service_name):
@@ -54,7 +55,9 @@
54def service_running(service):55def service_running(service):
55 """Determine whether a system service is running"""56 """Determine whether a system service is running"""
56 try:57 try:
57 output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)58 output = subprocess.check_output(
59 ['service', service, 'status'],
60 stderr=subprocess.STDOUT).decode('UTF-8')
58 except subprocess.CalledProcessError:61 except subprocess.CalledProcessError:
59 return False62 return False
60 else:63 else:
@@ -67,7 +70,9 @@
67def service_available(service_name):70def service_available(service_name):
68 """Determine whether a system service is available"""71 """Determine whether a system service is available"""
69 try:72 try:
70 subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)73 subprocess.check_output(
74 ['service', service_name, 'status'],
75 stderr=subprocess.STDOUT).decode('UTF-8')
71 except subprocess.CalledProcessError as e:76 except subprocess.CalledProcessError as e:
72 return 'unrecognized service' not in e.output77 return 'unrecognized service' not in e.output
73 else:78 else:
@@ -96,6 +101,26 @@
96 return user_info101 return user_info
97102
98103
104def add_group(group_name, system_group=False):
105 """Add a group to the system"""
106 try:
107 group_info = grp.getgrnam(group_name)
108 log('group {0} already exists!'.format(group_name))
109 except KeyError:
110 log('creating group {0}'.format(group_name))
111 cmd = ['addgroup']
112 if system_group:
113 cmd.append('--system')
114 else:
115 cmd.extend([
116 '--group',
117 ])
118 cmd.append(group_name)
119 subprocess.check_call(cmd)
120 group_info = grp.getgrnam(group_name)
121 return group_info
122
123
99def add_user_to_group(username, group):124def add_user_to_group(username, group):
100 """Add a user to a group"""125 """Add a user to a group"""
101 cmd = [126 cmd = [
@@ -115,7 +140,7 @@
115 cmd.append(from_path)140 cmd.append(from_path)
116 cmd.append(to_path)141 cmd.append(to_path)
117 log(" ".join(cmd))142 log(" ".join(cmd))
118 return subprocess.check_output(cmd).strip()143 return subprocess.check_output(cmd).decode('UTF-8').strip()
119144
120145
121def symlink(source, destination):146def symlink(source, destination):
@@ -130,7 +155,7 @@
130 subprocess.check_call(cmd)155 subprocess.check_call(cmd)
131156
132157
133def mkdir(path, owner='root', group='root', perms=0555, force=False):158def mkdir(path, owner='root', group='root', perms=0o555, force=False):
134 """Create a directory"""159 """Create a directory"""
135 log("Making dir {} {}:{} {:o}".format(path, owner, group,160 log("Making dir {} {}:{} {:o}".format(path, owner, group,
136 perms))161 perms))
@@ -146,7 +171,7 @@
146 os.chown(realpath, uid, gid)171 os.chown(realpath, uid, gid)
147172
148173
149def write_file(path, content, owner='root', group='root', perms=0444):174def write_file(path, content, owner='root', group='root', perms=0o444):
150 """Create or overwrite a file with the contents of a string"""175 """Create or overwrite a file with the contents of a string"""
151 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))176 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
152 uid = pwd.getpwnam(owner).pw_uid177 uid = pwd.getpwnam(owner).pw_uid
@@ -177,7 +202,7 @@
177 cmd_args.extend([device, mountpoint])202 cmd_args.extend([device, mountpoint])
178 try:203 try:
179 subprocess.check_output(cmd_args)204 subprocess.check_output(cmd_args)
180 except subprocess.CalledProcessError, e:205 except subprocess.CalledProcessError as e:
181 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))206 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
182 return False207 return False
183208
@@ -191,7 +216,7 @@
191 cmd_args = ['umount', mountpoint]216 cmd_args = ['umount', mountpoint]
192 try:217 try:
193 subprocess.check_output(cmd_args)218 subprocess.check_output(cmd_args)
194 except subprocess.CalledProcessError, e:219 except subprocess.CalledProcessError as e:
195 log('Error unmounting {}\n{}'.format(mountpoint, e.output))220 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
196 return False221 return False
197222
@@ -218,8 +243,8 @@
218 """243 """
219 if os.path.exists(path):244 if os.path.exists(path):
220 h = getattr(hashlib, hash_type)()245 h = getattr(hashlib, hash_type)()
221 with open(path, 'r') as source:246 with open(path, 'rb') as source:
222 h.update(source.read()) # IGNORE:E1101 - it does have update247 h.update(source.read())
223 return h.hexdigest()248 return h.hexdigest()
224 else:249 else:
225 return None250 return None
@@ -297,7 +322,7 @@
297 if length is None:322 if length is None:
298 length = random.choice(range(35, 45))323 length = random.choice(range(35, 45))
299 alphanumeric_chars = [324 alphanumeric_chars = [
300 l for l in (string.letters + string.digits)325 l for l in (string.ascii_letters + string.digits)
301 if l not in 'l0QD1vAEIOUaeiou']326 if l not in 'l0QD1vAEIOUaeiou']
302 random_chars = [327 random_chars = [
303 random.choice(alphanumeric_chars) for _ in range(length)]328 random.choice(alphanumeric_chars) for _ in range(length)]
@@ -306,14 +331,14 @@
306331
307def list_nics(nic_type):332def list_nics(nic_type):
308 '''Return a list of nics of given type(s)'''333 '''Return a list of nics of given type(s)'''
309 if isinstance(nic_type, basestring):334 if isinstance(nic_type, six.string_types):
310 int_types = [nic_type]335 int_types = [nic_type]
311 else:336 else:
312 int_types = nic_type337 int_types = nic_type
313 interfaces = []338 interfaces = []
314 for int_type in int_types:339 for int_type in int_types:
315 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']340 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
316 ip_output = subprocess.check_output(cmd).split('\n')341 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
317 ip_output = (line for line in ip_output if line)342 ip_output = (line for line in ip_output if line)
318 for line in ip_output:343 for line in ip_output:
319 if line.split()[1].startswith(int_type):344 if line.split()[1].startswith(int_type):
@@ -335,7 +360,7 @@
335360
336def get_nic_mtu(nic):361def get_nic_mtu(nic):
337 cmd = ['ip', 'addr', 'show', nic]362 cmd = ['ip', 'addr', 'show', nic]
338 ip_output = subprocess.check_output(cmd).split('\n')363 ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
339 mtu = ""364 mtu = ""
340 for line in ip_output:365 for line in ip_output:
341 words = line.split()366 words = line.split()
@@ -346,7 +371,7 @@
346371
347def get_nic_hwaddr(nic):372def get_nic_hwaddr(nic):
348 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]373 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
349 ip_output = subprocess.check_output(cmd)374 ip_output = subprocess.check_output(cmd).decode('UTF-8')
350 hwaddr = ""375 hwaddr = ""
351 words = ip_output.split()376 words = ip_output.split()
352 if 'link/ether' in words:377 if 'link/ether' in words:
@@ -363,8 +388,8 @@
363388
364 '''389 '''
365 import apt_pkg390 import apt_pkg
366 from charmhelpers.fetch import apt_cache
367 if not pkgcache:391 if not pkgcache:
392 from charmhelpers.fetch import apt_cache
368 pkgcache = apt_cache()393 pkgcache = apt_cache()
369 pkg = pkgcache[package]394 pkg = pkgcache[package]
370 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)395 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
371396
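
host.py also swaps basestring (removed in Python 3) for six.string_types wherever a helper accepts either one name or a list of names, as list_nics() does above and the apt_* helpers do in fetch/__init__.py below. A sketch of that normalisation, assuming six is importable (the bootstrap in hooks/charmhelpers/__init__.py installs it):

    import six

    def normalize(names):
        # basestring no longer exists on Python 3, so test against
        # six.string_types to accept str/unicode on 2 and str on 3.
        if isinstance(names, six.string_types):
            return [names]
        return list(names)

    print(normalize('eth'))            # ['eth']
    print(normalize(['eth', 'bond']))  # ['eth', 'bond']
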
=== modified file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2014-12-11 17:56:59 +0000
@@ -196,7 +196,7 @@
196 if not os.path.isabs(file_name):196 if not os.path.isabs(file_name):
197 file_name = os.path.join(hookenv.charm_dir(), file_name)197 file_name = os.path.join(hookenv.charm_dir(), file_name)
198 with open(file_name, 'w') as file_stream:198 with open(file_name, 'w') as file_stream:
199 os.fchmod(file_stream.fileno(), 0600)199 os.fchmod(file_stream.fileno(), 0o600)
200 yaml.dump(config_data, file_stream)200 yaml.dump(config_data, file_stream)
201201
202 def read_context(self, file_name):202 def read_context(self, file_name):
@@ -211,15 +211,19 @@
211211
212class TemplateCallback(ManagerCallback):212class TemplateCallback(ManagerCallback):
213 """213 """
214 Callback class that will render a Jinja2 template, for use as a ready action.214 Callback class that will render a Jinja2 template, for use as a ready
215215 action.
216 :param str source: The template source file, relative to `$CHARM_DIR/templates`216
217 :param str source: The template source file, relative to
218 `$CHARM_DIR/templates`
219
217 :param str target: The target to write the rendered template to220 :param str target: The target to write the rendered template to
218 :param str owner: The owner of the rendered file221 :param str owner: The owner of the rendered file
219 :param str group: The group of the rendered file222 :param str group: The group of the rendered file
220 :param int perms: The permissions of the rendered file223 :param int perms: The permissions of the rendered file
221 """224 """
222 def __init__(self, source, target, owner='root', group='root', perms=0444):225 def __init__(self, source, target,
226 owner='root', group='root', perms=0o444):
223 self.source = source227 self.source = source
224 self.target = target228 self.target = target
225 self.owner = owner229 self.owner = owner
226230
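
The perms defaults here (and throughout this sync) change from 0600/0444 to 0o600/0o444 because the bare leading-zero octal syntax is a syntax error on Python 3, while the 0o prefix parses on Python 2.6+ and 3 alike. A throwaway demonstration of the new-style literal with os.fchmod(), mirroring the fchmod() call above:

    import os
    import stat
    import tempfile

    # Temporary file purely for illustration; 0o600 == 384 decimal.
    fd, path = tempfile.mkstemp()
    try:
        os.fchmod(fd, 0o600)
        print(oct(stat.S_IMODE(os.stat(path).st_mode)))  # 0o600 (0600 on Python 2)
    finally:
        os.close(fd)
        os.remove(path)
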
=== modified file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/core/templating.py 2014-12-11 17:56:59 +0000
@@ -4,7 +4,8 @@
4from charmhelpers.core import hookenv4from charmhelpers.core import hookenv
55
66
7def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):7def render(source, target, context, owner='root', group='root',
8 perms=0o444, templates_dir=None):
8 """9 """
9 Render a template.10 Render a template.
1011
1112
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-12-11 17:56:59 +0000
@@ -5,10 +5,6 @@
5from charmhelpers.core.host import (5from charmhelpers.core.host import (
6 lsb_release6 lsb_release
7)7)
8from urlparse import (
9 urlparse,
10 urlunparse,
11)
12import subprocess8import subprocess
13from charmhelpers.core.hookenv import (9from charmhelpers.core.hookenv import (
14 config,10 config,
@@ -16,6 +12,12 @@
16)12)
17import os13import os
1814
15import six
16if six.PY3:
17 from urllib.parse import urlparse, urlunparse
18else:
19 from urlparse import urlparse, urlunparse
20
1921
20CLOUD_ARCHIVE = """# Ubuntu Cloud Archive22CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
21deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main23deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@@ -149,7 +151,7 @@
149 cmd = ['apt-get', '--assume-yes']151 cmd = ['apt-get', '--assume-yes']
150 cmd.extend(options)152 cmd.extend(options)
151 cmd.append('install')153 cmd.append('install')
152 if isinstance(packages, basestring):154 if isinstance(packages, six.string_types):
153 cmd.append(packages)155 cmd.append(packages)
154 else:156 else:
155 cmd.extend(packages)157 cmd.extend(packages)
@@ -182,7 +184,7 @@
182def apt_purge(packages, fatal=False):184def apt_purge(packages, fatal=False):
183 """Purge one or more packages"""185 """Purge one or more packages"""
184 cmd = ['apt-get', '--assume-yes', 'purge']186 cmd = ['apt-get', '--assume-yes', 'purge']
185 if isinstance(packages, basestring):187 if isinstance(packages, six.string_types):
186 cmd.append(packages)188 cmd.append(packages)
187 else:189 else:
188 cmd.extend(packages)190 cmd.extend(packages)
@@ -193,7 +195,7 @@
193def apt_hold(packages, fatal=False):195def apt_hold(packages, fatal=False):
194 """Hold one or more packages"""196 """Hold one or more packages"""
195 cmd = ['apt-mark', 'hold']197 cmd = ['apt-mark', 'hold']
196 if isinstance(packages, basestring):198 if isinstance(packages, six.string_types):
197 cmd.append(packages)199 cmd.append(packages)
198 else:200 else:
199 cmd.extend(packages)201 cmd.extend(packages)
@@ -260,7 +262,7 @@
260262
261 if key:263 if key:
262 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:264 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
263 with NamedTemporaryFile() as key_file:265 with NamedTemporaryFile('w+') as key_file:
264 key_file.write(key)266 key_file.write(key)
265 key_file.flush()267 key_file.flush()
266 key_file.seek(0)268 key_file.seek(0)
@@ -297,14 +299,14 @@
297 sources = safe_load((config(sources_var) or '').strip()) or []299 sources = safe_load((config(sources_var) or '').strip()) or []
298 keys = safe_load((config(keys_var) or '').strip()) or None300 keys = safe_load((config(keys_var) or '').strip()) or None
299301
300 if isinstance(sources, basestring):302 if isinstance(sources, six.string_types):
301 sources = [sources]303 sources = [sources]
302304
303 if keys is None:305 if keys is None:
304 for source in sources:306 for source in sources:
305 add_source(source, None)307 add_source(source, None)
306 else:308 else:
307 if isinstance(keys, basestring):309 if isinstance(keys, six.string_types):
308 keys = [keys]310 keys = [keys]
309311
310 if len(sources) != len(keys):312 if len(sources) != len(keys):
@@ -401,7 +403,7 @@
401 while result is None or result == APT_NO_LOCK:403 while result is None or result == APT_NO_LOCK:
402 try:404 try:
403 result = subprocess.check_call(cmd, env=env)405 result = subprocess.check_call(cmd, env=env)
404 except subprocess.CalledProcessError, e:406 except subprocess.CalledProcessError as e:
405 retry_count = retry_count + 1407 retry_count = retry_count + 1
406 if retry_count > APT_NO_LOCK_RETRY_COUNT:408 if retry_count > APT_NO_LOCK_RETRY_COUNT:
407 raise409 raise
408410
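
fetch/__init__.py now guards its urlparse imports on six.PY3 because that module moved to urllib.parse in Python 3; archiveurl.py below uses an expanded version of the same guard for urllib2/urllib.request. A minimal, runnable form of the conditional import (six assumed available):

    import six

    if six.PY3:
        from urllib.parse import urlparse, urlunparse
    else:
        from urlparse import urlparse, urlunparse

    parts = urlparse('http://ubuntu-cloud.archive.canonical.com/ubuntu')
    print(parts.netloc)       # ubuntu-cloud.archive.canonical.com
    print(urlunparse(parts))  # http://ubuntu-cloud.archive.canonical.com/ubuntu
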
=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
--- hooks/charmhelpers/fetch/archiveurl.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/fetch/archiveurl.py 2014-12-11 17:56:59 +0000
@@ -1,8 +1,23 @@
1import os1import os
2import urllib2
3from urllib import urlretrieve
4import urlparse
5import hashlib2import hashlib
3import re
4
5import six
6if six.PY3:
7 from urllib.request import (
8 build_opener, install_opener, urlopen, urlretrieve,
9 HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
10 )
11 from urllib.parse import urlparse, urlunparse, parse_qs
12 from urllib.error import URLError
13else:
14 from urllib import urlretrieve
15 from urllib2 import (
16 build_opener, install_opener, urlopen,
17 HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
18 URLError
19 )
20 from urlparse import urlparse, urlunparse, parse_qs
621
7from charmhelpers.fetch import (22from charmhelpers.fetch import (
8 BaseFetchHandler,23 BaseFetchHandler,
@@ -15,6 +30,24 @@
15from charmhelpers.core.host import mkdir, check_hash30from charmhelpers.core.host import mkdir, check_hash
1631
1732
33def splituser(host):
34 '''urllib.splituser(), but six's support of this seems broken'''
35 _userprog = re.compile('^(.*)@(.*)$')
36 match = _userprog.match(host)
37 if match:
38 return match.group(1, 2)
39 return None, host
40
41
42def splitpasswd(user):
43 '''urllib.splitpasswd(), but six's support of this is missing'''
44 _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
45 match = _passwdprog.match(user)
46 if match:
47 return match.group(1, 2)
48 return user, None
49
50
18class ArchiveUrlFetchHandler(BaseFetchHandler):51class ArchiveUrlFetchHandler(BaseFetchHandler):
19 """52 """
20 Handler to download archive files from arbitrary URLs.53 Handler to download archive files from arbitrary URLs.
@@ -42,20 +75,20 @@
42 """75 """
43 # propogate all exceptions76 # propogate all exceptions
44 # URLError, OSError, etc77 # URLError, OSError, etc
45 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)78 proto, netloc, path, params, query, fragment = urlparse(source)
46 if proto in ('http', 'https'):79 if proto in ('http', 'https'):
47 auth, barehost = urllib2.splituser(netloc)80 auth, barehost = splituser(netloc)
48 if auth is not None:81 if auth is not None:
49 source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))82 source = urlunparse((proto, barehost, path, params, query, fragment))
50 username, password = urllib2.splitpasswd(auth)83 username, password = splitpasswd(auth)
51 passman = urllib2.HTTPPasswordMgrWithDefaultRealm()84 passman = HTTPPasswordMgrWithDefaultRealm()
52 # Realm is set to None in add_password to force the username and password85 # Realm is set to None in add_password to force the username and password
53 # to be used whatever the realm86 # to be used whatever the realm
54 passman.add_password(None, source, username, password)87 passman.add_password(None, source, username, password)
55 authhandler = urllib2.HTTPBasicAuthHandler(passman)88 authhandler = HTTPBasicAuthHandler(passman)
56 opener = urllib2.build_opener(authhandler)89 opener = build_opener(authhandler)
57 urllib2.install_opener(opener)90 install_opener(opener)
58 response = urllib2.urlopen(source)91 response = urlopen(source)
59 try:92 try:
60 with open(dest, 'w') as dest_file:93 with open(dest, 'w') as dest_file:
61 dest_file.write(response.read())94 dest_file.write(response.read())
@@ -91,17 +124,21 @@
91 url_parts = self.parse_url(source)124 url_parts = self.parse_url(source)
92 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')125 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
93 if not os.path.exists(dest_dir):126 if not os.path.exists(dest_dir):
94 mkdir(dest_dir, perms=0755)127 mkdir(dest_dir, perms=0o755)
95 dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))128 dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
96 try:129 try:
97 self.download(source, dld_file)130 self.download(source, dld_file)
98 except urllib2.URLError as e:131 except URLError as e:
99 raise UnhandledSource(e.reason)132 raise UnhandledSource(e.reason)
100 except OSError as e:133 except OSError as e:
101 raise UnhandledSource(e.strerror)134 raise UnhandledSource(e.strerror)
102 options = urlparse.parse_qs(url_parts.fragment)135 options = parse_qs(url_parts.fragment)
103 for key, value in options.items():136 for key, value in options.items():
104 if key in hashlib.algorithms:137 if not six.PY3:
138 algorithms = hashlib.algorithms
139 else:
140 algorithms = hashlib.algorithms_available
141 if key in algorithms:
105 check_hash(dld_file, value, key)142 check_hash(dld_file, value, key)
106 if checksum:143 if checksum:
107 check_hash(dld_file, checksum, hash_type)144 check_hash(dld_file, checksum, hash_type)
108145
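
Because six does not provide usable replacements for urllib2.splituser()/splitpasswd(), archiveurl.py now carries its own regex-based versions and feeds the result into HTTPBasicAuthHandler. The snippet below repeats those two helpers with an invented user:password@host netloc to show what they return:

    import re

    def splituser(host):
        # 'user:pass@host' -> ('user:pass', 'host'); (None, host) otherwise.
        match = re.match('^(.*)@(.*)$', host)
        if match:
            return match.group(1, 2)
        return None, host

    def splitpasswd(user):
        # 'user:pass' -> ('user', 'pass'); (user, None) if no colon.
        match = re.match('^([^:]*):(.*)$', user, re.S)
        if match:
            return match.group(1, 2)
        return user, None

    print(splituser('deployer:s3cret@archive.example.com'))
    # ('deployer:s3cret', 'archive.example.com')
    print(splitpasswd('deployer:s3cret'))
    # ('deployer', 's3cret')
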
=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
--- hooks/charmhelpers/fetch/bzrurl.py 2014-06-10 12:45:45 +0000
+++ hooks/charmhelpers/fetch/bzrurl.py 2014-12-11 17:56:59 +0000
@@ -5,6 +5,10 @@
5)5)
6from charmhelpers.core.host import mkdir6from charmhelpers.core.host import mkdir
77
8import six
9if six.PY3:
10 raise ImportError('bzrlib does not support Python3')
11
8try:12try:
9 from bzrlib.branch import Branch13 from bzrlib.branch import Branch
10except ImportError:14except ImportError:
@@ -42,7 +46,7 @@
42 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",46 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
43 branch_name)47 branch_name)
44 if not os.path.exists(dest_dir):48 if not os.path.exists(dest_dir):
45 mkdir(dest_dir, perms=0755)49 mkdir(dest_dir, perms=0o755)
46 try:50 try:
47 self.branch(source, dest_dir)51 self.branch(source, dest_dir)
48 except OSError as e:52 except OSError as e:
4953
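
Both bzrurl.py here and giturl.py below now bail out at import time on Python 3, because bzrlib and (at the time of this sync) GitPython are Python 2 only; raising ImportError up front keeps the failure early and explicit rather than surfacing deep inside the library. A hedged sketch of the same guard, wrapped in a loader so it can be exercised on either interpreter:

    import six

    def load_bzr_branch():
        # Refuse up front on Python 3, exactly as bzrurl.py now does.
        if six.PY3:
            raise ImportError('bzrlib does not support Python3')
        from bzrlib.branch import Branch  # Python 2 + python-bzrlib only
        return Branch

    try:
        load_bzr_branch()
    except ImportError as exc:
        print('bzr fetch handler unavailable: {}'.format(exc))
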
=== modified file 'hooks/charmhelpers/fetch/giturl.py'
--- hooks/charmhelpers/fetch/giturl.py 2014-10-23 17:30:13 +0000
+++ hooks/charmhelpers/fetch/giturl.py 2014-12-11 17:56:59 +0000
@@ -5,6 +5,10 @@
5)5)
6from charmhelpers.core.host import mkdir6from charmhelpers.core.host import mkdir
77
8import six
9if six.PY3:
10 raise ImportError('GitPython does not support Python 3')
11
8try:12try:
9 from git import Repo13 from git import Repo
10except ImportError:14except ImportError:
@@ -17,7 +21,7 @@
17 """Handler for git branches via generic and github URLs"""21 """Handler for git branches via generic and github URLs"""
18 def can_handle(self, source):22 def can_handle(self, source):
19 url_parts = self.parse_url(source)23 url_parts = self.parse_url(source)
20 #TODO (mattyw) no support for ssh git@ yet24 # TODO (mattyw) no support for ssh git@ yet
21 if url_parts.scheme not in ('http', 'https', 'git'):25 if url_parts.scheme not in ('http', 'https', 'git'):
22 return False26 return False
23 else:27 else:
@@ -30,13 +34,16 @@
30 repo = Repo.clone_from(source, dest)34 repo = Repo.clone_from(source, dest)
31 repo.git.checkout(branch)35 repo.git.checkout(branch)
3236
33 def install(self, source, branch="master"):37 def install(self, source, branch="master", dest=None):
34 url_parts = self.parse_url(source)38 url_parts = self.parse_url(source)
35 branch_name = url_parts.path.strip("/").split("/")[-1]39 branch_name = url_parts.path.strip("/").split("/")[-1]
36 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",40 if dest:
37 branch_name)41 dest_dir = os.path.join(dest, branch_name)
42 else:
43 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
44 branch_name)
38 if not os.path.exists(dest_dir):45 if not os.path.exists(dest_dir):
39 mkdir(dest_dir, perms=0755)46 mkdir(dest_dir, perms=0o755)
40 try:47 try:
41 self.clone(source, dest_dir, branch)48 self.clone(source, dest_dir, branch)
42 except OSError as e:49 except OSError as e:
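
giturl.py additionally grows an optional dest argument on install(), so callers can direct the clone somewhere other than $CHARM_DIR/fetched. A small sketch of just that path-selection logic (CHARM_DIR falls back to '.' here only so the example runs outside a hook environment):

    import os

    def clone_dest_dir(branch_name, dest=None):
        # An explicit dest wins; otherwise keep the old
        # $CHARM_DIR/fetched/<branch> layout.
        if dest:
            return os.path.join(dest, branch_name)
        return os.path.join(os.environ.get('CHARM_DIR', '.'),
                            'fetched', branch_name)

    print(clone_dest_dir('stable', dest='/srv/git'))
    # /srv/git/stable
    print(clone_dest_dir('stable'))
    # ./fetched/stable when CHARM_DIR is unset
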
