Merge lp:~corey.bryant/charms/trusty/nova-compute/contrib.python.packages into lp:~openstack-charmers-archive/charms/trusty/nova-compute/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 93
Proposed branch: lp:~corey.bryant/charms/trusty/nova-compute/contrib.python.packages
Merge into: lp:~openstack-charmers-archive/charms/trusty/nova-compute/next
Diff against target: 3706 lines (+1331/-535)
34 files modified
charm-helpers-hooks.yaml (+1/-0)
hooks/charmhelpers/__init__.py (+22/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+16/-7)
hooks/charmhelpers/contrib/network/ip.py (+59/-53)
hooks/charmhelpers/contrib/network/ufw.py (+189/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
hooks/charmhelpers/contrib/openstack/context.py (+339/-225)
hooks/charmhelpers/contrib/openstack/ip.py (+41/-27)
hooks/charmhelpers/contrib/openstack/neutron.py (+20/-4)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+2/-2)
hooks/charmhelpers/contrib/openstack/templating.py (+5/-5)
hooks/charmhelpers/contrib/openstack/utils.py (+146/-13)
hooks/charmhelpers/contrib/python/packages.py (+77/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+89/-102)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+4/-4)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+1/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/fstab.py (+10/-8)
hooks/charmhelpers/core/hookenv.py (+41/-15)
hooks/charmhelpers/core/host.py (+51/-20)
hooks/charmhelpers/core/services/__init__.py (+2/-2)
hooks/charmhelpers/core/services/helpers.py (+9/-5)
hooks/charmhelpers/core/sysctl.py (+34/-0)
hooks/charmhelpers/core/templating.py (+2/-1)
hooks/charmhelpers/fetch/__init__.py (+18/-12)
hooks/charmhelpers/fetch/archiveurl.py (+53/-16)
hooks/charmhelpers/fetch/bzrurl.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+51/-0)
tests/charmhelpers/__init__.py (+22/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+3/-3)
tests/charmhelpers/contrib/amulet/utils.py (+6/-4)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/nova-compute/contrib.python.packages
Reviewer: OpenStack Charmers
Status: Pending
Review via email: mp+244329@code.launchpad.net
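
For context, the headline addition in this charm-helpers sync is the new contrib.python.packages module (+77 lines in the diff below), which lets the charm manage pip-installed Python packages from its hooks. A minimal usage sketch follows; the pip_install name and signature are an assumption here, so check the added file in the diff for the actual API.

    # Hypothetical usage from a charm hook; assumes the synced module
    # exposes a pip_install() helper (verify against the +77 line file
    # added under hooks/charmhelpers/contrib/python/packages.py).
    from charmhelpers.contrib.python.packages import pip_install

    def install_extra_python_deps():
        # Install a PyPI package with pip rather than apt.
        pip_install('dnspython')  # 'dnspython' is only an illustrative package name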
uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #168 nova-compute-next for corey.bryant mp244329
    LINT OK: passed

Build: http://10.230.18.80:8080/job/charm_lint_check/168/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #131 nova-compute-next for corey.bryant mp244329
    UNIT FAIL: unit-test failed

UNIT Results (max last 2 lines):
  FAILED (errors=3)
  make: *** [unit_test] Error 1

Full unit test output: pastebin not avail., cmd error
Build: http://10.230.18.80:8080/job/charm_unit_test/131/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #81 nova-compute-next for corey.bryant mp244329
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
  ERROR subprocess encountered error code 1
  make: *** [test] Error 1

Full amulet test output: pastebin not avail., cmd error
Build: http://10.230.18.80:8080/job/charm_amulet_test/81/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #175 nova-compute-next for corey.bryant mp244329
    LINT OK: passed

Build: http://10.230.18.80:8080/job/charm_lint_check/175/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #138 nova-compute-next for corey.bryant mp244329
    UNIT OK: passed

Build: http://10.230.18.80:8080/job/charm_unit_test/138/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #198 nova-compute-next for corey.bryant mp244329
    LINT OK: passed

Build: http://10.230.18.80:8080/job/charm_lint_check/198/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #161 nova-compute-next for corey.bryant mp244329
    UNIT OK: passed

Build: http://10.230.18.80:8080/job/charm_unit_test/161/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #116 nova-compute-next for corey.bryant mp244329
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
  ERROR subprocess encountered error code 1
  make: *** [test] Error 1

Full amulet test output: pastebin not avail., cmd error
Build: http://10.230.18.80:8080/job/charm_amulet_test/116/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_lint_check #217 nova-compute-next for corey.bryant mp244329
    LINT OK: passed

Build: http://10.230.18.80:8080/job/charm_lint_check/217/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_unit_test #180 nova-compute-next for corey.bryant mp244329
    UNIT OK: passed

Build: http://10.230.18.80:8080/job/charm_unit_test/180/

uosci-testing-bot (uosci-testing-bot) wrote :

charm_amulet_test #134 nova-compute-next for corey.bryant mp244329
    AMULET FAIL: amulet-test failed

AMULET Results (max last 2 lines):
  ERROR subprocess encountered error code 1
  make: *** [test] Error 1

Full amulet test output: pastebin not avail., cmd error
Build: http://10.230.18.80:8080/job/charm_amulet_test/134/
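
The amulet job failed on every run above while lint and unit tests eventually passed, so a local reproduction is the quickest way to triage. A minimal sketch, assuming the Makefile targets referenced in the CI output (unit_test and test) and, for the amulet suite, a bootstrapped Juju environment:

    bzr branch lp:~corey.bryant/charms/trusty/nova-compute/contrib.python.packages nova-compute
    cd nova-compute
    make unit_test   # suite exercised by the charm_unit_test job
    make test        # amulet suite exercised by charm_amulet_test; needs a Juju environment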

Preview Diff

1=== modified file 'charm-helpers-hooks.yaml'
2--- charm-helpers-hooks.yaml 2014-09-23 10:21:54 +0000
3+++ charm-helpers-hooks.yaml 2014-12-11 17:56:54 +0000
4@@ -9,4 +9,5 @@
5 - apache
6 - cluster
7 - contrib.network
8+ - contrib.python.packages
9 - payload.execd
10
11=== added file 'hooks/charmhelpers/__init__.py'
12--- hooks/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
13+++ hooks/charmhelpers/__init__.py 2014-12-11 17:56:54 +0000
14@@ -0,0 +1,22 @@
15+# Bootstrap charm-helpers, installing its dependencies if necessary using
16+# only standard libraries.
17+import subprocess
18+import sys
19+
20+try:
21+ import six # flake8: noqa
22+except ImportError:
23+ if sys.version_info.major == 2:
24+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
25+ else:
26+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
27+ import six # flake8: noqa
28+
29+try:
30+ import yaml # flake8: noqa
31+except ImportError:
32+ if sys.version_info.major == 2:
33+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
34+ else:
35+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
36+ import yaml # flake8: noqa
37
38=== removed file 'hooks/charmhelpers/__init__.py'
39=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
40--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-06 21:57:43 +0000
41+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-12-11 17:56:54 +0000
42@@ -13,9 +13,10 @@
43
44 import subprocess
45 import os
46-
47 from socket import gethostname as get_unit_hostname
48
49+import six
50+
51 from charmhelpers.core.hookenv import (
52 log,
53 relation_ids,
54@@ -77,7 +78,7 @@
55 "show", resource
56 ]
57 try:
58- status = subprocess.check_output(cmd)
59+ status = subprocess.check_output(cmd).decode('UTF-8')
60 except subprocess.CalledProcessError:
61 return False
62 else:
63@@ -150,34 +151,42 @@
64 return False
65
66
67-def determine_api_port(public_port):
68+def determine_api_port(public_port, singlenode_mode=False):
69 '''
70 Determine correct API server listening port based on
71 existence of HTTPS reverse proxy and/or haproxy.
72
73 public_port: int: standard public port for given service
74
75+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
76+
77 returns: int: the correct listening port for the API service
78 '''
79 i = 0
80- if len(peer_units()) > 0 or is_clustered():
81+ if singlenode_mode:
82+ i += 1
83+ elif len(peer_units()) > 0 or is_clustered():
84 i += 1
85 if https():
86 i += 1
87 return public_port - (i * 10)
88
89
90-def determine_apache_port(public_port):
91+def determine_apache_port(public_port, singlenode_mode=False):
92 '''
93 Description: Determine correct apache listening port based on public IP +
94 state of the cluster.
95
96 public_port: int: standard public port for given service
97
98+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
99+
100 returns: int: the correct listening port for the HAProxy service
101 '''
102 i = 0
103- if len(peer_units()) > 0 or is_clustered():
104+ if singlenode_mode:
105+ i += 1
106+ elif len(peer_units()) > 0 or is_clustered():
107 i += 1
108 return public_port - (i * 10)
109
110@@ -197,7 +206,7 @@
111 for setting in settings:
112 conf[setting] = config_get(setting)
113 missing = []
114- [missing.append(s) for s, v in conf.iteritems() if v is None]
115+ [missing.append(s) for s, v in six.iteritems(conf) if v is None]
116 if missing:
117 log('Insufficient config data to configure hacluster.', level=ERROR)
118 raise HAIncompleteConfig
119
120=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
121--- hooks/charmhelpers/contrib/network/ip.py 2014-10-06 21:57:43 +0000
122+++ hooks/charmhelpers/contrib/network/ip.py 2014-12-11 17:56:54 +0000
123@@ -1,15 +1,12 @@
124 import glob
125 import re
126 import subprocess
127-import sys
128
129 from functools import partial
130
131 from charmhelpers.core.hookenv import unit_get
132 from charmhelpers.fetch import apt_install
133 from charmhelpers.core.hookenv import (
134- WARNING,
135- ERROR,
136 log
137 )
138
139@@ -34,31 +31,28 @@
140 network)
141
142
143+def no_ip_found_error_out(network):
144+ errmsg = ("No IP address found in network: %s" % network)
145+ raise ValueError(errmsg)
146+
147+
148 def get_address_in_network(network, fallback=None, fatal=False):
149- """
150- Get an IPv4 or IPv6 address within the network from the host.
151+ """Get an IPv4 or IPv6 address within the network from the host.
152
153 :param network (str): CIDR presentation format. For example,
154 '192.168.1.0/24'.
155 :param fallback (str): If no address is found, return fallback.
156 :param fatal (boolean): If no address is found, fallback is not
157 set and fatal is True then exit(1).
158-
159 """
160-
161- def not_found_error_out():
162- log("No IP address found in network: %s" % network,
163- level=ERROR)
164- sys.exit(1)
165-
166 if network is None:
167 if fallback is not None:
168 return fallback
169+
170+ if fatal:
171+ no_ip_found_error_out(network)
172 else:
173- if fatal:
174- not_found_error_out()
175- else:
176- return None
177+ return None
178
179 _validate_cidr(network)
180 network = netaddr.IPNetwork(network)
181@@ -70,6 +64,7 @@
182 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
183 if cidr in network:
184 return str(cidr.ip)
185+
186 if network.version == 6 and netifaces.AF_INET6 in addresses:
187 for addr in addresses[netifaces.AF_INET6]:
188 if not addr['addr'].startswith('fe80'):
189@@ -82,20 +77,20 @@
190 return fallback
191
192 if fatal:
193- not_found_error_out()
194+ no_ip_found_error_out(network)
195
196 return None
197
198
199 def is_ipv6(address):
200- '''Determine whether provided address is IPv6 or not'''
201+ """Determine whether provided address is IPv6 or not."""
202 try:
203 address = netaddr.IPAddress(address)
204 except netaddr.AddrFormatError:
205 # probably a hostname - so not an address at all!
206 return False
207- else:
208- return address.version == 6
209+
210+ return address.version == 6
211
212
213 def is_address_in_network(network, address):
214@@ -113,11 +108,13 @@
215 except (netaddr.core.AddrFormatError, ValueError):
216 raise ValueError("Network (%s) is not in CIDR presentation format" %
217 network)
218+
219 try:
220 address = netaddr.IPAddress(address)
221 except (netaddr.core.AddrFormatError, ValueError):
222 raise ValueError("Address (%s) is not in correct presentation format" %
223 address)
224+
225 if address in network:
226 return True
227 else:
228@@ -140,57 +137,63 @@
229 if address.version == 4 and netifaces.AF_INET in addresses:
230 addr = addresses[netifaces.AF_INET][0]['addr']
231 netmask = addresses[netifaces.AF_INET][0]['netmask']
232- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
233+ network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
234+ cidr = network.cidr
235 if address in cidr:
236 if key == 'iface':
237 return iface
238 else:
239 return addresses[netifaces.AF_INET][0][key]
240+
241 if address.version == 6 and netifaces.AF_INET6 in addresses:
242 for addr in addresses[netifaces.AF_INET6]:
243 if not addr['addr'].startswith('fe80'):
244- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
245- addr['netmask']))
246+ network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
247+ addr['netmask']))
248+ cidr = network.cidr
249 if address in cidr:
250 if key == 'iface':
251 return iface
252+ elif key == 'netmask' and cidr:
253+ return str(cidr).split('/')[1]
254 else:
255 return addr[key]
256+
257 return None
258
259
260 get_iface_for_address = partial(_get_for_address, key='iface')
261
262+
263 get_netmask_for_address = partial(_get_for_address, key='netmask')
264
265
266 def format_ipv6_addr(address):
267- """
268- IPv6 needs to be wrapped with [] in url link to parse correctly.
269+ """If address is IPv6, wrap it in '[]' otherwise return None.
270+
271+ This is required by most configuration files when specifying IPv6
272+ addresses.
273 """
274 if is_ipv6(address):
275- address = "[%s]" % address
276- else:
277- log("Not a valid ipv6 address: %s" % address, level=WARNING)
278- address = None
279+ return "[%s]" % address
280
281- return address
282+ return None
283
284
285 def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
286 fatal=True, exc_list=None):
287- """
288- Return the assigned IP address for a given interface, if any, or [].
289- """
290+ """Return the assigned IP address for a given interface, if any."""
291 # Extract nic if passed /dev/ethX
292 if '/' in iface:
293 iface = iface.split('/')[-1]
294+
295 if not exc_list:
296 exc_list = []
297+
298 try:
299 inet_num = getattr(netifaces, inet_type)
300 except AttributeError:
301- raise Exception('Unknown inet type ' + str(inet_type))
302+ raise Exception("Unknown inet type '%s'" % str(inet_type))
303
304 interfaces = netifaces.interfaces()
305 if inc_aliases:
306@@ -198,15 +201,18 @@
307 for _iface in interfaces:
308 if iface == _iface or _iface.split(':')[0] == iface:
309 ifaces.append(_iface)
310+
311 if fatal and not ifaces:
312 raise Exception("Invalid interface '%s'" % iface)
313+
314 ifaces.sort()
315 else:
316 if iface not in interfaces:
317 if fatal:
318- raise Exception("%s not found " % (iface))
319+ raise Exception("Interface '%s' not found " % (iface))
320 else:
321 return []
322+
323 else:
324 ifaces = [iface]
325
326@@ -217,10 +223,13 @@
327 for entry in net_info[inet_num]:
328 if 'addr' in entry and entry['addr'] not in exc_list:
329 addresses.append(entry['addr'])
330+
331 if fatal and not addresses:
332 raise Exception("Interface '%s' doesn't have any %s addresses." %
333 (iface, inet_type))
334- return addresses
335+
336+ return sorted(addresses)
337+
338
339 get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
340
341@@ -237,6 +246,7 @@
342 raw = re.match(ll_key, _addr)
343 if raw:
344 _addr = raw.group(1)
345+
346 if _addr == addr:
347 log("Address '%s' is configured on iface '%s'" %
348 (addr, iface))
349@@ -247,8 +257,9 @@
350
351
352 def sniff_iface(f):
353- """If no iface provided, inject net iface inferred from unit private
354- address.
355+ """Ensure decorated function is called with a value for iface.
356+
357+ If no iface provided, inject net iface inferred from unit private address.
358 """
359 def iface_sniffer(*args, **kwargs):
360 if not kwargs.get('iface', None):
361@@ -291,7 +302,7 @@
362 if global_addrs:
363 # Make sure any found global addresses are not temporary
364 cmd = ['ip', 'addr', 'show', iface]
365- out = subprocess.check_output(cmd)
366+ out = subprocess.check_output(cmd).decode('UTF-8')
367 if dynamic_only:
368 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
369 else:
370@@ -313,33 +324,28 @@
371 return addrs
372
373 if fatal:
374- raise Exception("Interface '%s' doesn't have a scope global "
375+ raise Exception("Interface '%s' does not have a scope global "
376 "non-temporary ipv6 address." % iface)
377
378 return []
379
380
381 def get_bridges(vnic_dir='/sys/devices/virtual/net'):
382- """
383- Return a list of bridges on the system or []
384- """
385- b_rgex = vnic_dir + '/*/bridge'
386- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
387+ """Return a list of bridges on the system."""
388+ b_regex = "%s/*/bridge" % vnic_dir
389+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
390
391
392 def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
393- """
394- Return a list of nics comprising a given bridge on the system or []
395- """
396- brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
397- return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
398+ """Return a list of nics comprising a given bridge on the system."""
399+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
400+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
401
402
403 def is_bridge_member(nic):
404- """
405- Check if a given nic is a member of a bridge
406- """
407+ """Check if a given nic is a member of a bridge."""
408 for bridge in get_bridges():
409 if nic in get_bridge_nics(bridge):
410 return True
411+
412 return False
413
414=== added file 'hooks/charmhelpers/contrib/network/ufw.py'
415--- hooks/charmhelpers/contrib/network/ufw.py 1970-01-01 00:00:00 +0000
416+++ hooks/charmhelpers/contrib/network/ufw.py 2014-12-11 17:56:54 +0000
417@@ -0,0 +1,189 @@
418+"""
419+This module contains helpers to add and remove ufw rules.
420+
421+Examples:
422+
423+- open SSH port for subnet 10.0.3.0/24:
424+
425+ >>> from charmhelpers.contrib.network import ufw
426+ >>> ufw.enable()
427+ >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
428+
429+- open service by name as defined in /etc/services:
430+
431+ >>> from charmhelpers.contrib.network import ufw
432+ >>> ufw.enable()
433+ >>> ufw.service('ssh', 'open')
434+
435+- close service by port number:
436+
437+ >>> from charmhelpers.contrib.network import ufw
438+ >>> ufw.enable()
439+ >>> ufw.service('4949', 'close') # munin
440+"""
441+
442+__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
443+
444+import re
445+import os
446+import subprocess
447+from charmhelpers.core import hookenv
448+
449+
450+def is_enabled():
451+ """
452+ Check if `ufw` is enabled
453+
454+ :returns: True if ufw is enabled
455+ """
456+ output = subprocess.check_output(['ufw', 'status'],
457+ env={'LANG': 'en_US',
458+ 'PATH': os.environ['PATH']})
459+
460+ m = re.findall(r'^Status: active\n', output, re.M)
461+
462+ return len(m) >= 1
463+
464+
465+def enable():
466+ """
467+ Enable ufw
468+
469+ :returns: True if ufw is successfully enabled
470+ """
471+ if is_enabled():
472+ return True
473+
474+ output = subprocess.check_output(['ufw', 'enable'],
475+ env={'LANG': 'en_US',
476+ 'PATH': os.environ['PATH']})
477+
478+ m = re.findall('^Firewall is active and enabled on system startup\n',
479+ output, re.M)
480+ hookenv.log(output, level='DEBUG')
481+
482+ if len(m) == 0:
483+ hookenv.log("ufw couldn't be enabled", level='WARN')
484+ return False
485+ else:
486+ hookenv.log("ufw enabled", level='INFO')
487+ return True
488+
489+
490+def disable():
491+ """
492+ Disable ufw
493+
494+ :returns: True if ufw is successfully disabled
495+ """
496+ if not is_enabled():
497+ return True
498+
499+ output = subprocess.check_output(['ufw', 'disable'],
500+ env={'LANG': 'en_US',
501+ 'PATH': os.environ['PATH']})
502+
503+ m = re.findall(r'^Firewall stopped and disabled on system startup\n',
504+ output, re.M)
505+ hookenv.log(output, level='DEBUG')
506+
507+ if len(m) == 0:
508+ hookenv.log("ufw couldn't be disabled", level='WARN')
509+ return False
510+ else:
511+ hookenv.log("ufw disabled", level='INFO')
512+ return True
513+
514+
515+def modify_access(src, dst='any', port=None, proto=None, action='allow'):
516+ """
517+ Grant access to an address or subnet
518+
519+ :param src: address (e.g. 192.168.1.234) or subnet
520+ (e.g. 192.168.1.0/24).
521+ :param dst: destiny of the connection, if the machine has multiple IPs and
522+ connections to only one of those have to accepted this is the
523+ field has to be set.
524+ :param port: destiny port
525+ :param proto: protocol (tcp or udp)
526+ :param action: `allow` or `delete`
527+ """
528+ if not is_enabled():
529+ hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
530+ return
531+
532+ if action == 'delete':
533+ cmd = ['ufw', 'delete', 'allow']
534+ else:
535+ cmd = ['ufw', action]
536+
537+ if src is not None:
538+ cmd += ['from', src]
539+
540+ if dst is not None:
541+ cmd += ['to', dst]
542+
543+ if port is not None:
544+ cmd += ['port', port]
545+
546+ if proto is not None:
547+ cmd += ['proto', proto]
548+
549+ hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
550+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
551+ (stdout, stderr) = p.communicate()
552+
553+ hookenv.log(stdout, level='INFO')
554+
555+ if p.returncode != 0:
556+ hookenv.log(stderr, level='ERROR')
557+ hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
558+ p.returncode),
559+ level='ERROR')
560+
561+
562+def grant_access(src, dst='any', port=None, proto=None):
563+ """
564+ Grant access to an address or subnet
565+
566+ :param src: address (e.g. 192.168.1.234) or subnet
567+ (e.g. 192.168.1.0/24).
568+ :param dst: destiny of the connection, if the machine has multiple IPs and
569+ connections to only one of those have to accepted this is the
570+ field has to be set.
571+ :param port: destiny port
572+ :param proto: protocol (tcp or udp)
573+ """
574+ return modify_access(src, dst=dst, port=port, proto=proto, action='allow')
575+
576+
577+def revoke_access(src, dst='any', port=None, proto=None):
578+ """
579+ Revoke access to an address or subnet
580+
581+ :param src: address (e.g. 192.168.1.234) or subnet
582+ (e.g. 192.168.1.0/24).
583+ :param dst: destiny of the connection, if the machine has multiple IPs and
584+ connections to only one of those have to accepted this is the
585+ field has to be set.
586+ :param port: destiny port
587+ :param proto: protocol (tcp or udp)
588+ """
589+ return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
590+
591+
592+def service(name, action):
593+ """
594+ Open/close access to a service
595+
596+ :param name: could be a service name defined in `/etc/services` or a port
597+ number.
598+ :param action: `open` or `close`
599+ """
600+ if action == 'open':
601+ subprocess.check_output(['ufw', 'allow', name])
602+ elif action == 'close':
603+ subprocess.check_output(['ufw', 'delete', 'allow', name])
604+ else:
605+ raise Exception(("'{}' not supported, use 'allow' "
606+ "or 'delete'").format(action))
607
608=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
609--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-06 21:57:43 +0000
610+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-12-11 17:56:54 +0000
611@@ -1,3 +1,4 @@
612+import six
613 from charmhelpers.contrib.amulet.deployment import (
614 AmuletDeployment
615 )
616@@ -69,7 +70,7 @@
617
618 def _configure_services(self, configs):
619 """Configure all of the services."""
620- for service, config in configs.iteritems():
621+ for service, config in six.iteritems(configs):
622 self.d.configure(service, config)
623
624 def _get_openstack_release(self):
625
626=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
627--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-06 21:57:43 +0000
628+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-12-11 17:56:54 +0000
629@@ -7,6 +7,8 @@
630 import keystoneclient.v2_0 as keystone_client
631 import novaclient.v1_1.client as nova_client
632
633+import six
634+
635 from charmhelpers.contrib.amulet.utils import (
636 AmuletUtils
637 )
638@@ -60,7 +62,7 @@
639 expected service catalog endpoints.
640 """
641 self.log.debug('actual: {}'.format(repr(actual)))
642- for k, v in expected.iteritems():
643+ for k, v in six.iteritems(expected):
644 if k in actual:
645 ret = self._validate_dict_data(expected[k][0], actual[k][0])
646 if ret:
647
648=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
649--- hooks/charmhelpers/contrib/openstack/context.py 2014-10-06 21:57:43 +0000
650+++ hooks/charmhelpers/contrib/openstack/context.py 2014-12-11 17:56:54 +0000
651@@ -1,20 +1,18 @@
652 import json
653 import os
654 import time
655-
656 from base64 import b64decode
657+from subprocess import check_call
658
659-from subprocess import (
660- check_call
661-)
662+import six
663
664 from charmhelpers.fetch import (
665 apt_install,
666 filter_installed_packages,
667 )
668-
669 from charmhelpers.core.hookenv import (
670 config,
671+ is_relation_made,
672 local_unit,
673 log,
674 relation_get,
675@@ -23,41 +21,40 @@
676 relation_set,
677 unit_get,
678 unit_private_ip,
679+ DEBUG,
680+ INFO,
681+ WARNING,
682 ERROR,
683- INFO
684 )
685-
686 from charmhelpers.core.host import (
687 mkdir,
688- write_file
689+ write_file,
690 )
691-
692 from charmhelpers.contrib.hahelpers.cluster import (
693 determine_apache_port,
694 determine_api_port,
695 https,
696- is_clustered
697+ is_clustered,
698 )
699-
700 from charmhelpers.contrib.hahelpers.apache import (
701 get_cert,
702 get_ca_cert,
703 install_ca_cert,
704 )
705-
706 from charmhelpers.contrib.openstack.neutron import (
707 neutron_plugin_attribute,
708 )
709-
710 from charmhelpers.contrib.network.ip import (
711 get_address_in_network,
712 get_ipv6_addr,
713 get_netmask_for_address,
714 format_ipv6_addr,
715- is_address_in_network
716+ is_address_in_network,
717 )
718+from charmhelpers.contrib.openstack.utils import get_host_ip
719
720 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
721+ADDRESS_TYPES = ['admin', 'internal', 'public']
722
723
724 class OSContextError(Exception):
725@@ -65,7 +62,7 @@
726
727
728 def ensure_packages(packages):
729- '''Install but do not upgrade required plugin packages'''
730+ """Install but do not upgrade required plugin packages."""
731 required = filter_installed_packages(packages)
732 if required:
733 apt_install(required, fatal=True)
734@@ -73,20 +70,27 @@
735
736 def context_complete(ctxt):
737 _missing = []
738- for k, v in ctxt.iteritems():
739+ for k, v in six.iteritems(ctxt):
740 if v is None or v == '':
741 _missing.append(k)
742+
743 if _missing:
744- log('Missing required data: %s' % ' '.join(_missing), level='INFO')
745+ log('Missing required data: %s' % ' '.join(_missing), level=INFO)
746 return False
747+
748 return True
749
750
751 def config_flags_parser(config_flags):
752+ """Parses config flags string into dict.
753+
754+ The provided config_flags string may be a list of comma-separated values
755+ which themselves may be comma-separated list of values.
756+ """
757 if config_flags.find('==') >= 0:
758- log("config_flags is not in expected format (key=value)",
759- level=ERROR)
760+ log("config_flags is not in expected format (key=value)", level=ERROR)
761 raise OSContextError
762+
763 # strip the following from each value.
764 post_strippers = ' ,'
765 # we strip any leading/trailing '=' or ' ' from the string then
766@@ -94,7 +98,7 @@
767 split = config_flags.strip(' =').split('=')
768 limit = len(split)
769 flags = {}
770- for i in xrange(0, limit - 1):
771+ for i in range(0, limit - 1):
772 current = split[i]
773 next = split[i + 1]
774 vindex = next.rfind(',')
775@@ -109,17 +113,18 @@
776 # if this not the first entry, expect an embedded key.
777 index = current.rfind(',')
778 if index < 0:
779- log("invalid config value(s) at index %s" % (i),
780- level=ERROR)
781+ log("Invalid config value(s) at index %s" % (i), level=ERROR)
782 raise OSContextError
783 key = current[index + 1:]
784
785 # Add to collection.
786 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
787+
788 return flags
789
790
791 class OSContextGenerator(object):
792+ """Base class for all context generators."""
793 interfaces = []
794
795 def __call__(self):
796@@ -131,11 +136,11 @@
797
798 def __init__(self,
799 database=None, user=None, relation_prefix=None, ssl_dir=None):
800- '''
801- Allows inspecting relation for settings prefixed with relation_prefix.
802- This is useful for parsing access for multiple databases returned via
803- the shared-db interface (eg, nova_password, quantum_password)
804- '''
805+ """Allows inspecting relation for settings prefixed with
806+ relation_prefix. This is useful for parsing access for multiple
807+ databases returned via the shared-db interface (eg, nova_password,
808+ quantum_password)
809+ """
810 self.relation_prefix = relation_prefix
811 self.database = database
812 self.user = user
813@@ -145,9 +150,8 @@
814 self.database = self.database or config('database')
815 self.user = self.user or config('database-user')
816 if None in [self.database, self.user]:
817- log('Could not generate shared_db context. '
818- 'Missing required charm config options. '
819- '(database name and user)')
820+ log("Could not generate shared_db context. Missing required charm "
821+ "config options. (database name and user)", level=ERROR)
822 raise OSContextError
823
824 ctxt = {}
825@@ -200,23 +204,24 @@
826 def __call__(self):
827 self.database = self.database or config('database')
828 if self.database is None:
829- log('Could not generate postgresql_db context. '
830- 'Missing required charm config options. '
831- '(database name)')
832+ log('Could not generate postgresql_db context. Missing required '
833+ 'charm config options. (database name)', level=ERROR)
834 raise OSContextError
835+
836 ctxt = {}
837-
838 for rid in relation_ids(self.interfaces[0]):
839 for unit in related_units(rid):
840- ctxt = {
841- 'database_host': relation_get('host', rid=rid, unit=unit),
842- 'database': self.database,
843- 'database_user': relation_get('user', rid=rid, unit=unit),
844- 'database_password': relation_get('password', rid=rid, unit=unit),
845- 'database_type': 'postgresql',
846- }
847+ rel_host = relation_get('host', rid=rid, unit=unit)
848+ rel_user = relation_get('user', rid=rid, unit=unit)
849+ rel_passwd = relation_get('password', rid=rid, unit=unit)
850+ ctxt = {'database_host': rel_host,
851+ 'database': self.database,
852+ 'database_user': rel_user,
853+ 'database_password': rel_passwd,
854+ 'database_type': 'postgresql'}
855 if context_complete(ctxt):
856 return ctxt
857+
858 return {}
859
860
861@@ -225,23 +230,29 @@
862 ca_path = os.path.join(ssl_dir, 'db-client.ca')
863 with open(ca_path, 'w') as fh:
864 fh.write(b64decode(rdata['ssl_ca']))
865+
866 ctxt['database_ssl_ca'] = ca_path
867 elif 'ssl_ca' in rdata:
868- log("Charm not setup for ssl support but ssl ca found")
869+ log("Charm not setup for ssl support but ssl ca found", level=INFO)
870 return ctxt
871+
872 if 'ssl_cert' in rdata:
873 cert_path = os.path.join(
874 ssl_dir, 'db-client.cert')
875 if not os.path.exists(cert_path):
876- log("Waiting 1m for ssl client cert validity")
877+ log("Waiting 1m for ssl client cert validity", level=INFO)
878 time.sleep(60)
879+
880 with open(cert_path, 'w') as fh:
881 fh.write(b64decode(rdata['ssl_cert']))
882+
883 ctxt['database_ssl_cert'] = cert_path
884 key_path = os.path.join(ssl_dir, 'db-client.key')
885 with open(key_path, 'w') as fh:
886 fh.write(b64decode(rdata['ssl_key']))
887+
888 ctxt['database_ssl_key'] = key_path
889+
890 return ctxt
891
892
893@@ -249,9 +260,8 @@
894 interfaces = ['identity-service']
895
896 def __call__(self):
897- log('Generating template context for identity-service')
898+ log('Generating template context for identity-service', level=DEBUG)
899 ctxt = {}
900-
901 for rid in relation_ids('identity-service'):
902 for unit in related_units(rid):
903 rdata = relation_get(rid=rid, unit=unit)
904@@ -259,26 +269,24 @@
905 serv_host = format_ipv6_addr(serv_host) or serv_host
906 auth_host = rdata.get('auth_host')
907 auth_host = format_ipv6_addr(auth_host) or auth_host
908-
909- ctxt = {
910- 'service_port': rdata.get('service_port'),
911- 'service_host': serv_host,
912- 'auth_host': auth_host,
913- 'auth_port': rdata.get('auth_port'),
914- 'admin_tenant_name': rdata.get('service_tenant'),
915- 'admin_user': rdata.get('service_username'),
916- 'admin_password': rdata.get('service_password'),
917- 'service_protocol':
918- rdata.get('service_protocol') or 'http',
919- 'auth_protocol':
920- rdata.get('auth_protocol') or 'http',
921- }
922+ svc_protocol = rdata.get('service_protocol') or 'http'
923+ auth_protocol = rdata.get('auth_protocol') or 'http'
924+ ctxt = {'service_port': rdata.get('service_port'),
925+ 'service_host': serv_host,
926+ 'auth_host': auth_host,
927+ 'auth_port': rdata.get('auth_port'),
928+ 'admin_tenant_name': rdata.get('service_tenant'),
929+ 'admin_user': rdata.get('service_username'),
930+ 'admin_password': rdata.get('service_password'),
931+ 'service_protocol': svc_protocol,
932+ 'auth_protocol': auth_protocol}
933 if context_complete(ctxt):
934 # NOTE(jamespage) this is required for >= icehouse
935 # so a missing value just indicates keystone needs
936 # upgrading
937 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
938 return ctxt
939+
940 return {}
941
942
943@@ -291,21 +299,23 @@
944 self.interfaces = [rel_name]
945
946 def __call__(self):
947- log('Generating template context for amqp')
948+ log('Generating template context for amqp', level=DEBUG)
949 conf = config()
950- user_setting = 'rabbit-user'
951- vhost_setting = 'rabbit-vhost'
952 if self.relation_prefix:
953- user_setting = self.relation_prefix + '-rabbit-user'
954- vhost_setting = self.relation_prefix + '-rabbit-vhost'
955+ user_setting = '%s-rabbit-user' % (self.relation_prefix)
956+ vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
957+ else:
958+ user_setting = 'rabbit-user'
959+ vhost_setting = 'rabbit-vhost'
960
961 try:
962 username = conf[user_setting]
963 vhost = conf[vhost_setting]
964 except KeyError as e:
965- log('Could not generate shared_db context. '
966- 'Missing required charm config options: %s.' % e)
967+ log('Could not generate shared_db context. Missing required charm '
968+ 'config options: %s.' % e, level=ERROR)
969 raise OSContextError
970+
971 ctxt = {}
972 for rid in relation_ids(self.rel_name):
973 ha_vip_only = False
974@@ -319,6 +329,7 @@
975 host = relation_get('private-address', rid=rid, unit=unit)
976 host = format_ipv6_addr(host) or host
977 ctxt['rabbitmq_host'] = host
978+
979 ctxt.update({
980 'rabbitmq_user': username,
981 'rabbitmq_password': relation_get('password', rid=rid,
982@@ -329,6 +340,7 @@
983 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
984 if ssl_port:
985 ctxt['rabbit_ssl_port'] = ssl_port
986+
987 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
988 if ssl_ca:
989 ctxt['rabbit_ssl_ca'] = ssl_ca
990@@ -342,41 +354,45 @@
991 if context_complete(ctxt):
992 if 'rabbit_ssl_ca' in ctxt:
993 if not self.ssl_dir:
994- log(("Charm not setup for ssl support "
995- "but ssl ca found"))
996+ log("Charm not setup for ssl support but ssl ca "
997+ "found", level=INFO)
998 break
999+
1000 ca_path = os.path.join(
1001 self.ssl_dir, 'rabbit-client-ca.pem')
1002 with open(ca_path, 'w') as fh:
1003 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
1004 ctxt['rabbit_ssl_ca'] = ca_path
1005+
1006 # Sufficient information found = break out!
1007 break
1008+
1009 # Used for active/active rabbitmq >= grizzly
1010- if ('clustered' not in ctxt or ha_vip_only) \
1011- and len(related_units(rid)) > 1:
1012+ if (('clustered' not in ctxt or ha_vip_only) and
1013+ len(related_units(rid)) > 1):
1014 rabbitmq_hosts = []
1015 for unit in related_units(rid):
1016 host = relation_get('private-address', rid=rid, unit=unit)
1017 host = format_ipv6_addr(host) or host
1018 rabbitmq_hosts.append(host)
1019- ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
1020+
1021+ ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
1022+
1023 if not context_complete(ctxt):
1024 return {}
1025- else:
1026- return ctxt
1027+
1028+ return ctxt
1029
1030
1031 class CephContext(OSContextGenerator):
1032+ """Generates context for /etc/ceph/ceph.conf templates."""
1033 interfaces = ['ceph']
1034
1035 def __call__(self):
1036- '''This generates context for /etc/ceph/ceph.conf templates'''
1037 if not relation_ids('ceph'):
1038 return {}
1039
1040- log('Generating template context for ceph')
1041-
1042+ log('Generating template context for ceph', level=DEBUG)
1043 mon_hosts = []
1044 auth = None
1045 key = None
1046@@ -385,18 +401,18 @@
1047 for unit in related_units(rid):
1048 auth = relation_get('auth', rid=rid, unit=unit)
1049 key = relation_get('key', rid=rid, unit=unit)
1050- ceph_addr = \
1051- relation_get('ceph-public-address', rid=rid, unit=unit) or \
1052- relation_get('private-address', rid=rid, unit=unit)
1053+ ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
1054+ unit=unit)
1055+ unit_priv_addr = relation_get('private-address', rid=rid,
1056+ unit=unit)
1057+ ceph_addr = ceph_pub_addr or unit_priv_addr
1058 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1059 mon_hosts.append(ceph_addr)
1060
1061- ctxt = {
1062- 'mon_hosts': ' '.join(mon_hosts),
1063- 'auth': auth,
1064- 'key': key,
1065- 'use_syslog': use_syslog
1066- }
1067+ ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
1068+ 'auth': auth,
1069+ 'key': key,
1070+ 'use_syslog': use_syslog}
1071
1072 if not os.path.isdir('/etc/ceph'):
1073 os.mkdir('/etc/ceph')
1074@@ -405,79 +421,68 @@
1075 return {}
1076
1077 ensure_packages(['ceph-common'])
1078-
1079 return ctxt
1080
1081
1082-ADDRESS_TYPES = ['admin', 'internal', 'public']
1083-
1084-
1085 class HAProxyContext(OSContextGenerator):
1086+ """Provides half a context for the haproxy template, which describes
1087+ all peers to be included in the cluster. Each charm needs to include
1088+ its own context generator that describes the port mapping.
1089+ """
1090 interfaces = ['cluster']
1091
1092+ def __init__(self, singlenode_mode=False):
1093+ self.singlenode_mode = singlenode_mode
1094+
1095 def __call__(self):
1096- '''
1097- Builds half a context for the haproxy template, which describes
1098- all peers to be included in the cluster. Each charm needs to include
1099- its own context generator that describes the port mapping.
1100- '''
1101- if not relation_ids('cluster'):
1102+ if not relation_ids('cluster') and not self.singlenode_mode:
1103 return {}
1104
1105+ if config('prefer-ipv6'):
1106+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1107+ else:
1108+ addr = get_host_ip(unit_get('private-address'))
1109+
1110 l_unit = local_unit().replace('/', '-')
1111-
1112- if config('prefer-ipv6'):
1113- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1114- else:
1115- addr = unit_get('private-address')
1116-
1117 cluster_hosts = {}
1118
1119 # NOTE(jamespage): build out map of configured network endpoints
1120 # and associated backends
1121 for addr_type in ADDRESS_TYPES:
1122- laddr = get_address_in_network(
1123- config('os-{}-network'.format(addr_type)))
1124+ cfg_opt = 'os-{}-network'.format(addr_type)
1125+ laddr = get_address_in_network(config(cfg_opt))
1126 if laddr:
1127- cluster_hosts[laddr] = {}
1128- cluster_hosts[laddr]['network'] = "{}/{}".format(
1129- laddr,
1130- get_netmask_for_address(laddr)
1131- )
1132- cluster_hosts[laddr]['backends'] = {}
1133- cluster_hosts[laddr]['backends'][l_unit] = laddr
1134+ netmask = get_netmask_for_address(laddr)
1135+ cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
1136+ netmask),
1137+ 'backends': {l_unit: laddr}}
1138 for rid in relation_ids('cluster'):
1139 for unit in related_units(rid):
1140- _unit = unit.replace('/', '-')
1141 _laddr = relation_get('{}-address'.format(addr_type),
1142 rid=rid, unit=unit)
1143 if _laddr:
1144+ _unit = unit.replace('/', '-')
1145 cluster_hosts[laddr]['backends'][_unit] = _laddr
1146
1147 # NOTE(jamespage) no split configurations found, just use
1148 # private addresses
1149 if not cluster_hosts:
1150- cluster_hosts[addr] = {}
1151- cluster_hosts[addr]['network'] = "{}/{}".format(
1152- addr,
1153- get_netmask_for_address(addr)
1154- )
1155- cluster_hosts[addr]['backends'] = {}
1156- cluster_hosts[addr]['backends'][l_unit] = addr
1157+ netmask = get_netmask_for_address(addr)
1158+ cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
1159+ 'backends': {l_unit: addr}}
1160 for rid in relation_ids('cluster'):
1161 for unit in related_units(rid):
1162- _unit = unit.replace('/', '-')
1163 _laddr = relation_get('private-address',
1164 rid=rid, unit=unit)
1165 if _laddr:
1166+ _unit = unit.replace('/', '-')
1167 cluster_hosts[addr]['backends'][_unit] = _laddr
1168
1169- ctxt = {
1170- 'frontends': cluster_hosts,
1171- }
1172+ ctxt = {'frontends': cluster_hosts}
1173
1174 if config('haproxy-server-timeout'):
1175 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
1176+
1177 if config('haproxy-client-timeout'):
1178 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
1179
1180@@ -491,13 +496,18 @@
1181 ctxt['stat_port'] = ':8888'
1182
1183 for frontend in cluster_hosts:
1184- if len(cluster_hosts[frontend]['backends']) > 1:
1185+ if (len(cluster_hosts[frontend]['backends']) > 1 or
1186+ self.singlenode_mode):
1187 # Enable haproxy when we have enough peers.
1188- log('Ensuring haproxy enabled in /etc/default/haproxy.')
1189+ log('Ensuring haproxy enabled in /etc/default/haproxy.',
1190+ level=DEBUG)
1191 with open('/etc/default/haproxy', 'w') as out:
1192 out.write('ENABLED=1\n')
1193+
1194 return ctxt
1195- log('HAProxy context is incomplete, this unit has no peers.')
1196+
1197+ log('HAProxy context is incomplete, this unit has no peers.',
1198+ level=INFO)
1199 return {}
1200
1201
1202@@ -505,29 +515,28 @@
1203 interfaces = ['image-service']
1204
1205 def __call__(self):
1206- '''
1207- Obtains the glance API server from the image-service relation. Useful
1208- in nova and cinder (currently).
1209- '''
1210- log('Generating template context for image-service.')
1211+ """Obtains the glance API server from the image-service relation.
1212+ Useful in nova and cinder (currently).
1213+ """
1214+ log('Generating template context for image-service.', level=DEBUG)
1215 rids = relation_ids('image-service')
1216 if not rids:
1217 return {}
1218+
1219 for rid in rids:
1220 for unit in related_units(rid):
1221 api_server = relation_get('glance-api-server',
1222 rid=rid, unit=unit)
1223 if api_server:
1224 return {'glance_api_servers': api_server}
1225- log('ImageService context is incomplete. '
1226- 'Missing required relation data.')
1227+
1228+ log("ImageService context is incomplete. Missing required relation "
1229+ "data.", level=INFO)
1230 return {}
1231
1232
1233 class ApacheSSLContext(OSContextGenerator):
1234-
1235- """
1236- Generates a context for an apache vhost configuration that configures
1237+ """Generates a context for an apache vhost configuration that configures
1238 HTTPS reverse proxying for one or many endpoints. Generated context
1239 looks something like::
1240
1241@@ -561,6 +570,7 @@
1242 else:
1243 cert_filename = 'cert'
1244 key_filename = 'key'
1245+
1246 write_file(path=os.path.join(ssl_dir, cert_filename),
1247 content=b64decode(cert))
1248 write_file(path=os.path.join(ssl_dir, key_filename),
1249@@ -572,7 +582,8 @@
1250 install_ca_cert(b64decode(ca_cert))
1251
1252 def canonical_names(self):
1253- '''Figure out which canonical names clients will access this service'''
1254+ """Figure out which canonical names clients will access this service.
1255+ """
1256 cns = []
1257 for r_id in relation_ids('identity-service'):
1258 for unit in related_units(r_id):
1259@@ -580,55 +591,80 @@
1260 for k in rdata:
1261 if k.startswith('ssl_key_'):
1262 cns.append(k.lstrip('ssl_key_'))
1263- return list(set(cns))
1264+
1265+ return sorted(list(set(cns)))
1266+
1267+ def get_network_addresses(self):
1268+ """For each network configured, return corresponding address and vip
1269+ (if available).
1270+
1271+ Returns a list of tuples of the form:
1272+
1273+ [(address_in_net_a, vip_in_net_a),
1274+ (address_in_net_b, vip_in_net_b),
1275+ ...]
1276+
1277+ or, if no vip(s) available:
1278+
1279+ [(address_in_net_a, address_in_net_a),
1280+ (address_in_net_b, address_in_net_b),
1281+ ...]
1282+ """
1283+ addresses = []
1284+ if config('vip'):
1285+ vips = config('vip').split()
1286+ else:
1287+ vips = []
1288+
1289+ for net_type in ['os-internal-network', 'os-admin-network',
1290+ 'os-public-network']:
1291+ addr = get_address_in_network(config(net_type),
1292+ unit_get('private-address'))
1293+ if len(vips) > 1 and is_clustered():
1294+ if not config(net_type):
1295+ log("Multiple networks configured but net_type "
1296+ "is None (%s)." % net_type, level=WARNING)
1297+ continue
1298+
1299+ for vip in vips:
1300+ if is_address_in_network(config(net_type), vip):
1301+ addresses.append((addr, vip))
1302+ break
1303+
1304+ elif is_clustered() and config('vip'):
1305+ addresses.append((addr, config('vip')))
1306+ else:
1307+ addresses.append((addr, addr))
1308+
1309+ return sorted(addresses)
1310
1311 def __call__(self):
1312- if isinstance(self.external_ports, basestring):
1313+ if isinstance(self.external_ports, six.string_types):
1314 self.external_ports = [self.external_ports]
1315- if (not self.external_ports or not https()):
1316+
1317+ if not self.external_ports or not https():
1318 return {}
1319
1320 self.configure_ca()
1321 self.enable_modules()
1322
1323- ctxt = {
1324- 'namespace': self.service_namespace,
1325- 'endpoints': [],
1326- 'ext_ports': []
1327- }
1328+ ctxt = {'namespace': self.service_namespace,
1329+ 'endpoints': [],
1330+ 'ext_ports': []}
1331
1332 for cn in self.canonical_names():
1333 self.configure_cert(cn)
1334
1335- addresses = []
1336- vips = []
1337- if config('vip'):
1338- vips = config('vip').split()
1339-
1340- for network_type in ['os-internal-network',
1341- 'os-admin-network',
1342- 'os-public-network']:
1343- address = get_address_in_network(config(network_type),
1344- unit_get('private-address'))
1345- if len(vips) > 0 and is_clustered():
1346- for vip in vips:
1347- if is_address_in_network(config(network_type),
1348- vip):
1349- addresses.append((address, vip))
1350- break
1351- elif is_clustered():
1352- addresses.append((address, config('vip')))
1353- else:
1354- addresses.append((address, address))
1355-
1356- for address, endpoint in set(addresses):
1357+ addresses = self.get_network_addresses()
1358+ for address, endpoint in sorted(set(addresses)):
1359 for api_port in self.external_ports:
1360 ext_port = determine_apache_port(api_port)
1361 int_port = determine_api_port(api_port)
1362 portmap = (address, endpoint, int(ext_port), int(int_port))
1363 ctxt['endpoints'].append(portmap)
1364 ctxt['ext_ports'].append(int(ext_port))
1365- ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
1366+
1367+ ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
1368 return ctxt
1369
1370
1371@@ -645,21 +681,23 @@
1372
1373 @property
1374 def packages(self):
1375- return neutron_plugin_attribute(
1376- self.plugin, 'packages', self.network_manager)
1377+ return neutron_plugin_attribute(self.plugin, 'packages',
1378+ self.network_manager)
1379
1380 @property
1381 def neutron_security_groups(self):
1382 return None
1383
1384 def _ensure_packages(self):
1385- [ensure_packages(pkgs) for pkgs in self.packages]
1386+ for pkgs in self.packages:
1387+ ensure_packages(pkgs)
1388
1389 def _save_flag_file(self):
1390 if self.network_manager == 'quantum':
1391 _file = '/etc/nova/quantum_plugin.conf'
1392 else:
1393 _file = '/etc/nova/neutron_plugin.conf'
1394+
1395 with open(_file, 'wb') as out:
1396 out.write(self.plugin + '\n')
1397
1398@@ -668,13 +706,11 @@
1399 self.network_manager)
1400 config = neutron_plugin_attribute(self.plugin, 'config',
1401 self.network_manager)
1402- ovs_ctxt = {
1403- 'core_plugin': driver,
1404- 'neutron_plugin': 'ovs',
1405- 'neutron_security_groups': self.neutron_security_groups,
1406- 'local_ip': unit_private_ip(),
1407- 'config': config
1408- }
1409+ ovs_ctxt = {'core_plugin': driver,
1410+ 'neutron_plugin': 'ovs',
1411+ 'neutron_security_groups': self.neutron_security_groups,
1412+ 'local_ip': unit_private_ip(),
1413+ 'config': config}
1414
1415 return ovs_ctxt
1416
1417@@ -683,13 +719,11 @@
1418 self.network_manager)
1419 config = neutron_plugin_attribute(self.plugin, 'config',
1420 self.network_manager)
1421- nvp_ctxt = {
1422- 'core_plugin': driver,
1423- 'neutron_plugin': 'nvp',
1424- 'neutron_security_groups': self.neutron_security_groups,
1425- 'local_ip': unit_private_ip(),
1426- 'config': config
1427- }
1428+ nvp_ctxt = {'core_plugin': driver,
1429+ 'neutron_plugin': 'nvp',
1430+ 'neutron_security_groups': self.neutron_security_groups,
1431+ 'local_ip': unit_private_ip(),
1432+ 'config': config}
1433
1434 return nvp_ctxt
1435
1436@@ -698,35 +732,50 @@
1437 self.network_manager)
1438 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
1439 self.network_manager)
1440- n1kv_ctxt = {
1441- 'core_plugin': driver,
1442- 'neutron_plugin': 'n1kv',
1443- 'neutron_security_groups': self.neutron_security_groups,
1444- 'local_ip': unit_private_ip(),
1445- 'config': n1kv_config,
1446- 'vsm_ip': config('n1kv-vsm-ip'),
1447- 'vsm_username': config('n1kv-vsm-username'),
1448- 'vsm_password': config('n1kv-vsm-password'),
1449- 'restrict_policy_profiles': config(
1450- 'n1kv_restrict_policy_profiles'),
1451- }
1452+ n1kv_user_config_flags = config('n1kv-config-flags')
1453+ restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
1454+ n1kv_ctxt = {'core_plugin': driver,
1455+ 'neutron_plugin': 'n1kv',
1456+ 'neutron_security_groups': self.neutron_security_groups,
1457+ 'local_ip': unit_private_ip(),
1458+ 'config': n1kv_config,
1459+ 'vsm_ip': config('n1kv-vsm-ip'),
1460+ 'vsm_username': config('n1kv-vsm-username'),
1461+ 'vsm_password': config('n1kv-vsm-password'),
1462+ 'restrict_policy_profiles': restrict_policy_profiles}
1463+
1464+ if n1kv_user_config_flags:
1465+ flags = config_flags_parser(n1kv_user_config_flags)
1466+ n1kv_ctxt['user_config_flags'] = flags
1467
1468 return n1kv_ctxt
1469
1470+ def calico_ctxt(self):
1471+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1472+ self.network_manager)
1473+ config = neutron_plugin_attribute(self.plugin, 'config',
1474+ self.network_manager)
1475+ calico_ctxt = {'core_plugin': driver,
1476+ 'neutron_plugin': 'Calico',
1477+ 'neutron_security_groups': self.neutron_security_groups,
1478+ 'local_ip': unit_private_ip(),
1479+ 'config': config}
1480+
1481+ return calico_ctxt
1482+
1483 def neutron_ctxt(self):
1484 if https():
1485 proto = 'https'
1486 else:
1487 proto = 'http'
1488+
1489 if is_clustered():
1490 host = config('vip')
1491 else:
1492 host = unit_get('private-address')
1493- url = '%s://%s:%s' % (proto, host, '9696')
1494- ctxt = {
1495- 'network_manager': self.network_manager,
1496- 'neutron_url': url,
1497- }
1498+
1499+ ctxt = {'network_manager': self.network_manager,
1500+ 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
1501 return ctxt
1502
1503 def __call__(self):
1504@@ -746,6 +795,8 @@
1505 ctxt.update(self.nvp_ctxt())
1506 elif self.plugin == 'n1kv':
1507 ctxt.update(self.n1kv_ctxt())
1508+ elif self.plugin == 'Calico':
1509+ ctxt.update(self.calico_ctxt())
1510
1511 alchemy_flags = config('neutron-alchemy-flags')
1512 if alchemy_flags:
1513@@ -757,23 +808,40 @@
1514
1515
1516 class OSConfigFlagContext(OSContextGenerator):
1517-
1518- """
1519- Responsible for adding user-defined config-flags in charm config to a
1520- template context.
1521+ """Provides support for user-defined config flags.
1522+
1523+ Users can define a comma-seperated list of key=value pairs
1524+ in the charm configuration and apply them at any point in
1525+ any file by using a template flag.
1526+
1527+ Sometimes users might want config flags inserted within a
1528+ specific section so this class allows users to specify the
1529+ template flag name, allowing for multiple template flags
1530+ (sections) within the same context.
1531
1532 NOTE: the value of config-flags may be a comma-separated list of
1533 key=value pairs and some Openstack config files support
1534 comma-separated lists as values.
1535 """
1536
1537+ def __init__(self, charm_flag='config-flags',
1538+ template_flag='user_config_flags'):
1539+ """
1540+ :param charm_flag: config flags in charm configuration.
1541+ :param template_flag: insert point for user-defined flags in template
1542+ file.
1543+ """
1544+ super(OSConfigFlagContext, self).__init__()
1545+ self._charm_flag = charm_flag
1546+ self._template_flag = template_flag
1547+
1548 def __call__(self):
1549- config_flags = config('config-flags')
1550+ config_flags = config(self._charm_flag)
1551 if not config_flags:
1552 return {}
1553
1554- flags = config_flags_parser(config_flags)
1555- return {'user_config_flags': flags}
1556+ return {self._template_flag:
1557+ config_flags_parser(config_flags)}
1558
1559
1560 class SubordinateConfigContext(OSContextGenerator):
1561@@ -817,7 +885,6 @@
1562 },
1563 }
1564 }
1565-
1566 """
1567
1568 def __init__(self, service, config_file, interface):
1569@@ -847,26 +914,28 @@
1570
1571 if self.service not in sub_config:
1572 log('Found subordinate_config on %s but it contained'
1573- 'nothing for %s service' % (rid, self.service))
1574+ 'nothing for %s service' % (rid, self.service),
1575+ level=INFO)
1576 continue
1577
1578 sub_config = sub_config[self.service]
1579 if self.config_file not in sub_config:
1580 log('Found subordinate_config on %s but it contained'
1581- 'nothing for %s' % (rid, self.config_file))
1582+ 'nothing for %s' % (rid, self.config_file),
1583+ level=INFO)
1584 continue
1585
1586 sub_config = sub_config[self.config_file]
1587- for k, v in sub_config.iteritems():
1588+ for k, v in six.iteritems(sub_config):
1589 if k == 'sections':
1590- for section, config_dict in v.iteritems():
1591- log("adding section '%s'" % (section))
1592+ for section, config_dict in six.iteritems(v):
1593+ log("adding section '%s'" % (section),
1594+ level=DEBUG)
1595 ctxt[k][section] = config_dict
1596 else:
1597 ctxt[k] = v
1598
1599- log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1600-
1601+ log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1602 return ctxt
1603
1604
1605@@ -878,15 +947,14 @@
1606 False if config('debug') is None else config('debug')
1607 ctxt['verbose'] = \
1608 False if config('verbose') is None else config('verbose')
1609+
1610 return ctxt
1611
1612
1613 class SyslogContext(OSContextGenerator):
1614
1615 def __call__(self):
1616- ctxt = {
1617- 'use_syslog': config('use-syslog')
1618- }
1619+ ctxt = {'use_syslog': config('use-syslog')}
1620 return ctxt
1621
1622
1623@@ -894,10 +962,56 @@
1624
1625 def __call__(self):
1626 if config('prefer-ipv6'):
1627- return {
1628- 'bind_host': '::'
1629- }
1630+ return {'bind_host': '::'}
1631 else:
1632- return {
1633- 'bind_host': '0.0.0.0'
1634- }
1635+ return {'bind_host': '0.0.0.0'}
1636+
1637+
1638+class WorkerConfigContext(OSContextGenerator):
1639+
1640+ @property
1641+ def num_cpus(self):
1642+ try:
1643+ from psutil import NUM_CPUS
1644+ except ImportError:
1645+ apt_install('python-psutil', fatal=True)
1646+ from psutil import NUM_CPUS
1647+
1648+ return NUM_CPUS
1649+
1650+ def __call__(self):
1651+ multiplier = config('worker-multiplier') or 0
1652+ ctxt = {"workers": self.num_cpus * multiplier}
1653+ return ctxt
1654+
1655+
1656+class ZeroMQContext(OSContextGenerator):
1657+ interfaces = ['zeromq-configuration']
1658+
1659+ def __call__(self):
1660+ ctxt = {}
1661+ if is_relation_made('zeromq-configuration', 'host'):
1662+ for rid in relation_ids('zeromq-configuration'):
1663+ for unit in related_units(rid):
1664+ ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1665+ ctxt['zmq_host'] = relation_get('host', unit, rid)
1666+
1667+ return ctxt
1668+
1669+
1670+class NotificationDriverContext(OSContextGenerator):
1671+
1672+ def __init__(self, zmq_relation='zeromq-configuration',
1673+ amqp_relation='amqp'):
1674+ """
1675+ :param zmq_relation: Name of Zeromq relation to check
1676+ """
1677+ self.zmq_relation = zmq_relation
1678+ self.amqp_relation = amqp_relation
1679+
1680+ def __call__(self):
1681+ ctxt = {'notifications': 'False'}
1682+ if is_relation_made(self.amqp_relation):
1683+ ctxt['notifications'] = "True"
1684+
1685+ return ctxt
1686
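A minimal sketch of the dictionaries the new WorkerConfigContext and NotificationDriverContext above produce; the CPU count, multiplier and relation state are assumed values for illustration:

    # Assumed values; the real contexts read psutil.NUM_CPUS, the charm's
    # 'worker-multiplier' option and the amqp relation state.
    num_cpus = 4
    multiplier = 2
    worker_ctxt = {'workers': num_cpus * multiplier}    # -> {'workers': 8}

    amqp_related = True
    notify_ctxt = {'notifications': 'True' if amqp_related else 'False'}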
1687=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
1688--- hooks/charmhelpers/contrib/openstack/ip.py 2014-09-22 20:22:04 +0000
1689+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-12-11 17:56:54 +0000
1690@@ -2,21 +2,19 @@
1691 config,
1692 unit_get,
1693 )
1694-
1695 from charmhelpers.contrib.network.ip import (
1696 get_address_in_network,
1697 is_address_in_network,
1698 is_ipv6,
1699 get_ipv6_addr,
1700 )
1701-
1702 from charmhelpers.contrib.hahelpers.cluster import is_clustered
1703
1704 PUBLIC = 'public'
1705 INTERNAL = 'int'
1706 ADMIN = 'admin'
1707
1708-_address_map = {
1709+ADDRESS_MAP = {
1710 PUBLIC: {
1711 'config': 'os-public-network',
1712 'fallback': 'public-address'
1713@@ -33,16 +31,14 @@
1714
1715
1716 def canonical_url(configs, endpoint_type=PUBLIC):
1717- '''
1718- Returns the correct HTTP URL to this host given the state of HTTPS
1719+ """Returns the correct HTTP URL to this host given the state of HTTPS
1720 configuration, hacluster and charm configuration.
1721
1722- :configs OSTemplateRenderer: A config tempating object to inspect for
1723- a complete https context.
1724- :endpoint_type str: The endpoint type to resolve.
1725-
1726- :returns str: Base URL for services on the current service unit.
1727- '''
1728+ :param configs: OSTemplateRenderer config templating object to inspect
1729+ for a complete https context.
1730+ :param endpoint_type: str endpoint type to resolve.
1731+ :param returns: str base URL for services on the current service unit.
1732+ """
1733 scheme = 'http'
1734 if 'https' in configs.complete_contexts():
1735 scheme = 'https'
1736@@ -53,27 +49,45 @@
1737
1738
1739 def resolve_address(endpoint_type=PUBLIC):
1740+ """Return unit address depending on net config.
1741+
1742+ If unit is clustered with vip(s) and has net splits defined, return vip on
1743+ correct network. If clustered with no nets defined, return primary vip.
1744+
1745+ If not clustered, return unit address ensuring address is on configured net
1746+ split if one is configured.
1747+
1748+ :param endpoint_type: Network endpoint type
1749+ """
1750 resolved_address = None
1751- if is_clustered():
1752- if config(_address_map[endpoint_type]['config']) is None:
1753- # Assume vip is simple and pass back directly
1754- resolved_address = config('vip')
1755+ vips = config('vip')
1756+ if vips:
1757+ vips = vips.split()
1758+
1759+ net_type = ADDRESS_MAP[endpoint_type]['config']
1760+ net_addr = config(net_type)
1761+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
1762+ clustered = is_clustered()
1763+ if clustered:
1764+ if not net_addr:
1765+ # If no net-splits defined, we expect a single vip
1766+ resolved_address = vips[0]
1767 else:
1768- for vip in config('vip').split():
1769- if is_address_in_network(
1770- config(_address_map[endpoint_type]['config']),
1771- vip):
1772+ for vip in vips:
1773+ if is_address_in_network(net_addr, vip):
1774 resolved_address = vip
1775+ break
1776 else:
1777 if config('prefer-ipv6'):
1778- fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1779+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
1780 else:
1781- fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1782- resolved_address = get_address_in_network(
1783- config(_address_map[endpoint_type]['config']), fallback_addr)
1784+ fallback_addr = unit_get(net_fallback)
1785+
1786+ resolved_address = get_address_in_network(net_addr, fallback_addr)
1787
1788 if resolved_address is None:
1789- raise ValueError('Unable to resolve a suitable IP address'
1790- ' based on charm state and configuration')
1791- else:
1792- return resolved_address
1793+ raise ValueError("Unable to resolve a suitable IP address based on "
1794+ "charm state and configuration. (net_type=%s, "
1795+ "clustered=%s)" % (net_type, clustered))
1796+
1797+ return resolved_address
1798
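The reworked resolve_address() above reduces to picking the first vip that falls inside the configured network split; a standalone sketch of that selection, with assumed addresses:

    from charmhelpers.contrib.network.ip import is_address_in_network

    vips = ['10.0.1.100', '10.0.2.100']   # assumed 'vip' config, already split
    net_addr = '10.0.2.0/24'              # assumed 'os-public-network' value
    resolved = next((vip for vip in vips
                     if is_address_in_network(net_addr, vip)), None)
    # resolved -> '10.0.2.100'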
1799=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1800--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-28 14:38:51 +0000
1801+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-12-11 17:56:54 +0000
1802@@ -14,7 +14,7 @@
1803 def headers_package():
1804 """Ensures correct linux-headers for running kernel are installed,
1805 for building DKMS package"""
1806- kver = check_output(['uname', '-r']).strip()
1807+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
1808 return 'linux-headers-%s' % kver
1809
1810 QUANTUM_CONF_DIR = '/etc/quantum'
1811@@ -22,7 +22,7 @@
1812
1813 def kernel_version():
1814 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
1815- kver = check_output(['uname', '-r']).strip()
1816+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
1817 kver = kver.split('.')
1818 return (int(kver[0]), int(kver[1]))
1819
1820@@ -138,10 +138,25 @@
1821 relation_prefix='neutron',
1822 ssl_dir=NEUTRON_CONF_DIR)],
1823 'services': [],
1824- 'packages': [['neutron-plugin-cisco']],
1825+ 'packages': [[headers_package()] + determine_dkms_package(),
1826+ ['neutron-plugin-cisco']],
1827 'server_packages': ['neutron-server',
1828 'neutron-plugin-cisco'],
1829 'server_services': ['neutron-server']
1830+ },
1831+ 'Calico': {
1832+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
1833+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
1834+ 'contexts': [
1835+ context.SharedDBContext(user=config('neutron-database-user'),
1836+ database=config('neutron-database'),
1837+ relation_prefix='neutron',
1838+ ssl_dir=NEUTRON_CONF_DIR)],
1839+ 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
1840+ 'packages': [[headers_package()] + determine_dkms_package(),
1841+ ['calico-compute', 'bird', 'neutron-dhcp-agent']],
1842+ 'server_packages': ['neutron-server', 'calico-control'],
1843+ 'server_services': ['neutron-server']
1844 }
1845 }
1846 if release >= 'icehouse':
1847@@ -162,7 +177,8 @@
1848 elif manager == 'neutron':
1849 plugins = neutron_plugins()
1850 else:
1851- log('Error: Network manager does not support plugins.')
1852+ log("Network manager '%s' does not support plugins." % (manager),
1853+ level=ERROR)
1854 raise Exception
1855
1856 try:
1857
1858=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
1859--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-10-06 21:57:43 +0000
1860+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-12-11 17:56:54 +0000
1861@@ -35,7 +35,7 @@
1862 stats auth admin:password
1863
1864 {% if frontends -%}
1865-{% for service, ports in service_ports.iteritems() -%}
1866+{% for service, ports in service_ports.items() -%}
1867 frontend tcp-in_{{ service }}
1868 bind *:{{ ports[0] }}
1869 bind :::{{ ports[0] }}
1870@@ -46,7 +46,7 @@
1871 {% for frontend in frontends -%}
1872 backend {{ service }}_{{ frontend }}
1873 balance leastconn
1874- {% for unit, address in frontends[frontend]['backends'].iteritems() -%}
1875+ {% for unit, address in frontends[frontend]['backends'].items() -%}
1876 server {{ unit }} {{ address }}:{{ ports[1] }} check
1877 {% endfor %}
1878 {% endfor -%}
1879
1880=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1881--- hooks/charmhelpers/contrib/openstack/templating.py 2014-07-28 14:38:51 +0000
1882+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-12-11 17:56:54 +0000
1883@@ -1,13 +1,13 @@
1884 import os
1885
1886+import six
1887+
1888 from charmhelpers.fetch import apt_install
1889-
1890 from charmhelpers.core.hookenv import (
1891 log,
1892 ERROR,
1893 INFO
1894 )
1895-
1896 from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
1897
1898 try:
1899@@ -43,7 +43,7 @@
1900 order by OpenStack release.
1901 """
1902 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1903- for rel in OPENSTACK_CODENAMES.itervalues()]
1904+ for rel in six.itervalues(OPENSTACK_CODENAMES)]
1905
1906 if not os.path.isdir(templates_dir):
1907 log('Templates directory not found @ %s.' % templates_dir,
1908@@ -258,7 +258,7 @@
1909 """
1910 Write out all registered config files.
1911 """
1912- [self.write(k) for k in self.templates.iterkeys()]
1913+ [self.write(k) for k in six.iterkeys(self.templates)]
1914
1915 def set_release(self, openstack_release):
1916 """
1917@@ -275,5 +275,5 @@
1918 '''
1919 interfaces = []
1920 [interfaces.extend(i.complete_contexts())
1921- for i in self.templates.itervalues()]
1922+ for i in six.itervalues(self.templates)]
1923 return interfaces
1924
1925=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1926--- hooks/charmhelpers/contrib/openstack/utils.py 2014-10-06 21:57:43 +0000
1927+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-12-11 17:56:54 +0000
1928@@ -2,6 +2,7 @@
1929
1930 # Common python helper functions used for OpenStack charms.
1931 from collections import OrderedDict
1932+from functools import wraps
1933
1934 import subprocess
1935 import json
1936@@ -9,11 +10,13 @@
1937 import socket
1938 import sys
1939
1940+import six
1941+import yaml
1942+
1943 from charmhelpers.core.hookenv import (
1944 config,
1945 log as juju_log,
1946 charm_dir,
1947- ERROR,
1948 INFO,
1949 relation_ids,
1950 relation_set
1951@@ -30,7 +33,8 @@
1952 )
1953
1954 from charmhelpers.core.host import lsb_release, mounts, umount
1955-from charmhelpers.fetch import apt_install, apt_cache
1956+from charmhelpers.fetch import apt_install, apt_cache, install_remote
1957+from charmhelpers.contrib.python.packages import pip_install
1958 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
1959 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
1960
1961@@ -112,7 +116,7 @@
1962
1963 # Best guess match based on deb string provided
1964 if src.startswith('deb') or src.startswith('ppa'):
1965- for k, v in OPENSTACK_CODENAMES.iteritems():
1966+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
1967 if v in src:
1968 return v
1969
1970@@ -133,7 +137,7 @@
1971
1972 def get_os_version_codename(codename):
1973 '''Determine OpenStack version number from codename.'''
1974- for k, v in OPENSTACK_CODENAMES.iteritems():
1975+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
1976 if v == codename:
1977 return k
1978 e = 'Could not derive OpenStack version for '\
1979@@ -193,7 +197,7 @@
1980 else:
1981 vers_map = OPENSTACK_CODENAMES
1982
1983- for version, cname in vers_map.iteritems():
1984+ for version, cname in six.iteritems(vers_map):
1985 if cname == codename:
1986 return version
1987 # e = "Could not determine OpenStack version for package: %s" % pkg
1988@@ -317,7 +321,7 @@
1989 rc_script.write(
1990 "#!/bin/bash\n")
1991 [rc_script.write('export %s=%s\n' % (u, p))
1992- for u, p in env_vars.iteritems() if u != "script_path"]
1993+ for u, p in six.iteritems(env_vars) if u != "script_path"]
1994
1995
1996 def openstack_upgrade_available(package):
1997@@ -350,8 +354,8 @@
1998 '''
1999 _none = ['None', 'none', None]
2000 if (block_device in _none):
2001- error_out('prepare_storage(): Missing required input: '
2002- 'block_device=%s.' % block_device, level=ERROR)
2003+ error_out('prepare_storage(): Missing required input: block_device=%s.'
2004+ % block_device)
2005
2006 if block_device.startswith('/dev/'):
2007 bdev = block_device
2008@@ -367,8 +371,7 @@
2009 bdev = '/dev/%s' % block_device
2010
2011 if not is_block_device(bdev):
2012- error_out('Failed to locate valid block device at %s' % bdev,
2013- level=ERROR)
2014+ error_out('Failed to locate valid block device at %s' % bdev)
2015
2016 return bdev
2017
2018@@ -417,7 +420,7 @@
2019
2020 if isinstance(address, dns.name.Name):
2021 rtype = 'PTR'
2022- elif isinstance(address, basestring):
2023+ elif isinstance(address, six.string_types):
2024 rtype = 'A'
2025 else:
2026 return None
2027@@ -468,6 +471,14 @@
2028 return result.split('.')[0]
2029
2030
2031+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
2032+ mm_map = {}
2033+ if os.path.isfile(mm_file):
2034+ with open(mm_file, 'r') as f:
2035+ mm_map = json.load(f)
2036+ return mm_map
2037+
2038+
2039 def sync_db_with_multi_ipv6_addresses(database, database_user,
2040 relation_prefix=None):
2041 hosts = get_ipv6_addr(dynamic_only=False)
2042@@ -477,10 +488,132 @@
2043 'hostname': json.dumps(hosts)}
2044
2045 if relation_prefix:
2046- keys = kwargs.keys()
2047- for key in keys:
2048+ for key in list(kwargs.keys()):
2049 kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
2050 del kwargs[key]
2051
2052 for rid in relation_ids('shared-db'):
2053 relation_set(relation_id=rid, **kwargs)
2054+
2055+
2056+def os_requires_version(ostack_release, pkg):
2057+ """
2058+ Decorator for hook to specify minimum supported release
2059+ """
2060+ def wrap(f):
2061+ @wraps(f)
2062+ def wrapped_f(*args):
2063+ if os_release(pkg) < ostack_release:
2064+ raise Exception("This hook is not supported on releases"
2065+ " before %s" % ostack_release)
2066+ f(*args)
2067+ return wrapped_f
2068+ return wrap
2069+
2070+
2071+def git_install_requested():
2072+ """Returns true if openstack-origin-git is specified."""
2073+ return config('openstack-origin-git') != "None"
2074+
2075+
2076+requirements_dir = None
2077+
2078+
2079+def git_clone_and_install(file_name, core_project):
2080+ """Clone/install all OpenStack repos specified in yaml config file."""
2081+ global requirements_dir
2082+
2083+ if file_name == "None":
2084+ return
2085+
2086+ yaml_file = os.path.join(charm_dir(), file_name)
2087+
2088+ # clone/install the requirements project first
2089+ installed = _git_clone_and_install_subset(yaml_file,
2090+ whitelist=['requirements'])
2091+ if 'requirements' not in installed:
2092+ error_out('requirements git repository must be specified')
2093+
2094+ # clone/install all other projects except requirements and the core project
2095+ blacklist = ['requirements', core_project]
2096+ _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
2097+ update_requirements=True)
2098+
2099+ # clone/install the core project
2100+ whitelist = [core_project]
2101+ installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
2102+ update_requirements=True)
2103+ if core_project not in installed:
2104+ error_out('{} git repository must be specified'.format(core_project))
2105+
2106+
2107+def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
2108+ update_requirements=False):
2109+ """Clone/install subset of OpenStack repos specified in yaml config file."""
2110+ global requirements_dir
2111+ installed = []
2112+
2113+ with open(yaml_file, 'r') as fd:
2114+ projects = yaml.load(fd)
2115+ for proj, val in projects.items():
2116+ # The project subset is chosen based on the following 3 rules:
2117+ # 1) If project is in blacklist, we don't clone/install it, period.
2118+ # 2) If whitelist is empty, we clone/install everything else.
2119+ # 3) If whitelist is not empty, we clone/install everything in the
2120+ # whitelist.
2121+ if proj in blacklist:
2122+ continue
2123+ if whitelist and proj not in whitelist:
2124+ continue
2125+ repo = val['repository']
2126+ branch = val['branch']
2127+ repo_dir = _git_clone_and_install_single(repo, branch,
2128+ update_requirements)
2129+ if proj == 'requirements':
2130+ requirements_dir = repo_dir
2131+ installed.append(proj)
2132+ return installed
2133+
2134+
2135+def _git_clone_and_install_single(repo, branch, update_requirements=False):
2136+ """Clone and install a single git repository."""
2137+ dest_parent_dir = "/mnt/openstack-git/"
2138+ dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
2139+
2140+ if not os.path.exists(dest_parent_dir):
2141+ juju_log('Host dir not mounted at {}. '
2142+ 'Creating directory there instead.'.format(dest_parent_dir))
2143+ os.mkdir(dest_parent_dir)
2144+
2145+ if not os.path.exists(dest_dir):
2146+ juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
2147+ repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
2148+ else:
2149+ repo_dir = dest_dir
2150+
2151+ if update_requirements:
2152+ if not requirements_dir:
2153+ error_out('requirements repo must be cloned before '
2154+ 'updating from global requirements.')
2155+ _git_update_requirements(repo_dir, requirements_dir)
2156+
2157+ juju_log('Installing git repo from dir: {}'.format(repo_dir))
2158+ pip_install(repo_dir)
2159+
2160+ return repo_dir
2161+
2162+
2163+def _git_update_requirements(package_dir, reqs_dir):
2164+ """Update from global requirements.
2165+
2166+ Update an OpenStack git directory's requirements.txt and
2167+ test-requirements.txt from global-requirements.txt."""
2168+ orig_dir = os.getcwd()
2169+ os.chdir(reqs_dir)
2170+ cmd = "python update.py {}".format(package_dir)
2171+ try:
2172+ subprocess.check_call(cmd.split(' '))
2173+ except subprocess.CalledProcessError:
2174+ package = os.path.basename(package_dir)
2175+ error_out("Error updating {} from global-requirements.txt".format(package))
2176+ os.chdir(orig_dir)
2177
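A short sketch of how the new os_requires_version decorator is intended to be applied to a hook; the hook name and package are assumed for illustration:

    from charmhelpers.contrib.openstack.utils import os_requires_version

    @os_requires_version('icehouse', 'nova-common')
    def config_changed():
        # Runs only when os_release('nova-common') >= 'icehouse'; otherwise
        # the wrapper above raises an Exception before the hook body executes.
        pass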
2178=== added directory 'hooks/charmhelpers/contrib/python'
2179=== added file 'hooks/charmhelpers/contrib/python/__init__.py'
2180=== added file 'hooks/charmhelpers/contrib/python/packages.py'
2181--- hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000
2182+++ hooks/charmhelpers/contrib/python/packages.py 2014-12-11 17:56:54 +0000
2183@@ -0,0 +1,77 @@
2184+#!/usr/bin/env python
2185+# coding: utf-8
2186+
2187+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
2188+
2189+from charmhelpers.fetch import apt_install, apt_update
2190+from charmhelpers.core.hookenv import log
2191+
2192+try:
2193+ from pip import main as pip_execute
2194+except ImportError:
2195+ apt_update()
2196+ apt_install('python-pip')
2197+ from pip import main as pip_execute
2198+
2199+
2200+def parse_options(given, available):
2201+ """Given a set of options, check if available"""
2202+ for key, value in sorted(given.items()):
2203+ if key in available:
2204+ yield "--{0}={1}".format(key, value)
2205+
2206+
2207+def pip_install_requirements(requirements, **options):
2208+ """Install a requirements file """
2209+ command = ["install"]
2210+
2211+ available_options = ('proxy', 'src', 'log', )
2212+ for option in parse_options(options, available_options):
2213+ command.append(option)
2214+
2215+ command.append("-r {0}".format(requirements))
2216+ log("Installing from file: {} with options: {}".format(requirements,
2217+ command))
2218+ pip_execute(command)
2219+
2220+
2221+def pip_install(package, fatal=False, **options):
2222+ """Install a python package"""
2223+ command = ["install"]
2224+
2225+ available_options = ('proxy', 'src', 'log', "index-url", )
2226+ for option in parse_options(options, available_options):
2227+ command.append(option)
2228+
2229+ if isinstance(package, list):
2230+ command.extend(package)
2231+ else:
2232+ command.append(package)
2233+
2234+ log("Installing {} package with options: {}".format(package,
2235+ command))
2236+ pip_execute(command)
2237+
2238+
2239+def pip_uninstall(package, **options):
2240+ """Uninstall a python package"""
2241+ command = ["uninstall", "-q", "-y"]
2242+
2243+ available_options = ('proxy', 'log', )
2244+ for option in parse_options(options, available_options):
2245+ command.append(option)
2246+
2247+ if isinstance(package, list):
2248+ command.extend(package)
2249+ else:
2250+ command.append(package)
2251+
2252+ log("Uninstalling {} package with options: {}".format(package,
2253+ command))
2254+ pip_execute(command)
2255+
2256+
2257+def pip_list():
2258+ """Returns the list of current python installed packages
2259+ """
2260+ return pip_execute(["list"])
2261
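A minimal usage sketch for the new pip helpers, which wrap pip.main; the package name and proxy URL are assumed for illustration:

    from charmhelpers.contrib.python.packages import pip_install, pip_uninstall

    # Roughly equivalent to: pip install --proxy=http://squid.internal:3128 mock
    pip_install('mock', proxy='http://squid.internal:3128')
    pip_uninstall('mock')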
2262=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
2263--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-28 14:38:51 +0000
2264+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-12-11 17:56:54 +0000
2265@@ -16,19 +16,18 @@
2266 from subprocess import (
2267 check_call,
2268 check_output,
2269- CalledProcessError
2270+ CalledProcessError,
2271 )
2272-
2273 from charmhelpers.core.hookenv import (
2274 relation_get,
2275 relation_ids,
2276 related_units,
2277 log,
2278+ DEBUG,
2279 INFO,
2280 WARNING,
2281- ERROR
2282+ ERROR,
2283 )
2284-
2285 from charmhelpers.core.host import (
2286 mount,
2287 mounts,
2288@@ -37,7 +36,6 @@
2289 service_running,
2290 umount,
2291 )
2292-
2293 from charmhelpers.fetch import (
2294 apt_install,
2295 )
2296@@ -56,99 +54,85 @@
2297
2298
2299 def install():
2300- ''' Basic Ceph client installation '''
2301+ """Basic Ceph client installation."""
2302 ceph_dir = "/etc/ceph"
2303 if not os.path.exists(ceph_dir):
2304 os.mkdir(ceph_dir)
2305+
2306 apt_install('ceph-common', fatal=True)
2307
2308
2309 def rbd_exists(service, pool, rbd_img):
2310- ''' Check to see if a RADOS block device exists '''
2311+ """Check to see if a RADOS block device exists."""
2312 try:
2313- out = check_output(['rbd', 'list', '--id', service,
2314- '--pool', pool])
2315+ out = check_output(['rbd', 'list', '--id',
2316+ service, '--pool', pool]).decode('UTF-8')
2317 except CalledProcessError:
2318 return False
2319- else:
2320- return rbd_img in out
2321+
2322+ return rbd_img in out
2323
2324
2325 def create_rbd_image(service, pool, image, sizemb):
2326- ''' Create a new RADOS block device '''
2327- cmd = [
2328- 'rbd',
2329- 'create',
2330- image,
2331- '--size',
2332- str(sizemb),
2333- '--id',
2334- service,
2335- '--pool',
2336- pool
2337- ]
2338+ """Create a new RADOS block device."""
2339+ cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
2340+ '--pool', pool]
2341 check_call(cmd)
2342
2343
2344 def pool_exists(service, name):
2345- ''' Check to see if a RADOS pool already exists '''
2346+ """Check to see if a RADOS pool already exists."""
2347 try:
2348- out = check_output(['rados', '--id', service, 'lspools'])
2349+ out = check_output(['rados', '--id', service,
2350+ 'lspools']).decode('UTF-8')
2351 except CalledProcessError:
2352 return False
2353- else:
2354- return name in out
2355+
2356+ return name in out
2357
2358
2359 def get_osds(service):
2360- '''
2361- Return a list of all Ceph Object Storage Daemons
2362- currently in the cluster
2363- '''
2364+ """Return a list of all Ceph Object Storage Daemons currently in the
2365+ cluster.
2366+ """
2367 version = ceph_version()
2368 if version and version >= '0.56':
2369 return json.loads(check_output(['ceph', '--id', service,
2370- 'osd', 'ls', '--format=json']))
2371- else:
2372- return None
2373-
2374-
2375-def create_pool(service, name, replicas=2):
2376- ''' Create a new RADOS pool '''
2377+ 'osd', 'ls',
2378+ '--format=json']).decode('UTF-8'))
2379+
2380+ return None
2381+
2382+
2383+def create_pool(service, name, replicas=3):
2384+ """Create a new RADOS pool."""
2385 if pool_exists(service, name):
2386 log("Ceph pool {} already exists, skipping creation".format(name),
2387 level=WARNING)
2388 return
2389+
2390 # Calculate the number of placement groups based
2391 # on upstream recommended best practices.
2392 osds = get_osds(service)
2393 if osds:
2394- pgnum = (len(osds) * 100 / replicas)
2395+ pgnum = (len(osds) * 100 // replicas)
2396 else:
2397 # NOTE(james-page): Default to 200 for older ceph versions
2398 # which don't support OSD query from cli
2399 pgnum = 200
2400- cmd = [
2401- 'ceph', '--id', service,
2402- 'osd', 'pool', 'create',
2403- name, str(pgnum)
2404- ]
2405+
2406+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
2407 check_call(cmd)
2408- cmd = [
2409- 'ceph', '--id', service,
2410- 'osd', 'pool', 'set', name,
2411- 'size', str(replicas)
2412- ]
2413+
2414+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
2415+ str(replicas)]
2416 check_call(cmd)
2417
2418
2419 def delete_pool(service, name):
2420- ''' Delete a RADOS pool from ceph '''
2421- cmd = [
2422- 'ceph', '--id', service,
2423- 'osd', 'pool', 'delete',
2424- name, '--yes-i-really-really-mean-it'
2425- ]
2426+ """Delete a RADOS pool from ceph."""
2427+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
2428+ '--yes-i-really-really-mean-it']
2429 check_call(cmd)
2430
2431
2432@@ -161,44 +145,43 @@
2433
2434
2435 def create_keyring(service, key):
2436- ''' Create a new Ceph keyring containing key'''
2437+ """Create a new Ceph keyring containing key."""
2438 keyring = _keyring_path(service)
2439 if os.path.exists(keyring):
2440- log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
2441+ log('Ceph keyring exists at %s.' % keyring, level=WARNING)
2442 return
2443- cmd = [
2444- 'ceph-authtool',
2445- keyring,
2446- '--create-keyring',
2447- '--name=client.{}'.format(service),
2448- '--add-key={}'.format(key)
2449- ]
2450+
2451+ cmd = ['ceph-authtool', keyring, '--create-keyring',
2452+ '--name=client.{}'.format(service), '--add-key={}'.format(key)]
2453 check_call(cmd)
2454- log('ceph: Created new ring at %s.' % keyring, level=INFO)
2455+ log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
2456
2457
2458 def create_key_file(service, key):
2459- ''' Create a file containing key '''
2460+ """Create a file containing key."""
2461 keyfile = _keyfile_path(service)
2462 if os.path.exists(keyfile):
2463- log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
2464+ log('Keyfile exists at %s.' % keyfile, level=WARNING)
2465 return
2466+
2467 with open(keyfile, 'w') as fd:
2468 fd.write(key)
2469- log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
2470+
2471+ log('Created new keyfile at %s.' % keyfile, level=INFO)
2472
2473
2474 def get_ceph_nodes():
2475- ''' Query named relation 'ceph' to detemine current nodes '''
2476+ """Query named relation 'ceph' to determine current nodes."""
2477 hosts = []
2478 for r_id in relation_ids('ceph'):
2479 for unit in related_units(r_id):
2480 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
2481+
2482 return hosts
2483
2484
2485 def configure(service, key, auth, use_syslog):
2486- ''' Perform basic configuration of Ceph '''
2487+ """Perform basic configuration of Ceph."""
2488 create_keyring(service, key)
2489 create_key_file(service, key)
2490 hosts = get_ceph_nodes()
2491@@ -211,17 +194,17 @@
2492
2493
2494 def image_mapped(name):
2495- ''' Determine whether a RADOS block device is mapped locally '''
2496+ """Determine whether a RADOS block device is mapped locally."""
2497 try:
2498- out = check_output(['rbd', 'showmapped'])
2499+ out = check_output(['rbd', 'showmapped']).decode('UTF-8')
2500 except CalledProcessError:
2501 return False
2502- else:
2503- return name in out
2504+
2505+ return name in out
2506
2507
2508 def map_block_storage(service, pool, image):
2509- ''' Map a RADOS block device for local use '''
2510+ """Map a RADOS block device for local use."""
2511 cmd = [
2512 'rbd',
2513 'map',
2514@@ -235,31 +218,32 @@
2515
2516
2517 def filesystem_mounted(fs):
2518- ''' Determine whether a filesytems is already mounted '''
2519+ """Determine whether a filesytems is already mounted."""
2520 return fs in [f for f, m in mounts()]
2521
2522
2523 def make_filesystem(blk_device, fstype='ext4', timeout=10):
2524- ''' Make a new filesystem on the specified block device '''
2525+ """Make a new filesystem on the specified block device."""
2526 count = 0
2527 e_noent = os.errno.ENOENT
2528 while not os.path.exists(blk_device):
2529 if count >= timeout:
2530- log('ceph: gave up waiting on block device %s' % blk_device,
2531+ log('Gave up waiting on block device %s' % blk_device,
2532 level=ERROR)
2533 raise IOError(e_noent, os.strerror(e_noent), blk_device)
2534- log('ceph: waiting for block device %s to appear' % blk_device,
2535- level=INFO)
2536+
2537+ log('Waiting for block device %s to appear' % blk_device,
2538+ level=DEBUG)
2539 count += 1
2540 time.sleep(1)
2541 else:
2542- log('ceph: Formatting block device %s as filesystem %s.' %
2543+ log('Formatting block device %s as filesystem %s.' %
2544 (blk_device, fstype), level=INFO)
2545 check_call(['mkfs', '-t', fstype, blk_device])
2546
2547
2548 def place_data_on_block_device(blk_device, data_src_dst):
2549- ''' Migrate data in data_src_dst to blk_device and then remount '''
2550+ """Migrate data in data_src_dst to blk_device and then remount."""
2551 # mount block device into /mnt
2552 mount(blk_device, '/mnt')
2553 # copy data to /mnt
2554@@ -279,8 +263,8 @@
2555
2556 # TODO: re-use
2557 def modprobe(module):
2558- ''' Load a kernel module and configure for auto-load on reboot '''
2559- log('ceph: Loading kernel module', level=INFO)
2560+ """Load a kernel module and configure for auto-load on reboot."""
2561+ log('Loading kernel module', level=INFO)
2562 cmd = ['modprobe', module]
2563 check_call(cmd)
2564 with open('/etc/modules', 'r+') as modules:
2565@@ -289,7 +273,7 @@
2566
2567
2568 def copy_files(src, dst, symlinks=False, ignore=None):
2569- ''' Copy files from src to dst '''
2570+ """Copy files from src to dst."""
2571 for item in os.listdir(src):
2572 s = os.path.join(src, item)
2573 d = os.path.join(dst, item)
2574@@ -300,9 +284,9 @@
2575
2576
2577 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
2578- blk_device, fstype, system_services=[]):
2579- """
2580- NOTE: This function must only be called from a single service unit for
2581+ blk_device, fstype, system_services=[],
2582+ replicas=3):
2583+ """NOTE: This function must only be called from a single service unit for
2584 the same rbd_img otherwise data loss will occur.
2585
2586 Ensures given pool and RBD image exists, is mapped to a block device,
2587@@ -316,15 +300,16 @@
2588 """
2589 # Ensure pool, RBD image, RBD mappings are in place.
2590 if not pool_exists(service, pool):
2591- log('ceph: Creating new pool {}.'.format(pool))
2592- create_pool(service, pool)
2593+ log('Creating new pool {}.'.format(pool), level=INFO)
2594+ create_pool(service, pool, replicas=replicas)
2595
2596 if not rbd_exists(service, pool, rbd_img):
2597- log('ceph: Creating RBD image ({}).'.format(rbd_img))
2598+ log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
2599 create_rbd_image(service, pool, rbd_img, sizemb)
2600
2601 if not image_mapped(rbd_img):
2602- log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
2603+ log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
2604+ level=INFO)
2605 map_block_storage(service, pool, rbd_img)
2606
2607 # make file system
2608@@ -339,45 +324,47 @@
2609
2610 for svc in system_services:
2611 if service_running(svc):
2612- log('ceph: Stopping services {} prior to migrating data.'
2613- .format(svc))
2614+ log('Stopping services {} prior to migrating data.'
2615+ .format(svc), level=DEBUG)
2616 service_stop(svc)
2617
2618 place_data_on_block_device(blk_device, mount_point)
2619
2620 for svc in system_services:
2621- log('ceph: Starting service {} after migrating data.'
2622- .format(svc))
2623+ log('Starting service {} after migrating data.'
2624+ .format(svc), level=DEBUG)
2625 service_start(svc)
2626
2627
2628 def ensure_ceph_keyring(service, user=None, group=None):
2629- '''
2630- Ensures a ceph keyring is created for a named service
2631- and optionally ensures user and group ownership.
2632+ """Ensures a ceph keyring is created for a named service and optionally
2633+ ensures user and group ownership.
2634
2635 Returns False if no ceph key is available in relation state.
2636- '''
2637+ """
2638 key = None
2639 for rid in relation_ids('ceph'):
2640 for unit in related_units(rid):
2641 key = relation_get('key', rid=rid, unit=unit)
2642 if key:
2643 break
2644+
2645 if not key:
2646 return False
2647+
2648 create_keyring(service=service, key=key)
2649 keyring = _keyring_path(service)
2650 if user and group:
2651 check_call(['chown', '%s.%s' % (user, group), keyring])
2652+
2653 return True
2654
2655
2656 def ceph_version():
2657- ''' Retrieve the local version of ceph '''
2658+ """Retrieve the local version of ceph."""
2659 if os.path.exists('/usr/bin/ceph'):
2660 cmd = ['ceph', '-v']
2661- output = check_output(cmd)
2662+ output = check_output(cmd).decode('US-ASCII')
2663 output = output.split()
2664 if len(output) > 3:
2665 return output[2]
2666
2667=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
2668--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-08-12 21:48:24 +0000
2669+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2014-12-11 17:56:54 +0000
2670@@ -1,12 +1,12 @@
2671-
2672 import os
2673 import re
2674-
2675 from subprocess import (
2676 check_call,
2677 check_output,
2678 )
2679
2680+import six
2681+
2682
2683 ##################################################
2684 # loopback device helpers.
2685@@ -37,7 +37,7 @@
2686 '''
2687 file_path = os.path.abspath(file_path)
2688 check_call(['losetup', '--find', file_path])
2689- for d, f in loopback_devices().iteritems():
2690+ for d, f in six.iteritems(loopback_devices()):
2691 if f == file_path:
2692 return d
2693
2694@@ -51,7 +51,7 @@
2695
2696 :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
2697 '''
2698- for d, f in loopback_devices().iteritems():
2699+ for d, f in six.iteritems(loopback_devices()):
2700 if f == path:
2701 return d
2702
2703
2704=== modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
2705--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-05-19 11:41:02 +0000
2706+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-12-11 17:56:54 +0000
2707@@ -61,6 +61,7 @@
2708 vg = None
2709 pvd = check_output(['pvdisplay', block_device]).splitlines()
2710 for l in pvd:
2711+ l = l.decode('UTF-8')
2712 if l.strip().startswith('VG Name'):
2713 vg = ' '.join(l.strip().split()[2:])
2714 return vg
2715
2716=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
2717--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-08-13 13:12:14 +0000
2718+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-12-11 17:56:54 +0000
2719@@ -30,7 +30,8 @@
2720 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
2721 call(['sgdisk', '--zap-all', '--mbrtogpt',
2722 '--clear', block_device])
2723- dev_end = check_output(['blockdev', '--getsz', block_device])
2724+ dev_end = check_output(['blockdev', '--getsz',
2725+ block_device]).decode('UTF-8')
2726 gpt_end = int(dev_end.split()[0]) - 100
2727 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
2728 'bs=1M', 'count=1'])
2729@@ -47,7 +48,7 @@
2730 it doesn't.
2731 '''
2732 is_partition = bool(re.search(r".*[0-9]+\b", device))
2733- out = check_output(['mount'])
2734+ out = check_output(['mount']).decode('UTF-8')
2735 if is_partition:
2736 return bool(re.search(device + r"\b", out))
2737 return bool(re.search(device + r"[0-9]+\b", out))
2738
2739=== modified file 'hooks/charmhelpers/core/fstab.py'
2740--- hooks/charmhelpers/core/fstab.py 2014-07-11 02:24:52 +0000
2741+++ hooks/charmhelpers/core/fstab.py 2014-12-11 17:56:54 +0000
2742@@ -3,10 +3,11 @@
2743
2744 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
2745
2746+import io
2747 import os
2748
2749
2750-class Fstab(file):
2751+class Fstab(io.FileIO):
2752 """This class extends file in order to implement a file reader/writer
2753 for file `/etc/fstab`
2754 """
2755@@ -24,8 +25,8 @@
2756 options = "defaults"
2757
2758 self.options = options
2759- self.d = d
2760- self.p = p
2761+ self.d = int(d)
2762+ self.p = int(p)
2763
2764 def __eq__(self, o):
2765 return str(self) == str(o)
2766@@ -45,7 +46,7 @@
2767 self._path = path
2768 else:
2769 self._path = self.DEFAULT_PATH
2770- file.__init__(self, self._path, 'r+')
2771+ super(Fstab, self).__init__(self._path, 'rb+')
2772
2773 def _hydrate_entry(self, line):
2774 # NOTE: use split with no arguments to split on any
2775@@ -58,8 +59,9 @@
2776 def entries(self):
2777 self.seek(0)
2778 for line in self.readlines():
2779+ line = line.decode('us-ascii')
2780 try:
2781- if not line.startswith("#"):
2782+ if line.strip() and not line.startswith("#"):
2783 yield self._hydrate_entry(line)
2784 except ValueError:
2785 pass
2786@@ -75,14 +77,14 @@
2787 if self.get_entry_by_attr('device', entry.device):
2788 return False
2789
2790- self.write(str(entry) + '\n')
2791+ self.write((str(entry) + '\n').encode('us-ascii'))
2792 self.truncate()
2793 return entry
2794
2795 def remove_entry(self, entry):
2796 self.seek(0)
2797
2798- lines = self.readlines()
2799+ lines = [l.decode('us-ascii') for l in self.readlines()]
2800
2801 found = False
2802 for index, line in enumerate(lines):
2803@@ -97,7 +99,7 @@
2804 lines.remove(line)
2805
2806 self.seek(0)
2807- self.write(''.join(lines))
2808+ self.write(''.join(lines).encode('us-ascii'))
2809 self.truncate()
2810 return True
2811
2812
2813=== modified file 'hooks/charmhelpers/core/hookenv.py'
2814--- hooks/charmhelpers/core/hookenv.py 2014-10-06 21:57:43 +0000
2815+++ hooks/charmhelpers/core/hookenv.py 2014-12-11 17:56:54 +0000
2816@@ -9,9 +9,14 @@
2817 import yaml
2818 import subprocess
2819 import sys
2820-import UserDict
2821 from subprocess import CalledProcessError
2822
2823+import six
2824+if not six.PY3:
2825+ from UserDict import UserDict
2826+else:
2827+ from collections import UserDict
2828+
2829 CRITICAL = "CRITICAL"
2830 ERROR = "ERROR"
2831 WARNING = "WARNING"
2832@@ -63,16 +68,18 @@
2833 command = ['juju-log']
2834 if level:
2835 command += ['-l', level]
2836+ if not isinstance(message, six.string_types):
2837+ message = repr(message)
2838 command += [message]
2839 subprocess.call(command)
2840
2841
2842-class Serializable(UserDict.IterableUserDict):
2843+class Serializable(UserDict):
2844 """Wrapper, an object that can be serialized to yaml or json"""
2845
2846 def __init__(self, obj):
2847 # wrap the object
2848- UserDict.IterableUserDict.__init__(self)
2849+ UserDict.__init__(self)
2850 self.data = obj
2851
2852 def __getattr__(self, attr):
2853@@ -214,6 +221,12 @@
2854 except KeyError:
2855 return (self._prev_dict or {})[key]
2856
2857+ def keys(self):
2858+ prev_keys = []
2859+ if self._prev_dict is not None:
2860+ prev_keys = list(self._prev_dict.keys())
2861+ return list(set(prev_keys + list(dict.keys(self))))
2862+
2863 def load_previous(self, path=None):
2864 """Load previous copy of config from disk.
2865
2866@@ -263,7 +276,7 @@
2867
2868 """
2869 if self._prev_dict:
2870- for k, v in self._prev_dict.iteritems():
2871+ for k, v in six.iteritems(self._prev_dict):
2872 if k not in self:
2873 self[k] = v
2874 with open(self.path, 'w') as f:
2875@@ -278,7 +291,8 @@
2876 config_cmd_line.append(scope)
2877 config_cmd_line.append('--format=json')
2878 try:
2879- config_data = json.loads(subprocess.check_output(config_cmd_line))
2880+ config_data = json.loads(
2881+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
2882 if scope is not None:
2883 return config_data
2884 return Config(config_data)
2885@@ -297,10 +311,10 @@
2886 if unit:
2887 _args.append(unit)
2888 try:
2889- return json.loads(subprocess.check_output(_args))
2890+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2891 except ValueError:
2892 return None
2893- except CalledProcessError, e:
2894+ except CalledProcessError as e:
2895 if e.returncode == 2:
2896 return None
2897 raise
2898@@ -312,7 +326,7 @@
2899 relation_cmd_line = ['relation-set']
2900 if relation_id is not None:
2901 relation_cmd_line.extend(('-r', relation_id))
2902- for k, v in (relation_settings.items() + kwargs.items()):
2903+ for k, v in (list(relation_settings.items()) + list(kwargs.items())):
2904 if v is None:
2905 relation_cmd_line.append('{}='.format(k))
2906 else:
2907@@ -329,7 +343,8 @@
2908 relid_cmd_line = ['relation-ids', '--format=json']
2909 if reltype is not None:
2910 relid_cmd_line.append(reltype)
2911- return json.loads(subprocess.check_output(relid_cmd_line)) or []
2912+ return json.loads(
2913+ subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
2914 return []
2915
2916
2917@@ -340,7 +355,8 @@
2918 units_cmd_line = ['relation-list', '--format=json']
2919 if relid is not None:
2920 units_cmd_line.extend(('-r', relid))
2921- return json.loads(subprocess.check_output(units_cmd_line)) or []
2922+ return json.loads(
2923+ subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
2924
2925
2926 @cached
2927@@ -380,21 +396,31 @@
2928
2929
2930 @cached
2931+def metadata():
2932+ """Get the current charm metadata.yaml contents as a python object"""
2933+ with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
2934+ return yaml.safe_load(md)
2935+
2936+
2937+@cached
2938 def relation_types():
2939 """Get a list of relation types supported by this charm"""
2940- charmdir = os.environ.get('CHARM_DIR', '')
2941- mdf = open(os.path.join(charmdir, 'metadata.yaml'))
2942- md = yaml.safe_load(mdf)
2943 rel_types = []
2944+ md = metadata()
2945 for key in ('provides', 'requires', 'peers'):
2946 section = md.get(key)
2947 if section:
2948 rel_types.extend(section.keys())
2949- mdf.close()
2950 return rel_types
2951
2952
2953 @cached
2954+def charm_name():
2955+ """Get the name of the current charm as is specified on metadata.yaml"""
2956+ return metadata().get('name')
2957+
2958+
2959+@cached
2960 def relations():
2961 """Get a nested dictionary of relation data for all related units"""
2962 rels = {}
2963@@ -449,7 +475,7 @@
2964 """Get the unit ID for the remote unit"""
2965 _args = ['unit-get', '--format=json', attribute]
2966 try:
2967- return json.loads(subprocess.check_output(_args))
2968+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2969 except ValueError:
2970 return None
2971
2972
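The new metadata() and charm_name() helpers simply parse the charm's metadata.yaml; a brief usage sketch (the printed values depend on the deployed charm):

    from charmhelpers.core.hookenv import metadata, charm_name

    md = metadata()              # parsed metadata.yaml as a dict
    print(charm_name())          # same as md.get('name'), e.g. 'nova-compute'
    print(md.get('provides'))    # one of the sections relation_types() reads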
2973=== modified file 'hooks/charmhelpers/core/host.py'
2974--- hooks/charmhelpers/core/host.py 2014-10-06 21:57:43 +0000
2975+++ hooks/charmhelpers/core/host.py 2014-12-11 17:56:54 +0000
2976@@ -6,19 +6,20 @@
2977 # Matthew Wedgwood <matthew.wedgwood@canonical.com>
2978
2979 import os
2980+import re
2981 import pwd
2982 import grp
2983 import random
2984 import string
2985 import subprocess
2986 import hashlib
2987-import shutil
2988 from contextlib import contextmanager
2989-
2990 from collections import OrderedDict
2991
2992-from hookenv import log
2993-from fstab import Fstab
2994+import six
2995+
2996+from .hookenv import log
2997+from .fstab import Fstab
2998
2999
3000 def service_start(service_name):
3001@@ -54,7 +55,9 @@
3002 def service_running(service):
3003 """Determine whether a system service is running"""
3004 try:
3005- output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
3006+ output = subprocess.check_output(
3007+ ['service', service, 'status'],
3008+ stderr=subprocess.STDOUT).decode('UTF-8')
3009 except subprocess.CalledProcessError:
3010 return False
3011 else:
3012@@ -67,7 +70,9 @@
3013 def service_available(service_name):
3014 """Determine whether a system service is available"""
3015 try:
3016- subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
3017+ subprocess.check_output(
3018+ ['service', service_name, 'status'],
3019+ stderr=subprocess.STDOUT).decode('UTF-8')
3020 except subprocess.CalledProcessError as e:
3021 return 'unrecognized service' not in e.output
3022 else:
3023@@ -96,6 +101,26 @@
3024 return user_info
3025
3026
3027+def add_group(group_name, system_group=False):
3028+ """Add a group to the system"""
3029+ try:
3030+ group_info = grp.getgrnam(group_name)
3031+ log('group {0} already exists!'.format(group_name))
3032+ except KeyError:
3033+ log('creating group {0}'.format(group_name))
3034+ cmd = ['addgroup']
3035+ if system_group:
3036+ cmd.append('--system')
3037+ else:
3038+ cmd.extend([
3039+ '--group',
3040+ ])
3041+ cmd.append(group_name)
3042+ subprocess.check_call(cmd)
3043+ group_info = grp.getgrnam(group_name)
3044+ return group_info
3045+
3046+
3047 def add_user_to_group(username, group):
3048 """Add a user to a group"""
3049 cmd = [
3050@@ -115,7 +140,7 @@
3051 cmd.append(from_path)
3052 cmd.append(to_path)
3053 log(" ".join(cmd))
3054- return subprocess.check_output(cmd).strip()
3055+ return subprocess.check_output(cmd).decode('UTF-8').strip()
3056
3057
3058 def symlink(source, destination):
3059@@ -130,7 +155,7 @@
3060 subprocess.check_call(cmd)
3061
3062
3063-def mkdir(path, owner='root', group='root', perms=0555, force=False):
3064+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
3065 """Create a directory"""
3066 log("Making dir {} {}:{} {:o}".format(path, owner, group,
3067 perms))
3068@@ -146,7 +171,7 @@
3069 os.chown(realpath, uid, gid)
3070
3071
3072-def write_file(path, content, owner='root', group='root', perms=0444):
3073+def write_file(path, content, owner='root', group='root', perms=0o444):
3074 """Create or overwrite a file with the contents of a string"""
3075 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
3076 uid = pwd.getpwnam(owner).pw_uid
3077@@ -177,7 +202,7 @@
3078 cmd_args.extend([device, mountpoint])
3079 try:
3080 subprocess.check_output(cmd_args)
3081- except subprocess.CalledProcessError, e:
3082+ except subprocess.CalledProcessError as e:
3083 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
3084 return False
3085
3086@@ -191,7 +216,7 @@
3087 cmd_args = ['umount', mountpoint]
3088 try:
3089 subprocess.check_output(cmd_args)
3090- except subprocess.CalledProcessError, e:
3091+ except subprocess.CalledProcessError as e:
3092 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
3093 return False
3094
3095@@ -218,8 +243,8 @@
3096 """
3097 if os.path.exists(path):
3098 h = getattr(hashlib, hash_type)()
3099- with open(path, 'r') as source:
3100- h.update(source.read()) # IGNORE:E1101 - it does have update
3101+ with open(path, 'rb') as source:
3102+ h.update(source.read())
3103 return h.hexdigest()
3104 else:
3105 return None
3106@@ -297,7 +322,7 @@
3107 if length is None:
3108 length = random.choice(range(35, 45))
3109 alphanumeric_chars = [
3110- l for l in (string.letters + string.digits)
3111+ l for l in (string.ascii_letters + string.digits)
3112 if l not in 'l0QD1vAEIOUaeiou']
3113 random_chars = [
3114 random.choice(alphanumeric_chars) for _ in range(length)]
3115@@ -306,18 +331,24 @@
3116
3117 def list_nics(nic_type):
3118 '''Return a list of nics of given type(s)'''
3119- if isinstance(nic_type, basestring):
3120+ if isinstance(nic_type, six.string_types):
3121 int_types = [nic_type]
3122 else:
3123 int_types = nic_type
3124 interfaces = []
3125 for int_type in int_types:
3126 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3127- ip_output = subprocess.check_output(cmd).split('\n')
3128+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3129 ip_output = (line for line in ip_output if line)
3130 for line in ip_output:
3131 if line.split()[1].startswith(int_type):
3132- interfaces.append(line.split()[1].replace(":", ""))
3133+ matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
3134+ if matched:
3135+ interface = matched.groups()[0]
3136+ else:
3137+ interface = line.split()[1].replace(":", "")
3138+ interfaces.append(interface)
3139+
3140 return interfaces
3141
3142
3143@@ -329,7 +360,7 @@
3144
3145 def get_nic_mtu(nic):
3146 cmd = ['ip', 'addr', 'show', nic]
3147- ip_output = subprocess.check_output(cmd).split('\n')
3148+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3149 mtu = ""
3150 for line in ip_output:
3151 words = line.split()
3152@@ -340,7 +371,7 @@
3153
3154 def get_nic_hwaddr(nic):
3155 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
3156- ip_output = subprocess.check_output(cmd)
3157+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
3158 hwaddr = ""
3159 words = ip_output.split()
3160 if 'link/ether' in words:
3161@@ -357,8 +388,8 @@
3162
3163 '''
3164 import apt_pkg
3165- from charmhelpers.fetch import apt_cache
3166 if not pkgcache:
3167+ from charmhelpers.fetch import apt_cache
3168 pkgcache = apt_cache()
3169 pkg = pkgcache[package]
3170 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
3171
3172=== modified file 'hooks/charmhelpers/core/services/__init__.py'
3173--- hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:12:14 +0000
3174+++ hooks/charmhelpers/core/services/__init__.py 2014-12-11 17:56:54 +0000
3175@@ -1,2 +1,2 @@
3176-from .base import *
3177-from .helpers import *
3178+from .base import * # NOQA
3179+from .helpers import * # NOQA
3180
3181=== modified file 'hooks/charmhelpers/core/services/helpers.py'
3182--- hooks/charmhelpers/core/services/helpers.py 2014-10-06 21:57:43 +0000
3183+++ hooks/charmhelpers/core/services/helpers.py 2014-12-11 17:56:54 +0000
3184@@ -196,7 +196,7 @@
3185 if not os.path.isabs(file_name):
3186 file_name = os.path.join(hookenv.charm_dir(), file_name)
3187 with open(file_name, 'w') as file_stream:
3188- os.fchmod(file_stream.fileno(), 0600)
3189+ os.fchmod(file_stream.fileno(), 0o600)
3190 yaml.dump(config_data, file_stream)
3191
3192 def read_context(self, file_name):
3193@@ -211,15 +211,19 @@
3194
3195 class TemplateCallback(ManagerCallback):
3196 """
3197- Callback class that will render a Jinja2 template, for use as a ready action.
3198-
3199- :param str source: The template source file, relative to `$CHARM_DIR/templates`
3200+ Callback class that will render a Jinja2 template, for use as a ready
3201+ action.
3202+
3203+ :param str source: The template source file, relative to
3204+ `$CHARM_DIR/templates`
3205+
3206 :param str target: The target to write the rendered template to
3207 :param str owner: The owner of the rendered file
3208 :param str group: The group of the rendered file
3209 :param int perms: The permissions of the rendered file
3210 """
3211- def __init__(self, source, target, owner='root', group='root', perms=0444):
3212+ def __init__(self, source, target,
3213+ owner='root', group='root', perms=0o444):
3214 self.source = source
3215 self.target = target
3216 self.owner = owner
3217
3218=== added file 'hooks/charmhelpers/core/sysctl.py'
3219--- hooks/charmhelpers/core/sysctl.py 1970-01-01 00:00:00 +0000
3220+++ hooks/charmhelpers/core/sysctl.py 2014-12-11 17:56:54 +0000
3221@@ -0,0 +1,34 @@
3222+#!/usr/bin/env python
3223+# -*- coding: utf-8 -*-
3224+
3225+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
3226+
3227+import yaml
3228+
3229+from subprocess import check_call
3230+
3231+from charmhelpers.core.hookenv import (
3232+ log,
3233+ DEBUG,
3234+)
3235+
3236+
3237+def create(sysctl_dict, sysctl_file):
3238+ """Creates a sysctl.conf file from a YAML associative array
3239+
3240+ :param sysctl_dict: YAML-formatted string of sysctl options eg { 'kernel.max_pid': 1337 }
3241+ :type sysctl_dict: str
3242+ :param sysctl_file: path to the sysctl file to be saved
3243+ :type sysctl_file: str or unicode
3244+ :returns: None
3245+ """
3246+ sysctl_dict = yaml.load(sysctl_dict)
3247+
3248+ with open(sysctl_file, "w") as fd:
3249+ for key, value in sysctl_dict.items():
3250+ fd.write("{}={}\n".format(key, value))
3251+
3252+ log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict),
3253+ level=DEBUG)
3254+
3255+ check_call(["sysctl", "-p", sysctl_file])
3256
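A minimal usage sketch for the new sysctl helper; the option values and target path are assumed for illustration:

    from charmhelpers.core.sysctl import create as sysctl_create

    # create() parses the YAML string, writes key=value lines to the file,
    # then runs 'sysctl -p' against it.
    sysctl_create("{net.ipv4.ip_forward: 1, vm.swappiness: 10}",
                  '/etc/sysctl.d/50-nova-compute.conf')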
3257=== modified file 'hooks/charmhelpers/core/templating.py'
3258--- hooks/charmhelpers/core/templating.py 2014-08-13 13:12:14 +0000
3259+++ hooks/charmhelpers/core/templating.py 2014-12-11 17:56:54 +0000
3260@@ -4,7 +4,8 @@
3261 from charmhelpers.core import hookenv
3262
3263
3264-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
3265+def render(source, target, context, owner='root', group='root',
3266+ perms=0o444, templates_dir=None):
3267 """
3268 Render a template.
3269
3270
3271=== modified file 'hooks/charmhelpers/fetch/__init__.py'
3272--- hooks/charmhelpers/fetch/__init__.py 2014-10-06 21:57:43 +0000
3273+++ hooks/charmhelpers/fetch/__init__.py 2014-12-11 17:56:54 +0000
3274@@ -5,10 +5,6 @@
3275 from charmhelpers.core.host import (
3276 lsb_release
3277 )
3278-from urlparse import (
3279- urlparse,
3280- urlunparse,
3281-)
3282 import subprocess
3283 from charmhelpers.core.hookenv import (
3284 config,
3285@@ -16,6 +12,12 @@
3286 )
3287 import os
3288
3289+import six
3290+if six.PY3:
3291+ from urllib.parse import urlparse, urlunparse
3292+else:
3293+ from urlparse import urlparse, urlunparse
3294+
3295
3296 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
3297 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
3298@@ -72,6 +74,7 @@
3299 FETCH_HANDLERS = (
3300 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
3301 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
3302+ 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
3303 )
3304
3305 APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
3306@@ -148,7 +151,7 @@
3307 cmd = ['apt-get', '--assume-yes']
3308 cmd.extend(options)
3309 cmd.append('install')
3310- if isinstance(packages, basestring):
3311+ if isinstance(packages, six.string_types):
3312 cmd.append(packages)
3313 else:
3314 cmd.extend(packages)
3315@@ -181,7 +184,7 @@
3316 def apt_purge(packages, fatal=False):
3317 """Purge one or more packages"""
3318 cmd = ['apt-get', '--assume-yes', 'purge']
3319- if isinstance(packages, basestring):
3320+ if isinstance(packages, six.string_types):
3321 cmd.append(packages)
3322 else:
3323 cmd.extend(packages)
3324@@ -192,7 +195,7 @@
3325 def apt_hold(packages, fatal=False):
3326 """Hold one or more packages"""
3327 cmd = ['apt-mark', 'hold']
3328- if isinstance(packages, basestring):
3329+ if isinstance(packages, six.string_types):
3330 cmd.append(packages)
3331 else:
3332 cmd.extend(packages)
3333@@ -218,6 +221,7 @@
3334 pocket for the release.
3335 'cloud:' may be used to activate official cloud archive pockets,
3336 such as 'cloud:icehouse'
3337+ 'distro' may be used as a noop
3338
3339 @param key: A key to be added to the system's APT keyring and used
3340 to verify the signatures on packages. Ideally, this should be an
3341@@ -251,12 +255,14 @@
3342 release = lsb_release()['DISTRIB_CODENAME']
3343 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
3344 apt.write(PROPOSED_POCKET.format(release))
3345+ elif source == 'distro':
3346+ pass
3347 else:
3348- raise SourceConfigError("Unknown source: {!r}".format(source))
3349+ log("Unknown source: {!r}".format(source))
3350
3351 if key:
3352 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
3353- with NamedTemporaryFile() as key_file:
3354+ with NamedTemporaryFile('w+') as key_file:
3355 key_file.write(key)
3356 key_file.flush()
3357 key_file.seek(0)
3358@@ -293,14 +299,14 @@
3359 sources = safe_load((config(sources_var) or '').strip()) or []
3360 keys = safe_load((config(keys_var) or '').strip()) or None
3361
3362- if isinstance(sources, basestring):
3363+ if isinstance(sources, six.string_types):
3364 sources = [sources]
3365
3366 if keys is None:
3367 for source in sources:
3368 add_source(source, None)
3369 else:
3370- if isinstance(keys, basestring):
3371+ if isinstance(keys, six.string_types):
3372 keys = [keys]
3373
3374 if len(sources) != len(keys):
3375@@ -397,7 +403,7 @@
3376 while result is None or result == APT_NO_LOCK:
3377 try:
3378 result = subprocess.check_call(cmd, env=env)
3379- except subprocess.CalledProcessError, e:
3380+ except subprocess.CalledProcessError as e:
3381 retry_count = retry_count + 1
3382 if retry_count > APT_NO_LOCK_RETRY_COUNT:
3383 raise
3384
3385=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
3386--- hooks/charmhelpers/fetch/archiveurl.py 2014-10-06 21:57:43 +0000
3387+++ hooks/charmhelpers/fetch/archiveurl.py 2014-12-11 17:56:54 +0000
3388@@ -1,8 +1,23 @@
3389 import os
3390-import urllib2
3391-from urllib import urlretrieve
3392-import urlparse
3393 import hashlib
3394+import re
3395+
3396+import six
3397+if six.PY3:
3398+ from urllib.request import (
3399+ build_opener, install_opener, urlopen, urlretrieve,
3400+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
3401+ )
3402+ from urllib.parse import urlparse, urlunparse, parse_qs
3403+ from urllib.error import URLError
3404+else:
3405+ from urllib import urlretrieve
3406+ from urllib2 import (
3407+ build_opener, install_opener, urlopen,
3408+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
3409+ URLError
3410+ )
3411+ from urlparse import urlparse, urlunparse, parse_qs
3412
3413 from charmhelpers.fetch import (
3414 BaseFetchHandler,
3415@@ -15,6 +30,24 @@
3416 from charmhelpers.core.host import mkdir, check_hash
3417
3418
3419+def splituser(host):
3420+ '''urllib.splituser(), but six's support of this seems broken'''
3421+ _userprog = re.compile('^(.*)@(.*)$')
3422+ match = _userprog.match(host)
3423+ if match:
3424+ return match.group(1, 2)
3425+ return None, host
3426+
3427+
3428+def splitpasswd(user):
3429+ '''urllib.splitpasswd(), but six's support of this is missing'''
3430+ _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
3431+ match = _passwdprog.match(user)
3432+ if match:
3433+ return match.group(1, 2)
3434+ return user, None
3435+
3436+
3437 class ArchiveUrlFetchHandler(BaseFetchHandler):
3438 """
3439 Handler to download archive files from arbitrary URLs.
3440@@ -42,20 +75,20 @@
3441 """
3442 # propogate all exceptions
3443 # URLError, OSError, etc
3444- proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
3445+ proto, netloc, path, params, query, fragment = urlparse(source)
3446 if proto in ('http', 'https'):
3447- auth, barehost = urllib2.splituser(netloc)
3448+ auth, barehost = splituser(netloc)
3449 if auth is not None:
3450- source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
3451- username, password = urllib2.splitpasswd(auth)
3452- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
3453+ source = urlunparse((proto, barehost, path, params, query, fragment))
3454+ username, password = splitpasswd(auth)
3455+ passman = HTTPPasswordMgrWithDefaultRealm()
3456 # Realm is set to None in add_password to force the username and password
3457 # to be used whatever the realm
3458 passman.add_password(None, source, username, password)
3459- authhandler = urllib2.HTTPBasicAuthHandler(passman)
3460- opener = urllib2.build_opener(authhandler)
3461- urllib2.install_opener(opener)
3462- response = urllib2.urlopen(source)
3463+ authhandler = HTTPBasicAuthHandler(passman)
3464+ opener = build_opener(authhandler)
3465+ install_opener(opener)
3466+ response = urlopen(source)
3467 try:
3468 with open(dest, 'w') as dest_file:
3469 dest_file.write(response.read())
3470@@ -91,17 +124,21 @@
3471 url_parts = self.parse_url(source)
3472 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
3473 if not os.path.exists(dest_dir):
3474- mkdir(dest_dir, perms=0755)
3475+ mkdir(dest_dir, perms=0o755)
3476 dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
3477 try:
3478 self.download(source, dld_file)
3479- except urllib2.URLError as e:
3480+ except URLError as e:
3481 raise UnhandledSource(e.reason)
3482 except OSError as e:
3483 raise UnhandledSource(e.strerror)
3484- options = urlparse.parse_qs(url_parts.fragment)
3485+ options = parse_qs(url_parts.fragment)
3486 for key, value in options.items():
3487- if key in hashlib.algorithms:
3488+ if not six.PY3:
3489+ algorithms = hashlib.algorithms
3490+ else:
3491+ algorithms = hashlib.algorithms_available
3492+ if key in algorithms:
3493 check_hash(dld_file, value, key)
3494 if checksum:
3495 check_hash(dld_file, checksum, hash_type)
3496
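
archiveurl.py now selects its urllib imports with a six.PY3 check and carries local splituser()/splitpasswd() helpers, since six does not provide working equivalents of those two functions; it also consults hashlib.algorithms_available on Python 3, where hashlib.algorithms no longer exists. The credential-splitting step can be exercised on its own; a small sketch under those assumptions (the URL is illustrative):

    import re
    try:
        from urllib.parse import urlparse   # Python 3
    except ImportError:
        from urlparse import urlparse        # Python 2

    def splituser(host):
        # 'user:secret@host' -> ('user:secret', 'host')
        match = re.match('^(.*)@(.*)$', host)
        return match.group(1, 2) if match else (None, host)

    def splitpasswd(user):
        # 'user:secret' -> ('user', 'secret')
        match = re.match('^([^:]*):(.*)$', user, re.S)
        return match.group(1, 2) if match else (user, None)

    netloc = urlparse('https://user:secret@archive.example.com/pkg.tgz').netloc
    auth, barehost = splituser(netloc)
    print(splitpasswd(auth), barehost)
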
3497=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
3498--- hooks/charmhelpers/fetch/bzrurl.py 2014-07-28 14:38:51 +0000
3499+++ hooks/charmhelpers/fetch/bzrurl.py 2014-12-11 17:56:54 +0000
3500@@ -5,6 +5,10 @@
3501 )
3502 from charmhelpers.core.host import mkdir
3503
3504+import six
3505+if six.PY3:
3506+ raise ImportError('bzrlib does not support Python3')
3507+
3508 try:
3509 from bzrlib.branch import Branch
3510 except ImportError:
3511@@ -42,7 +46,7 @@
3512 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3513 branch_name)
3514 if not os.path.exists(dest_dir):
3515- mkdir(dest_dir, perms=0755)
3516+ mkdir(dest_dir, perms=0o755)
3517 try:
3518 self.branch(source, dest_dir)
3519 except OSError as e:
3520
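
Under Python 3 the bzr handler now raises ImportError at import time, since bzrlib is Python 2 only. Callers that enumerate fetch handlers are expected to tolerate that; a simplified stand-in for such a loader (the module list and loop are illustrative, not the actual charm-helpers plugin machinery):

    import importlib

    handlers = []
    for mod_name in ('charmhelpers.fetch.bzrurl', 'charmhelpers.fetch.giturl'):
        try:
            handlers.append(importlib.import_module(mod_name))
        except ImportError:
            # On Python 3 these modules raise ImportError deliberately,
            # so VCS fetch support is skipped instead of crashing the hook.
            pass
    print([m.__name__ for m in handlers])
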
3521=== added file 'hooks/charmhelpers/fetch/giturl.py'
3522--- hooks/charmhelpers/fetch/giturl.py 1970-01-01 00:00:00 +0000
3523+++ hooks/charmhelpers/fetch/giturl.py 2014-12-11 17:56:54 +0000
3524@@ -0,0 +1,51 @@
3525+import os
3526+from charmhelpers.fetch import (
3527+ BaseFetchHandler,
3528+ UnhandledSource
3529+)
3530+from charmhelpers.core.host import mkdir
3531+
3532+import six
3533+if six.PY3:
3534+ raise ImportError('GitPython does not support Python 3')
3535+
3536+try:
3537+ from git import Repo
3538+except ImportError:
3539+ from charmhelpers.fetch import apt_install
3540+ apt_install("python-git")
3541+ from git import Repo
3542+
3543+
3544+class GitUrlFetchHandler(BaseFetchHandler):
3545+ """Handler for git branches via generic and github URLs"""
3546+ def can_handle(self, source):
3547+ url_parts = self.parse_url(source)
3548+ # TODO (mattyw) no support for ssh git@ yet
3549+ if url_parts.scheme not in ('http', 'https', 'git'):
3550+ return False
3551+ else:
3552+ return True
3553+
3554+ def clone(self, source, dest, branch):
3555+ if not self.can_handle(source):
3556+ raise UnhandledSource("Cannot handle {}".format(source))
3557+
3558+ repo = Repo.clone_from(source, dest)
3559+ repo.git.checkout(branch)
3560+
3561+ def install(self, source, branch="master", dest=None):
3562+ url_parts = self.parse_url(source)
3563+ branch_name = url_parts.path.strip("/").split("/")[-1]
3564+ if dest:
3565+ dest_dir = os.path.join(dest, branch_name)
3566+ else:
3567+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3568+ branch_name)
3569+ if not os.path.exists(dest_dir):
3570+ mkdir(dest_dir, perms=0o755)
3571+ try:
3572+ self.clone(source, dest_dir, branch)
3573+ except OSError as e:
3574+ raise UnhandledSource(e.strerror)
3575+ return dest_dir
3576
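
The new GitUrlFetchHandler clones with GitPython and then checks out the requested branch; install() derives the target directory from the last path component of the URL. A minimal usage sketch of that clone-then-checkout pattern, assuming GitPython (python-git) is installed (the URL and destination path are illustrative):

    from git import Repo

    # Equivalent of GitUrlFetchHandler.clone(): clone, then switch branch.
    repo = Repo.clone_from('https://github.com/juju/charm-helpers',
                           '/tmp/charm-helpers')
    repo.git.checkout('master')
    print(repo.head.commit.hexsha)
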
3577=== modified file 'tests/charmhelpers/__init__.py'
3578--- tests/charmhelpers/__init__.py 2014-07-11 02:24:52 +0000
3579+++ tests/charmhelpers/__init__.py 2014-12-11 17:56:54 +0000
3580@@ -0,0 +1,22 @@
3581+# Bootstrap charm-helpers, installing its dependencies if necessary using
3582+# only standard libraries.
3583+import subprocess
3584+import sys
3585+
3586+try:
3587+ import six # flake8: noqa
3588+except ImportError:
3589+ if sys.version_info.major == 2:
3590+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
3591+ else:
3592+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
3593+ import six # flake8: noqa
3594+
3595+try:
3596+ import yaml # flake8: noqa
3597+except ImportError:
3598+ if sys.version_info.major == 2:
3599+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
3600+ else:
3601+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
3602+ import yaml # flake8: noqa
3603
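
The test-side bootstrap installs whichever distro package matches the running interpreter before importing six and yaml. The same pattern could be expressed once as a helper; a hypothetical refactoring for illustration, not part of the proposed change:

    import importlib
    import subprocess
    import sys

    def bootstrap(module, py2_pkg, py3_pkg):
        # Import a module, apt-installing the matching distro package on demand.
        try:
            return importlib.import_module(module)
        except ImportError:
            pkg = py2_pkg if sys.version_info.major == 2 else py3_pkg
            subprocess.check_call(['apt-get', 'install', '-y', pkg])
            return importlib.import_module(module)

    six = bootstrap('six', 'python-six', 'python3-six')
    yaml = bootstrap('yaml', 'python-yaml', 'python3-yaml')
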
3604=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
3605--- tests/charmhelpers/contrib/amulet/deployment.py 2014-10-06 21:57:43 +0000
3606+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-12-11 17:56:54 +0000
3607@@ -1,6 +1,6 @@
3608 import amulet
3609-
3610 import os
3611+import six
3612
3613
3614 class AmuletDeployment(object):
3615@@ -52,12 +52,12 @@
3616
3617 def _add_relations(self, relations):
3618 """Add all of the relations for the services."""
3619- for k, v in relations.iteritems():
3620+ for k, v in six.iteritems(relations):
3621 self.d.relate(k, v)
3622
3623 def _configure_services(self, configs):
3624 """Configure all of the services."""
3625- for service, config in configs.iteritems():
3626+ for service, config in six.iteritems(configs):
3627 self.d.configure(service, config)
3628
3629 def _deploy(self):
3630
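
The amulet test helpers swap dict.iteritems() for six.iteritems(), which resolves to iteritems() on Python 2 and items() on Python 3, so the loop bodies stay unchanged on both interpreters. A quick illustration (the service names and options are made up):

    import six

    configs = {'nova-compute': {'debug': 'True'},
               'glance': {'debug': 'False'}}

    for service, config in six.iteritems(configs):
        print(service, config)
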
3631=== modified file 'tests/charmhelpers/contrib/amulet/utils.py'
3632--- tests/charmhelpers/contrib/amulet/utils.py 2014-07-30 15:16:25 +0000
3633+++ tests/charmhelpers/contrib/amulet/utils.py 2014-12-11 17:56:54 +0000
3634@@ -5,6 +5,8 @@
3635 import sys
3636 import time
3637
3638+import six
3639+
3640
3641 class AmuletUtils(object):
3642 """Amulet utilities.
3643@@ -58,7 +60,7 @@
3644 Verify the specified services are running on the corresponding
3645 service units.
3646 """
3647- for k, v in commands.iteritems():
3648+ for k, v in six.iteritems(commands):
3649 for cmd in v:
3650 output, code = k.run(cmd)
3651 if code != 0:
3652@@ -100,11 +102,11 @@
3653 longs, or can be a function that evaluate a variable and returns a
3654 bool.
3655 """
3656- for k, v in expected.iteritems():
3657+ for k, v in six.iteritems(expected):
3658 if k in actual:
3659- if (isinstance(v, basestring) or
3660+ if (isinstance(v, six.string_types) or
3661 isinstance(v, bool) or
3662- isinstance(v, (int, long))):
3663+ isinstance(v, six.integer_types)):
3664 if v != actual[k]:
3665 return "{}:{}".format(k, actual[k])
3666 elif not v(actual[k]):
3667
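
The comparison helper now checks against six.string_types and six.integer_types, covering str/unicode and int/long on Python 2 while collapsing to str and int on Python 3; callable expectations are still treated as predicates. A reduced, stand-alone version of that logic (validate() is a simplified stand-in for the charm-helpers method, with invented sample data):

    import six

    def validate(expected, actual):
        for k, v in six.iteritems(expected):
            if k not in actual:
                return '{} missing'.format(k)
            if (isinstance(v, six.string_types) or
                    isinstance(v, bool) or
                    isinstance(v, six.integer_types)):
                # Scalar expectations are compared directly.
                if v != actual[k]:
                    return '{}:{}'.format(k, actual[k])
            elif not v(actual[k]):
                # Callables act as predicates over the actual value.
                return '{}:{}'.format(k, actual[k])
        return None

    print(validate({'status': 'ACTIVE', 'id': lambda x: len(x) > 0},
                   {'status': 'ACTIVE', 'id': 'abc123'}))   # None means match
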
3668=== modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py'
3669--- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-06 21:57:43 +0000
3670+++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-12-11 17:56:54 +0000
3671@@ -1,3 +1,4 @@
3672+import six
3673 from charmhelpers.contrib.amulet.deployment import (
3674 AmuletDeployment
3675 )
3676@@ -69,7 +70,7 @@
3677
3678 def _configure_services(self, configs):
3679 """Configure all of the services."""
3680- for service, config in configs.iteritems():
3681+ for service, config in six.iteritems(configs):
3682 self.d.configure(service, config)
3683
3684 def _get_openstack_release(self):
3685
3686=== modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py'
3687--- tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-06 21:57:43 +0000
3688+++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-12-11 17:56:54 +0000
3689@@ -7,6 +7,8 @@
3690 import keystoneclient.v2_0 as keystone_client
3691 import novaclient.v1_1.client as nova_client
3692
3693+import six
3694+
3695 from charmhelpers.contrib.amulet.utils import (
3696 AmuletUtils
3697 )
3698@@ -60,7 +62,7 @@
3699 expected service catalog endpoints.
3700 """
3701 self.log.debug('actual: {}'.format(repr(actual)))
3702- for k, v in expected.iteritems():
3703+ for k, v in six.iteritems(expected):
3704 if k in actual:
3705 ret = self._validate_dict_data(expected[k][0], actual[k][0])
3706 if ret:
