Merge lp:~corey.bryant/charms/trusty/nova-compute-vmware/contrib.python.packages into lp:~openstack-charmers/charms/trusty/nova-compute-vmware/next

Proposed by Corey Bryant
Status: Merged
Merged at revision: 107
Proposed branch: lp:~corey.bryant/charms/trusty/nova-compute-vmware/contrib.python.packages
Merge into: lp:~openstack-charmers/charms/trusty/nova-compute-vmware/next
Diff against target: 3400 lines (+1118/-524)
27 files modified
charm-helpers.yaml (+1/-0)
hooks/charmhelpers/__init__.py (+22/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+16/-7)
hooks/charmhelpers/contrib/network/ip.py (+52/-48)
hooks/charmhelpers/contrib/network/ufw.py (+189/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+2/-1)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+3/-1)
hooks/charmhelpers/contrib/openstack/context.py (+292/-232)
hooks/charmhelpers/contrib/openstack/ip.py (+41/-27)
hooks/charmhelpers/contrib/openstack/neutron.py (+20/-4)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+2/-2)
hooks/charmhelpers/contrib/openstack/templating.py (+5/-5)
hooks/charmhelpers/contrib/openstack/utils.py (+122/-13)
hooks/charmhelpers/contrib/python/packages.py (+77/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+83/-97)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+4/-4)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+1/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+3/-2)
hooks/charmhelpers/core/fstab.py (+10/-8)
hooks/charmhelpers/core/hookenv.py (+36/-16)
hooks/charmhelpers/core/host.py (+43/-18)
hooks/charmhelpers/core/services/helpers.py (+9/-5)
hooks/charmhelpers/core/templating.py (+2/-1)
hooks/charmhelpers/fetch/__init__.py (+13/-11)
hooks/charmhelpers/fetch/archiveurl.py (+53/-16)
hooks/charmhelpers/fetch/bzrurl.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+12/-5)
To merge this branch: bzr merge lp:~corey.bryant/charms/trusty/nova-compute-vmware/contrib.python.packages
Reviewer: OpenStack Charmers, Status: Pending
Review via email: mp+244330@code.launchpad.net
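
Note: this branch resyncs charm-helpers and adds contrib.python.packages to the sync list (see the charm-helpers.yaml hunk below), pulling in the new pip helper module. As a rough illustration of what that enables, a hook could install Python packages through the helper; the sketch below is not part of this proposal, assumes the synced module exposes pip_install() as upstream charm-helpers does, and uses 'suds' purely as a placeholder package name.

    # Illustrative sketch only (not part of this merge proposal).
    # Assumes charmhelpers.contrib.python.packages provides pip_install(),
    # as in upstream charm-helpers; 'suds' is a placeholder package name.
    from charmhelpers.contrib.python.packages import pip_install

    def install_driver_python_deps():
        # Install a Python-level dependency via pip from within a hook.
        pip_install('suds')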

Preview Diff

1=== modified file 'charm-helpers.yaml'
2--- charm-helpers.yaml 2014-10-23 17:30:13 +0000
3+++ charm-helpers.yaml 2014-12-11 17:56:59 +0000
4@@ -10,3 +10,4 @@
5 - cluster
6 - payload.execd
7 - contrib.network
8+ - contrib.python.packages
9
10=== added file 'hooks/charmhelpers/__init__.py'
11--- hooks/charmhelpers/__init__.py 1970-01-01 00:00:00 +0000
12+++ hooks/charmhelpers/__init__.py 2014-12-11 17:56:59 +0000
13@@ -0,0 +1,22 @@
14+# Bootstrap charm-helpers, installing its dependencies if necessary using
15+# only standard libraries.
16+import subprocess
17+import sys
18+
19+try:
20+ import six # flake8: noqa
21+except ImportError:
22+ if sys.version_info.major == 2:
23+ subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
24+ else:
25+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
26+ import six # flake8: noqa
27+
28+try:
29+ import yaml # flake8: noqa
30+except ImportError:
31+ if sys.version_info.major == 2:
32+ subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
33+ else:
34+ subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
35+ import yaml # flake8: noqa
36
37=== removed file 'hooks/charmhelpers/__init__.py'
38=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
39--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-23 17:30:13 +0000
40+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-12-11 17:56:59 +0000
41@@ -13,9 +13,10 @@
42
43 import subprocess
44 import os
45-
46 from socket import gethostname as get_unit_hostname
47
48+import six
49+
50 from charmhelpers.core.hookenv import (
51 log,
52 relation_ids,
53@@ -77,7 +78,7 @@
54 "show", resource
55 ]
56 try:
57- status = subprocess.check_output(cmd)
58+ status = subprocess.check_output(cmd).decode('UTF-8')
59 except subprocess.CalledProcessError:
60 return False
61 else:
62@@ -150,34 +151,42 @@
63 return False
64
65
66-def determine_api_port(public_port):
67+def determine_api_port(public_port, singlenode_mode=False):
68 '''
69 Determine correct API server listening port based on
70 existence of HTTPS reverse proxy and/or haproxy.
71
72 public_port: int: standard public port for given service
73
74+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
75+
76 returns: int: the correct listening port for the API service
77 '''
78 i = 0
79- if len(peer_units()) > 0 or is_clustered():
80+ if singlenode_mode:
81+ i += 1
82+ elif len(peer_units()) > 0 or is_clustered():
83 i += 1
84 if https():
85 i += 1
86 return public_port - (i * 10)
87
88
89-def determine_apache_port(public_port):
90+def determine_apache_port(public_port, singlenode_mode=False):
91 '''
92 Description: Determine correct apache listening port based on public IP +
93 state of the cluster.
94
95 public_port: int: standard public port for given service
96
97+ singlenode_mode: boolean: Shuffle ports when only a single unit is present
98+
99 returns: int: the correct listening port for the HAProxy service
100 '''
101 i = 0
102- if len(peer_units()) > 0 or is_clustered():
103+ if singlenode_mode:
104+ i += 1
105+ elif len(peer_units()) > 0 or is_clustered():
106 i += 1
107 return public_port - (i * 10)
108
109@@ -197,7 +206,7 @@
110 for setting in settings:
111 conf[setting] = config_get(setting)
112 missing = []
113- [missing.append(s) for s, v in conf.iteritems() if v is None]
114+ [missing.append(s) for s, v in six.iteritems(conf) if v is None]
115 if missing:
116 log('Insufficient config data to configure hacluster.', level=ERROR)
117 raise HAIncompleteConfig
118
119=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
120--- hooks/charmhelpers/contrib/network/ip.py 2014-10-23 17:30:13 +0000
121+++ hooks/charmhelpers/contrib/network/ip.py 2014-12-11 17:56:59 +0000
122@@ -1,14 +1,12 @@
123 import glob
124 import re
125 import subprocess
126-import sys
127
128 from functools import partial
129
130 from charmhelpers.core.hookenv import unit_get
131 from charmhelpers.fetch import apt_install
132 from charmhelpers.core.hookenv import (
133- ERROR,
134 log
135 )
136
137@@ -33,31 +31,28 @@
138 network)
139
140
141+def no_ip_found_error_out(network):
142+ errmsg = ("No IP address found in network: %s" % network)
143+ raise ValueError(errmsg)
144+
145+
146 def get_address_in_network(network, fallback=None, fatal=False):
147- """
148- Get an IPv4 or IPv6 address within the network from the host.
149+ """Get an IPv4 or IPv6 address within the network from the host.
150
151 :param network (str): CIDR presentation format. For example,
152 '192.168.1.0/24'.
153 :param fallback (str): If no address is found, return fallback.
154 :param fatal (boolean): If no address is found, fallback is not
155 set and fatal is True then exit(1).
156-
157 """
158-
159- def not_found_error_out():
160- log("No IP address found in network: %s" % network,
161- level=ERROR)
162- sys.exit(1)
163-
164 if network is None:
165 if fallback is not None:
166 return fallback
167+
168+ if fatal:
169+ no_ip_found_error_out(network)
170 else:
171- if fatal:
172- not_found_error_out()
173- else:
174- return None
175+ return None
176
177 _validate_cidr(network)
178 network = netaddr.IPNetwork(network)
179@@ -69,6 +64,7 @@
180 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
181 if cidr in network:
182 return str(cidr.ip)
183+
184 if network.version == 6 and netifaces.AF_INET6 in addresses:
185 for addr in addresses[netifaces.AF_INET6]:
186 if not addr['addr'].startswith('fe80'):
187@@ -81,20 +77,20 @@
188 return fallback
189
190 if fatal:
191- not_found_error_out()
192+ no_ip_found_error_out(network)
193
194 return None
195
196
197 def is_ipv6(address):
198- '''Determine whether provided address is IPv6 or not'''
199+ """Determine whether provided address is IPv6 or not."""
200 try:
201 address = netaddr.IPAddress(address)
202 except netaddr.AddrFormatError:
203 # probably a hostname - so not an address at all!
204 return False
205- else:
206- return address.version == 6
207+
208+ return address.version == 6
209
210
211 def is_address_in_network(network, address):
212@@ -112,11 +108,13 @@
213 except (netaddr.core.AddrFormatError, ValueError):
214 raise ValueError("Network (%s) is not in CIDR presentation format" %
215 network)
216+
217 try:
218 address = netaddr.IPAddress(address)
219 except (netaddr.core.AddrFormatError, ValueError):
220 raise ValueError("Address (%s) is not in correct presentation format" %
221 address)
222+
223 if address in network:
224 return True
225 else:
226@@ -146,6 +144,7 @@
227 return iface
228 else:
229 return addresses[netifaces.AF_INET][0][key]
230+
231 if address.version == 6 and netifaces.AF_INET6 in addresses:
232 for addr in addresses[netifaces.AF_INET6]:
233 if not addr['addr'].startswith('fe80'):
234@@ -159,40 +158,42 @@
235 return str(cidr).split('/')[1]
236 else:
237 return addr[key]
238+
239 return None
240
241
242 get_iface_for_address = partial(_get_for_address, key='iface')
243
244+
245 get_netmask_for_address = partial(_get_for_address, key='netmask')
246
247
248 def format_ipv6_addr(address):
249- """
250- IPv6 needs to be wrapped with [] in url link to parse correctly.
251+ """If address is IPv6, wrap it in '[]' otherwise return None.
252+
253+ This is required by most configuration files when specifying IPv6
254+ addresses.
255 """
256 if is_ipv6(address):
257- address = "[%s]" % address
258- else:
259- address = None
260+ return "[%s]" % address
261
262- return address
263+ return None
264
265
266 def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
267 fatal=True, exc_list=None):
268- """
269- Return the assigned IP address for a given interface, if any, or [].
270- """
271+ """Return the assigned IP address for a given interface, if any."""
272 # Extract nic if passed /dev/ethX
273 if '/' in iface:
274 iface = iface.split('/')[-1]
275+
276 if not exc_list:
277 exc_list = []
278+
279 try:
280 inet_num = getattr(netifaces, inet_type)
281 except AttributeError:
282- raise Exception('Unknown inet type ' + str(inet_type))
283+ raise Exception("Unknown inet type '%s'" % str(inet_type))
284
285 interfaces = netifaces.interfaces()
286 if inc_aliases:
287@@ -200,15 +201,18 @@
288 for _iface in interfaces:
289 if iface == _iface or _iface.split(':')[0] == iface:
290 ifaces.append(_iface)
291+
292 if fatal and not ifaces:
293 raise Exception("Invalid interface '%s'" % iface)
294+
295 ifaces.sort()
296 else:
297 if iface not in interfaces:
298 if fatal:
299- raise Exception("%s not found " % (iface))
300+ raise Exception("Interface '%s' not found " % (iface))
301 else:
302 return []
303+
304 else:
305 ifaces = [iface]
306
307@@ -219,10 +223,13 @@
308 for entry in net_info[inet_num]:
309 if 'addr' in entry and entry['addr'] not in exc_list:
310 addresses.append(entry['addr'])
311+
312 if fatal and not addresses:
313 raise Exception("Interface '%s' doesn't have any %s addresses." %
314 (iface, inet_type))
315- return addresses
316+
317+ return sorted(addresses)
318+
319
320 get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
321
322@@ -239,6 +246,7 @@
323 raw = re.match(ll_key, _addr)
324 if raw:
325 _addr = raw.group(1)
326+
327 if _addr == addr:
328 log("Address '%s' is configured on iface '%s'" %
329 (addr, iface))
330@@ -249,8 +257,9 @@
331
332
333 def sniff_iface(f):
334- """If no iface provided, inject net iface inferred from unit private
335- address.
336+ """Ensure decorated function is called with a value for iface.
337+
338+ If no iface provided, inject net iface inferred from unit private address.
339 """
340 def iface_sniffer(*args, **kwargs):
341 if not kwargs.get('iface', None):
342@@ -293,7 +302,7 @@
343 if global_addrs:
344 # Make sure any found global addresses are not temporary
345 cmd = ['ip', 'addr', 'show', iface]
346- out = subprocess.check_output(cmd)
347+ out = subprocess.check_output(cmd).decode('UTF-8')
348 if dynamic_only:
349 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
350 else:
351@@ -315,33 +324,28 @@
352 return addrs
353
354 if fatal:
355- raise Exception("Interface '%s' doesn't have a scope global "
356+ raise Exception("Interface '%s' does not have a scope global "
357 "non-temporary ipv6 address." % iface)
358
359 return []
360
361
362 def get_bridges(vnic_dir='/sys/devices/virtual/net'):
363- """
364- Return a list of bridges on the system or []
365- """
366- b_rgex = vnic_dir + '/*/bridge'
367- return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
368+ """Return a list of bridges on the system."""
369+ b_regex = "%s/*/bridge" % vnic_dir
370+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
371
372
373 def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
374- """
375- Return a list of nics comprising a given bridge on the system or []
376- """
377- brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
378- return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
379+ """Return a list of nics comprising a given bridge on the system."""
380+ brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
381+ return [x.split('/')[-1] for x in glob.glob(brif_regex)]
382
383
384 def is_bridge_member(nic):
385- """
386- Check if a given nic is a member of a bridge
387- """
388+ """Check if a given nic is a member of a bridge."""
389 for bridge in get_bridges():
390 if nic in get_bridge_nics(bridge):
391 return True
392+
393 return False
394
395=== added file 'hooks/charmhelpers/contrib/network/ufw.py'
396--- hooks/charmhelpers/contrib/network/ufw.py 1970-01-01 00:00:00 +0000
397+++ hooks/charmhelpers/contrib/network/ufw.py 2014-12-11 17:56:59 +0000
398@@ -0,0 +1,189 @@
399+"""
400+This module contains helpers to add and remove ufw rules.
401+
402+Examples:
403+
404+- open SSH port for subnet 10.0.3.0/24:
405+
406+ >>> from charmhelpers.contrib.network import ufw
407+ >>> ufw.enable()
408+ >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
409+
410+- open service by name as defined in /etc/services:
411+
412+ >>> from charmhelpers.contrib.network import ufw
413+ >>> ufw.enable()
414+ >>> ufw.service('ssh', 'open')
415+
416+- close service by port number:
417+
418+ >>> from charmhelpers.contrib.network import ufw
419+ >>> ufw.enable()
420+ >>> ufw.service('4949', 'close') # munin
421+"""
422+
423+__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
424+
425+import re
426+import os
427+import subprocess
428+from charmhelpers.core import hookenv
429+
430+
431+def is_enabled():
432+ """
433+ Check if `ufw` is enabled
434+
435+ :returns: True if ufw is enabled
436+ """
437+ output = subprocess.check_output(['ufw', 'status'],
438+ env={'LANG': 'en_US',
439+ 'PATH': os.environ['PATH']})
440+
441+ m = re.findall(r'^Status: active\n', output, re.M)
442+
443+ return len(m) >= 1
444+
445+
446+def enable():
447+ """
448+ Enable ufw
449+
450+ :returns: True if ufw is successfully enabled
451+ """
452+ if is_enabled():
453+ return True
454+
455+ output = subprocess.check_output(['ufw', 'enable'],
456+ env={'LANG': 'en_US',
457+ 'PATH': os.environ['PATH']})
458+
459+ m = re.findall('^Firewall is active and enabled on system startup\n',
460+ output, re.M)
461+ hookenv.log(output, level='DEBUG')
462+
463+ if len(m) == 0:
464+ hookenv.log("ufw couldn't be enabled", level='WARN')
465+ return False
466+ else:
467+ hookenv.log("ufw enabled", level='INFO')
468+ return True
469+
470+
471+def disable():
472+ """
473+ Disable ufw
474+
475+ :returns: True if ufw is successfully disabled
476+ """
477+ if not is_enabled():
478+ return True
479+
480+ output = subprocess.check_output(['ufw', 'disable'],
481+ env={'LANG': 'en_US',
482+ 'PATH': os.environ['PATH']})
483+
484+ m = re.findall(r'^Firewall stopped and disabled on system startup\n',
485+ output, re.M)
486+ hookenv.log(output, level='DEBUG')
487+
488+ if len(m) == 0:
489+ hookenv.log("ufw couldn't be disabled", level='WARN')
490+ return False
491+ else:
492+ hookenv.log("ufw disabled", level='INFO')
493+ return True
494+
495+
496+def modify_access(src, dst='any', port=None, proto=None, action='allow'):
497+ """
498+ Grant access to an address or subnet
499+
500+ :param src: address (e.g. 192.168.1.234) or subnet
501+ (e.g. 192.168.1.0/24).
502+ :param dst: destiny of the connection, if the machine has multiple IPs and
503+ connections to only one of those have to accepted this is the
504+ field has to be set.
505+ :param port: destiny port
506+ :param proto: protocol (tcp or udp)
507+ :param action: `allow` or `delete`
508+ """
509+ if not is_enabled():
510+ hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
511+ return
512+
513+ if action == 'delete':
514+ cmd = ['ufw', 'delete', 'allow']
515+ else:
516+ cmd = ['ufw', action]
517+
518+ if src is not None:
519+ cmd += ['from', src]
520+
521+ if dst is not None:
522+ cmd += ['to', dst]
523+
524+ if port is not None:
525+ cmd += ['port', port]
526+
527+ if proto is not None:
528+ cmd += ['proto', proto]
529+
530+ hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
531+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
532+ (stdout, stderr) = p.communicate()
533+
534+ hookenv.log(stdout, level='INFO')
535+
536+ if p.returncode != 0:
537+ hookenv.log(stderr, level='ERROR')
538+ hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
539+ p.returncode),
540+ level='ERROR')
541+
542+
543+def grant_access(src, dst='any', port=None, proto=None):
544+ """
545+ Grant access to an address or subnet
546+
547+ :param src: address (e.g. 192.168.1.234) or subnet
548+ (e.g. 192.168.1.0/24).
549+ :param dst: destiny of the connection, if the machine has multiple IPs and
550+ connections to only one of those have to accepted this is the
551+ field has to be set.
552+ :param port: destiny port
553+ :param proto: protocol (tcp or udp)
554+ """
555+ return modify_access(src, dst=dst, port=port, proto=proto, action='allow')
556+
557+
558+def revoke_access(src, dst='any', port=None, proto=None):
559+ """
560+ Revoke access to an address or subnet
561+
562+ :param src: address (e.g. 192.168.1.234) or subnet
563+ (e.g. 192.168.1.0/24).
564+ :param dst: destiny of the connection, if the machine has multiple IPs and
565+ connections to only one of those have to accepted this is the
566+ field has to be set.
567+ :param port: destiny port
568+ :param proto: protocol (tcp or udp)
569+ """
570+ return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
571+
572+
573+def service(name, action):
574+ """
575+ Open/close access to a service
576+
577+ :param name: could be a service name defined in `/etc/services` or a port
578+ number.
579+ :param action: `open` or `close`
580+ """
581+ if action == 'open':
582+ subprocess.check_output(['ufw', 'allow', name])
583+ elif action == 'close':
584+ subprocess.check_output(['ufw', 'delete', 'allow', name])
585+ else:
586+ raise Exception(("'{}' not supported, use 'allow' "
587+ "or 'delete'").format(action))
588
589=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
590--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-23 17:30:13 +0000
591+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-12-11 17:56:59 +0000
592@@ -1,3 +1,4 @@
593+import six
594 from charmhelpers.contrib.amulet.deployment import (
595 AmuletDeployment
596 )
597@@ -69,7 +70,7 @@
598
599 def _configure_services(self, configs):
600 """Configure all of the services."""
601- for service, config in configs.iteritems():
602+ for service, config in six.iteritems(configs):
603 self.d.configure(service, config)
604
605 def _get_openstack_release(self):
606
607=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
608--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-23 17:30:13 +0000
609+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-12-11 17:56:59 +0000
610@@ -7,6 +7,8 @@
611 import keystoneclient.v2_0 as keystone_client
612 import novaclient.v1_1.client as nova_client
613
614+import six
615+
616 from charmhelpers.contrib.amulet.utils import (
617 AmuletUtils
618 )
619@@ -60,7 +62,7 @@
620 expected service catalog endpoints.
621 """
622 self.log.debug('actual: {}'.format(repr(actual)))
623- for k, v in expected.iteritems():
624+ for k, v in six.iteritems(expected):
625 if k in actual:
626 ret = self._validate_dict_data(expected[k][0], actual[k][0])
627 if ret:
628
629=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
630--- hooks/charmhelpers/contrib/openstack/context.py 2014-10-23 17:30:13 +0000
631+++ hooks/charmhelpers/contrib/openstack/context.py 2014-12-11 17:56:59 +0000
632@@ -1,18 +1,15 @@
633 import json
634 import os
635 import time
636-
637 from base64 import b64decode
638+from subprocess import check_call
639
640-from subprocess import (
641- check_call
642-)
643+import six
644
645 from charmhelpers.fetch import (
646 apt_install,
647 filter_installed_packages,
648 )
649-
650 from charmhelpers.core.hookenv import (
651 config,
652 is_relation_made,
653@@ -24,44 +21,40 @@
654 relation_set,
655 unit_get,
656 unit_private_ip,
657+ DEBUG,
658+ INFO,
659+ WARNING,
660 ERROR,
661- DEBUG
662 )
663-
664 from charmhelpers.core.host import (
665 mkdir,
666- write_file
667+ write_file,
668 )
669-
670 from charmhelpers.contrib.hahelpers.cluster import (
671 determine_apache_port,
672 determine_api_port,
673 https,
674- is_clustered
675+ is_clustered,
676 )
677-
678 from charmhelpers.contrib.hahelpers.apache import (
679 get_cert,
680 get_ca_cert,
681 install_ca_cert,
682 )
683-
684 from charmhelpers.contrib.openstack.neutron import (
685 neutron_plugin_attribute,
686 )
687-
688 from charmhelpers.contrib.network.ip import (
689 get_address_in_network,
690 get_ipv6_addr,
691 get_netmask_for_address,
692 format_ipv6_addr,
693- is_address_in_network
694+ is_address_in_network,
695 )
696+from charmhelpers.contrib.openstack.utils import get_host_ip
697
698-from charmhelpers.contrib.openstack.utils import (
699- get_host_ip,
700-)
701 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
702+ADDRESS_TYPES = ['admin', 'internal', 'public']
703
704
705 class OSContextError(Exception):
706@@ -69,7 +62,7 @@
707
708
709 def ensure_packages(packages):
710- '''Install but do not upgrade required plugin packages'''
711+ """Install but do not upgrade required plugin packages."""
712 required = filter_installed_packages(packages)
713 if required:
714 apt_install(required, fatal=True)
715@@ -77,20 +70,27 @@
716
717 def context_complete(ctxt):
718 _missing = []
719- for k, v in ctxt.iteritems():
720+ for k, v in six.iteritems(ctxt):
721 if v is None or v == '':
722 _missing.append(k)
723+
724 if _missing:
725- log('Missing required data: %s' % ' '.join(_missing), level='INFO')
726+ log('Missing required data: %s' % ' '.join(_missing), level=INFO)
727 return False
728+
729 return True
730
731
732 def config_flags_parser(config_flags):
733+ """Parses config flags string into dict.
734+
735+ The provided config_flags string may be a list of comma-separated values
736+ which themselves may be comma-separated list of values.
737+ """
738 if config_flags.find('==') >= 0:
739- log("config_flags is not in expected format (key=value)",
740- level=ERROR)
741+ log("config_flags is not in expected format (key=value)", level=ERROR)
742 raise OSContextError
743+
744 # strip the following from each value.
745 post_strippers = ' ,'
746 # we strip any leading/trailing '=' or ' ' from the string then
747@@ -98,7 +98,7 @@
748 split = config_flags.strip(' =').split('=')
749 limit = len(split)
750 flags = {}
751- for i in xrange(0, limit - 1):
752+ for i in range(0, limit - 1):
753 current = split[i]
754 next = split[i + 1]
755 vindex = next.rfind(',')
756@@ -113,17 +113,18 @@
757 # if this not the first entry, expect an embedded key.
758 index = current.rfind(',')
759 if index < 0:
760- log("invalid config value(s) at index %s" % (i),
761- level=ERROR)
762+ log("Invalid config value(s) at index %s" % (i), level=ERROR)
763 raise OSContextError
764 key = current[index + 1:]
765
766 # Add to collection.
767 flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
768+
769 return flags
770
771
772 class OSContextGenerator(object):
773+ """Base class for all context generators."""
774 interfaces = []
775
776 def __call__(self):
777@@ -135,11 +136,11 @@
778
779 def __init__(self,
780 database=None, user=None, relation_prefix=None, ssl_dir=None):
781- '''
782- Allows inspecting relation for settings prefixed with relation_prefix.
783- This is useful for parsing access for multiple databases returned via
784- the shared-db interface (eg, nova_password, quantum_password)
785- '''
786+ """Allows inspecting relation for settings prefixed with
787+ relation_prefix. This is useful for parsing access for multiple
788+ databases returned via the shared-db interface (eg, nova_password,
789+ quantum_password)
790+ """
791 self.relation_prefix = relation_prefix
792 self.database = database
793 self.user = user
794@@ -149,9 +150,8 @@
795 self.database = self.database or config('database')
796 self.user = self.user or config('database-user')
797 if None in [self.database, self.user]:
798- log('Could not generate shared_db context. '
799- 'Missing required charm config options. '
800- '(database name and user)')
801+ log("Could not generate shared_db context. Missing required charm "
802+ "config options. (database name and user)", level=ERROR)
803 raise OSContextError
804
805 ctxt = {}
806@@ -204,23 +204,24 @@
807 def __call__(self):
808 self.database = self.database or config('database')
809 if self.database is None:
810- log('Could not generate postgresql_db context. '
811- 'Missing required charm config options. '
812- '(database name)')
813+ log('Could not generate postgresql_db context. Missing required '
814+ 'charm config options. (database name)', level=ERROR)
815 raise OSContextError
816+
817 ctxt = {}
818-
819 for rid in relation_ids(self.interfaces[0]):
820 for unit in related_units(rid):
821- ctxt = {
822- 'database_host': relation_get('host', rid=rid, unit=unit),
823- 'database': self.database,
824- 'database_user': relation_get('user', rid=rid, unit=unit),
825- 'database_password': relation_get('password', rid=rid, unit=unit),
826- 'database_type': 'postgresql',
827- }
828+ rel_host = relation_get('host', rid=rid, unit=unit)
829+ rel_user = relation_get('user', rid=rid, unit=unit)
830+ rel_passwd = relation_get('password', rid=rid, unit=unit)
831+ ctxt = {'database_host': rel_host,
832+ 'database': self.database,
833+ 'database_user': rel_user,
834+ 'database_password': rel_passwd,
835+ 'database_type': 'postgresql'}
836 if context_complete(ctxt):
837 return ctxt
838+
839 return {}
840
841
842@@ -229,23 +230,29 @@
843 ca_path = os.path.join(ssl_dir, 'db-client.ca')
844 with open(ca_path, 'w') as fh:
845 fh.write(b64decode(rdata['ssl_ca']))
846+
847 ctxt['database_ssl_ca'] = ca_path
848 elif 'ssl_ca' in rdata:
849- log("Charm not setup for ssl support but ssl ca found")
850+ log("Charm not setup for ssl support but ssl ca found", level=INFO)
851 return ctxt
852+
853 if 'ssl_cert' in rdata:
854 cert_path = os.path.join(
855 ssl_dir, 'db-client.cert')
856 if not os.path.exists(cert_path):
857- log("Waiting 1m for ssl client cert validity")
858+ log("Waiting 1m for ssl client cert validity", level=INFO)
859 time.sleep(60)
860+
861 with open(cert_path, 'w') as fh:
862 fh.write(b64decode(rdata['ssl_cert']))
863+
864 ctxt['database_ssl_cert'] = cert_path
865 key_path = os.path.join(ssl_dir, 'db-client.key')
866 with open(key_path, 'w') as fh:
867 fh.write(b64decode(rdata['ssl_key']))
868+
869 ctxt['database_ssl_key'] = key_path
870+
871 return ctxt
872
873
874@@ -253,9 +260,8 @@
875 interfaces = ['identity-service']
876
877 def __call__(self):
878- log('Generating template context for identity-service')
879+ log('Generating template context for identity-service', level=DEBUG)
880 ctxt = {}
881-
882 for rid in relation_ids('identity-service'):
883 for unit in related_units(rid):
884 rdata = relation_get(rid=rid, unit=unit)
885@@ -263,26 +269,24 @@
886 serv_host = format_ipv6_addr(serv_host) or serv_host
887 auth_host = rdata.get('auth_host')
888 auth_host = format_ipv6_addr(auth_host) or auth_host
889-
890- ctxt = {
891- 'service_port': rdata.get('service_port'),
892- 'service_host': serv_host,
893- 'auth_host': auth_host,
894- 'auth_port': rdata.get('auth_port'),
895- 'admin_tenant_name': rdata.get('service_tenant'),
896- 'admin_user': rdata.get('service_username'),
897- 'admin_password': rdata.get('service_password'),
898- 'service_protocol':
899- rdata.get('service_protocol') or 'http',
900- 'auth_protocol':
901- rdata.get('auth_protocol') or 'http',
902- }
903+ svc_protocol = rdata.get('service_protocol') or 'http'
904+ auth_protocol = rdata.get('auth_protocol') or 'http'
905+ ctxt = {'service_port': rdata.get('service_port'),
906+ 'service_host': serv_host,
907+ 'auth_host': auth_host,
908+ 'auth_port': rdata.get('auth_port'),
909+ 'admin_tenant_name': rdata.get('service_tenant'),
910+ 'admin_user': rdata.get('service_username'),
911+ 'admin_password': rdata.get('service_password'),
912+ 'service_protocol': svc_protocol,
913+ 'auth_protocol': auth_protocol}
914 if context_complete(ctxt):
915 # NOTE(jamespage) this is required for >= icehouse
916 # so a missing value just indicates keystone needs
917 # upgrading
918 ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
919 return ctxt
920+
921 return {}
922
923
924@@ -295,21 +299,23 @@
925 self.interfaces = [rel_name]
926
927 def __call__(self):
928- log('Generating template context for amqp')
929+ log('Generating template context for amqp', level=DEBUG)
930 conf = config()
931- user_setting = 'rabbit-user'
932- vhost_setting = 'rabbit-vhost'
933 if self.relation_prefix:
934- user_setting = self.relation_prefix + '-rabbit-user'
935- vhost_setting = self.relation_prefix + '-rabbit-vhost'
936+ user_setting = '%s-rabbit-user' % (self.relation_prefix)
937+ vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix)
938+ else:
939+ user_setting = 'rabbit-user'
940+ vhost_setting = 'rabbit-vhost'
941
942 try:
943 username = conf[user_setting]
944 vhost = conf[vhost_setting]
945 except KeyError as e:
946- log('Could not generate shared_db context. '
947- 'Missing required charm config options: %s.' % e)
948+ log('Could not generate shared_db context. Missing required charm '
949+ 'config options: %s.' % e, level=ERROR)
950 raise OSContextError
951+
952 ctxt = {}
953 for rid in relation_ids(self.rel_name):
954 ha_vip_only = False
955@@ -323,6 +329,7 @@
956 host = relation_get('private-address', rid=rid, unit=unit)
957 host = format_ipv6_addr(host) or host
958 ctxt['rabbitmq_host'] = host
959+
960 ctxt.update({
961 'rabbitmq_user': username,
962 'rabbitmq_password': relation_get('password', rid=rid,
963@@ -333,6 +340,7 @@
964 ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
965 if ssl_port:
966 ctxt['rabbit_ssl_port'] = ssl_port
967+
968 ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
969 if ssl_ca:
970 ctxt['rabbit_ssl_ca'] = ssl_ca
971@@ -346,41 +354,45 @@
972 if context_complete(ctxt):
973 if 'rabbit_ssl_ca' in ctxt:
974 if not self.ssl_dir:
975- log(("Charm not setup for ssl support "
976- "but ssl ca found"))
977+ log("Charm not setup for ssl support but ssl ca "
978+ "found", level=INFO)
979 break
980+
981 ca_path = os.path.join(
982 self.ssl_dir, 'rabbit-client-ca.pem')
983 with open(ca_path, 'w') as fh:
984 fh.write(b64decode(ctxt['rabbit_ssl_ca']))
985 ctxt['rabbit_ssl_ca'] = ca_path
986+
987 # Sufficient information found = break out!
988 break
989+
990 # Used for active/active rabbitmq >= grizzly
991- if ('clustered' not in ctxt or ha_vip_only) \
992- and len(related_units(rid)) > 1:
993+ if (('clustered' not in ctxt or ha_vip_only) and
994+ len(related_units(rid)) > 1):
995 rabbitmq_hosts = []
996 for unit in related_units(rid):
997 host = relation_get('private-address', rid=rid, unit=unit)
998 host = format_ipv6_addr(host) or host
999 rabbitmq_hosts.append(host)
1000- ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
1001+
1002+ ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts))
1003+
1004 if not context_complete(ctxt):
1005 return {}
1006- else:
1007- return ctxt
1008+
1009+ return ctxt
1010
1011
1012 class CephContext(OSContextGenerator):
1013+ """Generates context for /etc/ceph/ceph.conf templates."""
1014 interfaces = ['ceph']
1015
1016 def __call__(self):
1017- '''This generates context for /etc/ceph/ceph.conf templates'''
1018 if not relation_ids('ceph'):
1019 return {}
1020
1021- log('Generating template context for ceph')
1022-
1023+ log('Generating template context for ceph', level=DEBUG)
1024 mon_hosts = []
1025 auth = None
1026 key = None
1027@@ -389,18 +401,18 @@
1028 for unit in related_units(rid):
1029 auth = relation_get('auth', rid=rid, unit=unit)
1030 key = relation_get('key', rid=rid, unit=unit)
1031- ceph_addr = \
1032- relation_get('ceph-public-address', rid=rid, unit=unit) or \
1033- relation_get('private-address', rid=rid, unit=unit)
1034+ ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
1035+ unit=unit)
1036+ unit_priv_addr = relation_get('private-address', rid=rid,
1037+ unit=unit)
1038+ ceph_addr = ceph_pub_addr or unit_priv_addr
1039 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1040 mon_hosts.append(ceph_addr)
1041
1042- ctxt = {
1043- 'mon_hosts': ' '.join(mon_hosts),
1044- 'auth': auth,
1045- 'key': key,
1046- 'use_syslog': use_syslog
1047- }
1048+ ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
1049+ 'auth': auth,
1050+ 'key': key,
1051+ 'use_syslog': use_syslog}
1052
1053 if not os.path.isdir('/etc/ceph'):
1054 os.mkdir('/etc/ceph')
1055@@ -409,79 +421,68 @@
1056 return {}
1057
1058 ensure_packages(['ceph-common'])
1059-
1060 return ctxt
1061
1062
1063-ADDRESS_TYPES = ['admin', 'internal', 'public']
1064-
1065-
1066 class HAProxyContext(OSContextGenerator):
1067+ """Provides half a context for the haproxy template, which describes
1068+ all peers to be included in the cluster. Each charm needs to include
1069+ its own context generator that describes the port mapping.
1070+ """
1071 interfaces = ['cluster']
1072
1073+ def __init__(self, singlenode_mode=False):
1074+ self.singlenode_mode = singlenode_mode
1075+
1076 def __call__(self):
1077- '''
1078- Builds half a context for the haproxy template, which describes
1079- all peers to be included in the cluster. Each charm needs to include
1080- its own context generator that describes the port mapping.
1081- '''
1082- if not relation_ids('cluster'):
1083+ if not relation_ids('cluster') and not self.singlenode_mode:
1084 return {}
1085
1086- l_unit = local_unit().replace('/', '-')
1087-
1088 if config('prefer-ipv6'):
1089 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1090 else:
1091 addr = get_host_ip(unit_get('private-address'))
1092
1093+ l_unit = local_unit().replace('/', '-')
1094 cluster_hosts = {}
1095
1096 # NOTE(jamespage): build out map of configured network endpoints
1097 # and associated backends
1098 for addr_type in ADDRESS_TYPES:
1099- laddr = get_address_in_network(
1100- config('os-{}-network'.format(addr_type)))
1101+ cfg_opt = 'os-{}-network'.format(addr_type)
1102+ laddr = get_address_in_network(config(cfg_opt))
1103 if laddr:
1104- cluster_hosts[laddr] = {}
1105- cluster_hosts[laddr]['network'] = "{}/{}".format(
1106- laddr,
1107- get_netmask_for_address(laddr)
1108- )
1109- cluster_hosts[laddr]['backends'] = {}
1110- cluster_hosts[laddr]['backends'][l_unit] = laddr
1111+ netmask = get_netmask_for_address(laddr)
1112+ cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
1113+ netmask),
1114+ 'backends': {l_unit: laddr}}
1115 for rid in relation_ids('cluster'):
1116 for unit in related_units(rid):
1117- _unit = unit.replace('/', '-')
1118 _laddr = relation_get('{}-address'.format(addr_type),
1119 rid=rid, unit=unit)
1120 if _laddr:
1121+ _unit = unit.replace('/', '-')
1122 cluster_hosts[laddr]['backends'][_unit] = _laddr
1123
1124 # NOTE(jamespage) no split configurations found, just use
1125 # private addresses
1126 if not cluster_hosts:
1127- cluster_hosts[addr] = {}
1128- cluster_hosts[addr]['network'] = "{}/{}".format(
1129- addr,
1130- get_netmask_for_address(addr)
1131- )
1132- cluster_hosts[addr]['backends'] = {}
1133- cluster_hosts[addr]['backends'][l_unit] = addr
1134+ netmask = get_netmask_for_address(addr)
1135+ cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
1136+ 'backends': {l_unit: addr}}
1137 for rid in relation_ids('cluster'):
1138 for unit in related_units(rid):
1139- _unit = unit.replace('/', '-')
1140 _laddr = relation_get('private-address',
1141 rid=rid, unit=unit)
1142 if _laddr:
1143+ _unit = unit.replace('/', '-')
1144 cluster_hosts[addr]['backends'][_unit] = _laddr
1145
1146- ctxt = {
1147- 'frontends': cluster_hosts,
1148- }
1149+ ctxt = {'frontends': cluster_hosts}
1150
1151 if config('haproxy-server-timeout'):
1152 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
1153+
1154 if config('haproxy-client-timeout'):
1155 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
1156
1157@@ -495,13 +496,18 @@
1158 ctxt['stat_port'] = ':8888'
1159
1160 for frontend in cluster_hosts:
1161- if len(cluster_hosts[frontend]['backends']) > 1:
1162+ if (len(cluster_hosts[frontend]['backends']) > 1 or
1163+ self.singlenode_mode):
1164 # Enable haproxy when we have enough peers.
1165- log('Ensuring haproxy enabled in /etc/default/haproxy.')
1166+ log('Ensuring haproxy enabled in /etc/default/haproxy.',
1167+ level=DEBUG)
1168 with open('/etc/default/haproxy', 'w') as out:
1169 out.write('ENABLED=1\n')
1170+
1171 return ctxt
1172- log('HAProxy context is incomplete, this unit has no peers.')
1173+
1174+ log('HAProxy context is incomplete, this unit has no peers.',
1175+ level=INFO)
1176 return {}
1177
1178
1179@@ -509,29 +515,28 @@
1180 interfaces = ['image-service']
1181
1182 def __call__(self):
1183- '''
1184- Obtains the glance API server from the image-service relation. Useful
1185- in nova and cinder (currently).
1186- '''
1187- log('Generating template context for image-service.')
1188+ """Obtains the glance API server from the image-service relation.
1189+ Useful in nova and cinder (currently).
1190+ """
1191+ log('Generating template context for image-service.', level=DEBUG)
1192 rids = relation_ids('image-service')
1193 if not rids:
1194 return {}
1195+
1196 for rid in rids:
1197 for unit in related_units(rid):
1198 api_server = relation_get('glance-api-server',
1199 rid=rid, unit=unit)
1200 if api_server:
1201 return {'glance_api_servers': api_server}
1202- log('ImageService context is incomplete. '
1203- 'Missing required relation data.')
1204+
1205+ log("ImageService context is incomplete. Missing required relation "
1206+ "data.", level=INFO)
1207 return {}
1208
1209
1210 class ApacheSSLContext(OSContextGenerator):
1211-
1212- """
1213- Generates a context for an apache vhost configuration that configures
1214+ """Generates a context for an apache vhost configuration that configures
1215 HTTPS reverse proxying for one or many endpoints. Generated context
1216 looks something like::
1217
1218@@ -565,6 +570,7 @@
1219 else:
1220 cert_filename = 'cert'
1221 key_filename = 'key'
1222+
1223 write_file(path=os.path.join(ssl_dir, cert_filename),
1224 content=b64decode(cert))
1225 write_file(path=os.path.join(ssl_dir, key_filename),
1226@@ -576,7 +582,8 @@
1227 install_ca_cert(b64decode(ca_cert))
1228
1229 def canonical_names(self):
1230- '''Figure out which canonical names clients will access this service'''
1231+ """Figure out which canonical names clients will access this service.
1232+ """
1233 cns = []
1234 for r_id in relation_ids('identity-service'):
1235 for unit in related_units(r_id):
1236@@ -584,55 +591,80 @@
1237 for k in rdata:
1238 if k.startswith('ssl_key_'):
1239 cns.append(k.lstrip('ssl_key_'))
1240- return list(set(cns))
1241+
1242+ return sorted(list(set(cns)))
1243+
1244+ def get_network_addresses(self):
1245+ """For each network configured, return corresponding address and vip
1246+ (if available).
1247+
1248+ Returns a list of tuples of the form:
1249+
1250+ [(address_in_net_a, vip_in_net_a),
1251+ (address_in_net_b, vip_in_net_b),
1252+ ...]
1253+
1254+ or, if no vip(s) available:
1255+
1256+ [(address_in_net_a, address_in_net_a),
1257+ (address_in_net_b, address_in_net_b),
1258+ ...]
1259+ """
1260+ addresses = []
1261+ if config('vip'):
1262+ vips = config('vip').split()
1263+ else:
1264+ vips = []
1265+
1266+ for net_type in ['os-internal-network', 'os-admin-network',
1267+ 'os-public-network']:
1268+ addr = get_address_in_network(config(net_type),
1269+ unit_get('private-address'))
1270+ if len(vips) > 1 and is_clustered():
1271+ if not config(net_type):
1272+ log("Multiple networks configured but net_type "
1273+ "is None (%s)." % net_type, level=WARNING)
1274+ continue
1275+
1276+ for vip in vips:
1277+ if is_address_in_network(config(net_type), vip):
1278+ addresses.append((addr, vip))
1279+ break
1280+
1281+ elif is_clustered() and config('vip'):
1282+ addresses.append((addr, config('vip')))
1283+ else:
1284+ addresses.append((addr, addr))
1285+
1286+ return sorted(addresses)
1287
1288 def __call__(self):
1289- if isinstance(self.external_ports, basestring):
1290+ if isinstance(self.external_ports, six.string_types):
1291 self.external_ports = [self.external_ports]
1292- if (not self.external_ports or not https()):
1293+
1294+ if not self.external_ports or not https():
1295 return {}
1296
1297 self.configure_ca()
1298 self.enable_modules()
1299
1300- ctxt = {
1301- 'namespace': self.service_namespace,
1302- 'endpoints': [],
1303- 'ext_ports': []
1304- }
1305+ ctxt = {'namespace': self.service_namespace,
1306+ 'endpoints': [],
1307+ 'ext_ports': []}
1308
1309 for cn in self.canonical_names():
1310 self.configure_cert(cn)
1311
1312- addresses = []
1313- vips = []
1314- if config('vip'):
1315- vips = config('vip').split()
1316-
1317- for network_type in ['os-internal-network',
1318- 'os-admin-network',
1319- 'os-public-network']:
1320- address = get_address_in_network(config(network_type),
1321- unit_get('private-address'))
1322- if len(vips) > 0 and is_clustered():
1323- for vip in vips:
1324- if is_address_in_network(config(network_type),
1325- vip):
1326- addresses.append((address, vip))
1327- break
1328- elif is_clustered():
1329- addresses.append((address, config('vip')))
1330- else:
1331- addresses.append((address, address))
1332-
1333- for address, endpoint in set(addresses):
1334+ addresses = self.get_network_addresses()
1335+ for address, endpoint in sorted(set(addresses)):
1336 for api_port in self.external_ports:
1337 ext_port = determine_apache_port(api_port)
1338 int_port = determine_api_port(api_port)
1339 portmap = (address, endpoint, int(ext_port), int(int_port))
1340 ctxt['endpoints'].append(portmap)
1341 ctxt['ext_ports'].append(int(ext_port))
1342- ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
1343+
1344+ ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports'])))
1345 return ctxt
1346
1347
1348@@ -649,21 +681,23 @@
1349
1350 @property
1351 def packages(self):
1352- return neutron_plugin_attribute(
1353- self.plugin, 'packages', self.network_manager)
1354+ return neutron_plugin_attribute(self.plugin, 'packages',
1355+ self.network_manager)
1356
1357 @property
1358 def neutron_security_groups(self):
1359 return None
1360
1361 def _ensure_packages(self):
1362- [ensure_packages(pkgs) for pkgs in self.packages]
1363+ for pkgs in self.packages:
1364+ ensure_packages(pkgs)
1365
1366 def _save_flag_file(self):
1367 if self.network_manager == 'quantum':
1368 _file = '/etc/nova/quantum_plugin.conf'
1369 else:
1370 _file = '/etc/nova/neutron_plugin.conf'
1371+
1372 with open(_file, 'wb') as out:
1373 out.write(self.plugin + '\n')
1374
1375@@ -672,13 +706,11 @@
1376 self.network_manager)
1377 config = neutron_plugin_attribute(self.plugin, 'config',
1378 self.network_manager)
1379- ovs_ctxt = {
1380- 'core_plugin': driver,
1381- 'neutron_plugin': 'ovs',
1382- 'neutron_security_groups': self.neutron_security_groups,
1383- 'local_ip': unit_private_ip(),
1384- 'config': config
1385- }
1386+ ovs_ctxt = {'core_plugin': driver,
1387+ 'neutron_plugin': 'ovs',
1388+ 'neutron_security_groups': self.neutron_security_groups,
1389+ 'local_ip': unit_private_ip(),
1390+ 'config': config}
1391
1392 return ovs_ctxt
1393
1394@@ -687,13 +719,11 @@
1395 self.network_manager)
1396 config = neutron_plugin_attribute(self.plugin, 'config',
1397 self.network_manager)
1398- nvp_ctxt = {
1399- 'core_plugin': driver,
1400- 'neutron_plugin': 'nvp',
1401- 'neutron_security_groups': self.neutron_security_groups,
1402- 'local_ip': unit_private_ip(),
1403- 'config': config
1404- }
1405+ nvp_ctxt = {'core_plugin': driver,
1406+ 'neutron_plugin': 'nvp',
1407+ 'neutron_security_groups': self.neutron_security_groups,
1408+ 'local_ip': unit_private_ip(),
1409+ 'config': config}
1410
1411 return nvp_ctxt
1412
1413@@ -702,35 +732,50 @@
1414 self.network_manager)
1415 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
1416 self.network_manager)
1417- n1kv_ctxt = {
1418- 'core_plugin': driver,
1419- 'neutron_plugin': 'n1kv',
1420- 'neutron_security_groups': self.neutron_security_groups,
1421- 'local_ip': unit_private_ip(),
1422- 'config': n1kv_config,
1423- 'vsm_ip': config('n1kv-vsm-ip'),
1424- 'vsm_username': config('n1kv-vsm-username'),
1425- 'vsm_password': config('n1kv-vsm-password'),
1426- 'restrict_policy_profiles': config(
1427- 'n1kv_restrict_policy_profiles'),
1428- }
1429+ n1kv_user_config_flags = config('n1kv-config-flags')
1430+ restrict_policy_profiles = config('n1kv-restrict-policy-profiles')
1431+ n1kv_ctxt = {'core_plugin': driver,
1432+ 'neutron_plugin': 'n1kv',
1433+ 'neutron_security_groups': self.neutron_security_groups,
1434+ 'local_ip': unit_private_ip(),
1435+ 'config': n1kv_config,
1436+ 'vsm_ip': config('n1kv-vsm-ip'),
1437+ 'vsm_username': config('n1kv-vsm-username'),
1438+ 'vsm_password': config('n1kv-vsm-password'),
1439+ 'restrict_policy_profiles': restrict_policy_profiles}
1440+
1441+ if n1kv_user_config_flags:
1442+ flags = config_flags_parser(n1kv_user_config_flags)
1443+ n1kv_ctxt['user_config_flags'] = flags
1444
1445 return n1kv_ctxt
1446
1447+ def calico_ctxt(self):
1448+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1449+ self.network_manager)
1450+ config = neutron_plugin_attribute(self.plugin, 'config',
1451+ self.network_manager)
1452+ calico_ctxt = {'core_plugin': driver,
1453+ 'neutron_plugin': 'Calico',
1454+ 'neutron_security_groups': self.neutron_security_groups,
1455+ 'local_ip': unit_private_ip(),
1456+ 'config': config}
1457+
1458+ return calico_ctxt
1459+
1460 def neutron_ctxt(self):
1461 if https():
1462 proto = 'https'
1463 else:
1464 proto = 'http'
1465+
1466 if is_clustered():
1467 host = config('vip')
1468 else:
1469 host = unit_get('private-address')
1470- url = '%s://%s:%s' % (proto, host, '9696')
1471- ctxt = {
1472- 'network_manager': self.network_manager,
1473- 'neutron_url': url,
1474- }
1475+
1476+ ctxt = {'network_manager': self.network_manager,
1477+ 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
1478 return ctxt
1479
1480 def __call__(self):
1481@@ -750,6 +795,8 @@
1482 ctxt.update(self.nvp_ctxt())
1483 elif self.plugin == 'n1kv':
1484 ctxt.update(self.n1kv_ctxt())
1485+ elif self.plugin == 'Calico':
1486+ ctxt.update(self.calico_ctxt())
1487
1488 alchemy_flags = config('neutron-alchemy-flags')
1489 if alchemy_flags:
1490@@ -761,23 +808,40 @@
1491
1492
1493 class OSConfigFlagContext(OSContextGenerator):
1494-
1495- """
1496- Responsible for adding user-defined config-flags in charm config to a
1497- template context.
1498+ """Provides support for user-defined config flags.
1499+
1500+ Users can define a comma-seperated list of key=value pairs
1501+ in the charm configuration and apply them at any point in
1502+ any file by using a template flag.
1503+
1504+ Sometimes users might want config flags inserted within a
1505+ specific section so this class allows users to specify the
1506+ template flag name, allowing for multiple template flags
1507+ (sections) within the same context.
1508
1509 NOTE: the value of config-flags may be a comma-separated list of
1510 key=value pairs and some Openstack config files support
1511 comma-separated lists as values.
1512 """
1513
1514+ def __init__(self, charm_flag='config-flags',
1515+ template_flag='user_config_flags'):
1516+ """
1517+ :param charm_flag: config flags in charm configuration.
1518+ :param template_flag: insert point for user-defined flags in template
1519+ file.
1520+ """
1521+ super(OSConfigFlagContext, self).__init__()
1522+ self._charm_flag = charm_flag
1523+ self._template_flag = template_flag
1524+
1525 def __call__(self):
1526- config_flags = config('config-flags')
1527+ config_flags = config(self._charm_flag)
1528 if not config_flags:
1529 return {}
1530
1531- flags = config_flags_parser(config_flags)
1532- return {'user_config_flags': flags}
1533+ return {self._template_flag:
1534+ config_flags_parser(config_flags)}
1535
1536
1537 class SubordinateConfigContext(OSContextGenerator):
1538@@ -821,7 +885,6 @@
1539 },
1540 }
1541 }
1542-
1543 """
1544
1545 def __init__(self, service, config_file, interface):
1546@@ -851,26 +914,28 @@
1547
1548 if self.service not in sub_config:
1549 log('Found subordinate_config on %s but it contained'
1550- 'nothing for %s service' % (rid, self.service))
1551+ 'nothing for %s service' % (rid, self.service),
1552+ level=INFO)
1553 continue
1554
1555 sub_config = sub_config[self.service]
1556 if self.config_file not in sub_config:
1557 log('Found subordinate_config on %s but it contained'
1558- 'nothing for %s' % (rid, self.config_file))
1559+ 'nothing for %s' % (rid, self.config_file),
1560+ level=INFO)
1561 continue
1562
1563 sub_config = sub_config[self.config_file]
1564- for k, v in sub_config.iteritems():
1565+ for k, v in six.iteritems(sub_config):
1566 if k == 'sections':
1567- for section, config_dict in v.iteritems():
1568- log("adding section '%s'" % (section))
1569+ for section, config_dict in six.iteritems(v):
1570+ log("adding section '%s'" % (section),
1571+ level=DEBUG)
1572 ctxt[k][section] = config_dict
1573 else:
1574 ctxt[k] = v
1575
1576 log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1577-
1578 return ctxt
1579
1580
1581@@ -882,15 +947,14 @@
1582 False if config('debug') is None else config('debug')
1583 ctxt['verbose'] = \
1584 False if config('verbose') is None else config('verbose')
1585+
1586 return ctxt
1587
1588
1589 class SyslogContext(OSContextGenerator):
1590
1591 def __call__(self):
1592- ctxt = {
1593- 'use_syslog': config('use-syslog')
1594- }
1595+ ctxt = {'use_syslog': config('use-syslog')}
1596 return ctxt
1597
1598
1599@@ -898,13 +962,9 @@
1600
1601 def __call__(self):
1602 if config('prefer-ipv6'):
1603- return {
1604- 'bind_host': '::'
1605- }
1606+ return {'bind_host': '::'}
1607 else:
1608- return {
1609- 'bind_host': '0.0.0.0'
1610- }
1611+ return {'bind_host': '0.0.0.0'}
1612
1613
1614 class WorkerConfigContext(OSContextGenerator):
1615@@ -916,13 +976,12 @@
1616 except ImportError:
1617 apt_install('python-psutil', fatal=True)
1618 from psutil import NUM_CPUS
1619+
1620 return NUM_CPUS
1621
1622 def __call__(self):
1623- multiplier = config('worker-multiplier') or 1
1624- ctxt = {
1625- "workers": self.num_cpus * multiplier
1626- }
1627+ multiplier = config('worker-multiplier') or 0
1628+ ctxt = {"workers": self.num_cpus * multiplier}
1629 return ctxt
1630
1631
1632@@ -936,22 +995,23 @@
1633 for unit in related_units(rid):
1634 ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1635 ctxt['zmq_host'] = relation_get('host', unit, rid)
1636+
1637 return ctxt
1638
1639
1640 class NotificationDriverContext(OSContextGenerator):
1641
1642- def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'):
1643+ def __init__(self, zmq_relation='zeromq-configuration',
1644+ amqp_relation='amqp'):
1645 """
1646- :param zmq_relation : Name of Zeromq relation to check
1647+ :param zmq_relation: Name of Zeromq relation to check
1648 """
1649 self.zmq_relation = zmq_relation
1650 self.amqp_relation = amqp_relation
1651
1652 def __call__(self):
1653- ctxt = {
1654- 'notifications': 'False',
1655- }
1656+ ctxt = {'notifications': 'False'}
1657 if is_relation_made(self.amqp_relation):
1658 ctxt['notifications'] = "True"
1659+
1660 return ctxt
1661
1662=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
1663--- hooks/charmhelpers/contrib/openstack/ip.py 2014-10-23 17:30:13 +0000
1664+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-12-11 17:56:59 +0000
1665@@ -2,21 +2,19 @@
1666 config,
1667 unit_get,
1668 )
1669-
1670 from charmhelpers.contrib.network.ip import (
1671 get_address_in_network,
1672 is_address_in_network,
1673 is_ipv6,
1674 get_ipv6_addr,
1675 )
1676-
1677 from charmhelpers.contrib.hahelpers.cluster import is_clustered
1678
1679 PUBLIC = 'public'
1680 INTERNAL = 'int'
1681 ADMIN = 'admin'
1682
1683-_address_map = {
1684+ADDRESS_MAP = {
1685 PUBLIC: {
1686 'config': 'os-public-network',
1687 'fallback': 'public-address'
1688@@ -33,16 +31,14 @@
1689
1690
1691 def canonical_url(configs, endpoint_type=PUBLIC):
1692- '''
1693- Returns the correct HTTP URL to this host given the state of HTTPS
1694+ """Returns the correct HTTP URL to this host given the state of HTTPS
1695 configuration, hacluster and charm configuration.
1696
1697- :configs OSTemplateRenderer: A config tempating object to inspect for
1698- a complete https context.
1699- :endpoint_type str: The endpoint type to resolve.
1700-
1701- :returns str: Base URL for services on the current service unit.
1702- '''
1703+ :param configs: OSTemplateRenderer config templating object to inspect
1704+ for a complete https context.
1705+ :param endpoint_type: str endpoint type to resolve.
1706+ :param returns: str base URL for services on the current service unit.
1707+ """
1708 scheme = 'http'
1709 if 'https' in configs.complete_contexts():
1710 scheme = 'https'
1711@@ -53,27 +49,45 @@
1712
1713
1714 def resolve_address(endpoint_type=PUBLIC):
1715+ """Return unit address depending on net config.
1716+
1717+ If unit is clustered with vip(s) and has net splits defined, return vip on
1718+ correct network. If clustered with no nets defined, return primary vip.
1719+
1720+ If not clustered, return unit address ensuring address is on configured net
1721+ split if one is configured.
1722+
1723+ :param endpoint_type: Network endpoing type
1724+ """
1725 resolved_address = None
1726- if is_clustered():
1727- if config(_address_map[endpoint_type]['config']) is None:
1728- # Assume vip is simple and pass back directly
1729- resolved_address = config('vip')
1730+ vips = config('vip')
1731+ if vips:
1732+ vips = vips.split()
1733+
1734+ net_type = ADDRESS_MAP[endpoint_type]['config']
1735+ net_addr = config(net_type)
1736+ net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
1737+ clustered = is_clustered()
1738+ if clustered:
1739+ if not net_addr:
1740+ # If no net-splits defined, we expect a single vip
1741+ resolved_address = vips[0]
1742 else:
1743- for vip in config('vip').split():
1744- if is_address_in_network(
1745- config(_address_map[endpoint_type]['config']),
1746- vip):
1747+ for vip in vips:
1748+ if is_address_in_network(net_addr, vip):
1749 resolved_address = vip
1750+ break
1751 else:
1752 if config('prefer-ipv6'):
1753- fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1754+ fallback_addr = get_ipv6_addr(exc_list=vips)[0]
1755 else:
1756- fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1757- resolved_address = get_address_in_network(
1758- config(_address_map[endpoint_type]['config']), fallback_addr)
1759+ fallback_addr = unit_get(net_fallback)
1760+
1761+ resolved_address = get_address_in_network(net_addr, fallback_addr)
1762
1763 if resolved_address is None:
1764- raise ValueError('Unable to resolve a suitable IP address'
1765- ' based on charm state and configuration')
1766- else:
1767- return resolved_address
1768+ raise ValueError("Unable to resolve a suitable IP address based on "
1769+ "charm state and configuration. (net_type=%s, "
1770+ "clustered=%s)" % (net_type, clustered))
1771+
1772+ return resolved_address
1773
1774=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1775--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-10-23 17:30:13 +0000
1776+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-12-11 17:56:59 +0000
1777@@ -14,7 +14,7 @@
1778 def headers_package():
1779 """Ensures correct linux-headers for running kernel are installed,
1780 for building DKMS package"""
1781- kver = check_output(['uname', '-r']).strip()
1782+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
1783 return 'linux-headers-%s' % kver
1784
1785 QUANTUM_CONF_DIR = '/etc/quantum'
1786@@ -22,7 +22,7 @@
1787
1788 def kernel_version():
1789 """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
1790- kver = check_output(['uname', '-r']).strip()
1791+ kver = check_output(['uname', '-r']).decode('UTF-8').strip()
1792 kver = kver.split('.')
1793 return (int(kver[0]), int(kver[1]))
1794
1795@@ -138,10 +138,25 @@
1796 relation_prefix='neutron',
1797 ssl_dir=NEUTRON_CONF_DIR)],
1798 'services': [],
1799- 'packages': [['neutron-plugin-cisco']],
1800+ 'packages': [[headers_package()] + determine_dkms_package(),
1801+ ['neutron-plugin-cisco']],
1802 'server_packages': ['neutron-server',
1803 'neutron-plugin-cisco'],
1804 'server_services': ['neutron-server']
1805+ },
1806+ 'Calico': {
1807+ 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
1808+ 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
1809+ 'contexts': [
1810+ context.SharedDBContext(user=config('neutron-database-user'),
1811+ database=config('neutron-database'),
1812+ relation_prefix='neutron',
1813+ ssl_dir=NEUTRON_CONF_DIR)],
1814+ 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
1815+ 'packages': [[headers_package()] + determine_dkms_package(),
1816+ ['calico-compute', 'bird', 'neutron-dhcp-agent']],
1817+ 'server_packages': ['neutron-server', 'calico-control'],
1818+ 'server_services': ['neutron-server']
1819 }
1820 }
1821 if release >= 'icehouse':
1822@@ -162,7 +177,8 @@
1823 elif manager == 'neutron':
1824 plugins = neutron_plugins()
1825 else:
1826- log('Error: Network manager does not support plugins.')
1827+ log("Network manager '%s' does not support plugins." % (manager),
1828+ level=ERROR)
1829 raise Exception
1830
1831 try:
1832
1833=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
1834--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-10-23 17:30:13 +0000
1835+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-12-11 17:56:59 +0000
1836@@ -35,7 +35,7 @@
1837 stats auth admin:password
1838
1839 {% if frontends -%}
1840-{% for service, ports in service_ports.iteritems() -%}
1841+{% for service, ports in service_ports.items() -%}
1842 frontend tcp-in_{{ service }}
1843 bind *:{{ ports[0] }}
1844 bind :::{{ ports[0] }}
1845@@ -46,7 +46,7 @@
1846 {% for frontend in frontends -%}
1847 backend {{ service }}_{{ frontend }}
1848 balance leastconn
1849- {% for unit, address in frontends[frontend]['backends'].iteritems() -%}
1850+ {% for unit, address in frontends[frontend]['backends'].items() -%}
1851 server {{ unit }} {{ address }}:{{ ports[1] }} check
1852 {% endfor %}
1853 {% endfor -%}
1854
1855=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1856--- hooks/charmhelpers/contrib/openstack/templating.py 2014-10-23 17:30:13 +0000
1857+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-12-11 17:56:59 +0000
1858@@ -1,13 +1,13 @@
1859 import os
1860
1861+import six
1862+
1863 from charmhelpers.fetch import apt_install
1864-
1865 from charmhelpers.core.hookenv import (
1866 log,
1867 ERROR,
1868 INFO
1869 )
1870-
1871 from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
1872
1873 try:
1874@@ -43,7 +43,7 @@
1875 order by OpenStack release.
1876 """
1877 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1878- for rel in OPENSTACK_CODENAMES.itervalues()]
1879+ for rel in six.itervalues(OPENSTACK_CODENAMES)]
1880
1881 if not os.path.isdir(templates_dir):
1882 log('Templates directory not found @ %s.' % templates_dir,
1883@@ -258,7 +258,7 @@
1884 """
1885 Write out all registered config files.
1886 """
1887- [self.write(k) for k in self.templates.iterkeys()]
1888+ [self.write(k) for k in six.iterkeys(self.templates)]
1889
1890 def set_release(self, openstack_release):
1891 """
1892@@ -275,5 +275,5 @@
1893 '''
1894 interfaces = []
1895 [interfaces.extend(i.complete_contexts())
1896- for i in self.templates.itervalues()]
1897+ for i in six.itervalues(self.templates)]
1898 return interfaces
1899
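The itervalues/iterkeys substitutions above follow the standard six idiom used throughout this sync so the same code runs on Python 2 and 3. A small standalone illustration of the pattern (the dictionary contents are examples only):

    import six

    codenames = {'2014.1': 'icehouse', '2014.2': 'juno'}

    # six.iteritems()/itervalues() map to dict.iteritems()/itervalues() on
    # Python 2 and to dict.items()/values() on Python 3.
    for version, name in six.iteritems(codenames):
        print("%s -> %s" % (version, name))

    releases = list(six.itervalues(codenames))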
1900=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1901--- hooks/charmhelpers/contrib/openstack/utils.py 2014-10-23 17:30:13 +0000
1902+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-12-11 17:56:59 +0000
1903@@ -10,11 +10,13 @@
1904 import socket
1905 import sys
1906
1907+import six
1908+import yaml
1909+
1910 from charmhelpers.core.hookenv import (
1911 config,
1912 log as juju_log,
1913 charm_dir,
1914- ERROR,
1915 INFO,
1916 relation_ids,
1917 relation_set
1918@@ -31,7 +33,8 @@
1919 )
1920
1921 from charmhelpers.core.host import lsb_release, mounts, umount
1922-from charmhelpers.fetch import apt_install, apt_cache
1923+from charmhelpers.fetch import apt_install, apt_cache, install_remote
1924+from charmhelpers.contrib.python.packages import pip_install
1925 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
1926 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
1927
1928@@ -113,7 +116,7 @@
1929
1930 # Best guess match based on deb string provided
1931 if src.startswith('deb') or src.startswith('ppa'):
1932- for k, v in OPENSTACK_CODENAMES.iteritems():
1933+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
1934 if v in src:
1935 return v
1936
1937@@ -134,7 +137,7 @@
1938
1939 def get_os_version_codename(codename):
1940 '''Determine OpenStack version number from codename.'''
1941- for k, v in OPENSTACK_CODENAMES.iteritems():
1942+ for k, v in six.iteritems(OPENSTACK_CODENAMES):
1943 if v == codename:
1944 return k
1945 e = 'Could not derive OpenStack version for '\
1946@@ -194,7 +197,7 @@
1947 else:
1948 vers_map = OPENSTACK_CODENAMES
1949
1950- for version, cname in vers_map.iteritems():
1951+ for version, cname in six.iteritems(vers_map):
1952 if cname == codename:
1953 return version
1954 # e = "Could not determine OpenStack version for package: %s" % pkg
1955@@ -318,7 +321,7 @@
1956 rc_script.write(
1957 "#!/bin/bash\n")
1958 [rc_script.write('export %s=%s\n' % (u, p))
1959- for u, p in env_vars.iteritems() if u != "script_path"]
1960+ for u, p in six.iteritems(env_vars) if u != "script_path"]
1961
1962
1963 def openstack_upgrade_available(package):
1964@@ -351,8 +354,8 @@
1965 '''
1966 _none = ['None', 'none', None]
1967 if (block_device in _none):
1968- error_out('prepare_storage(): Missing required input: '
1969- 'block_device=%s.' % block_device, level=ERROR)
1970+ error_out('prepare_storage(): Missing required input: block_device=%s.'
1971+ % block_device)
1972
1973 if block_device.startswith('/dev/'):
1974 bdev = block_device
1975@@ -368,8 +371,7 @@
1976 bdev = '/dev/%s' % block_device
1977
1978 if not is_block_device(bdev):
1979- error_out('Failed to locate valid block device at %s' % bdev,
1980- level=ERROR)
1981+ error_out('Failed to locate valid block device at %s' % bdev)
1982
1983 return bdev
1984
1985@@ -418,7 +420,7 @@
1986
1987 if isinstance(address, dns.name.Name):
1988 rtype = 'PTR'
1989- elif isinstance(address, basestring):
1990+ elif isinstance(address, six.string_types):
1991 rtype = 'A'
1992 else:
1993 return None
1994@@ -486,8 +488,7 @@
1995 'hostname': json.dumps(hosts)}
1996
1997 if relation_prefix:
1998- keys = kwargs.keys()
1999- for key in keys:
2000+ for key in list(kwargs.keys()):
2001 kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
2002 del kwargs[key]
2003
2004@@ -508,3 +509,111 @@
2005 f(*args)
2006 return wrapped_f
2007 return wrap
2008+
2009+
2010+def git_install_requested():
2011+ """Returns true if openstack-origin-git is specified."""
2012+ return config('openstack-origin-git') != "None"
2013+
2014+
2015+requirements_dir = None
2016+
2017+
2018+def git_clone_and_install(file_name, core_project):
2019+ """Clone/install all OpenStack repos specified in yaml config file."""
2020+ global requirements_dir
2021+
2022+ if file_name == "None":
2023+ return
2024+
2025+ yaml_file = os.path.join(charm_dir(), file_name)
2026+
2027+ # clone/install the requirements project first
2028+ installed = _git_clone_and_install_subset(yaml_file,
2029+ whitelist=['requirements'])
2030+ if 'requirements' not in installed:
2031+ error_out('requirements git repository must be specified')
2032+
2033+ # clone/install all other projects except requirements and the core project
2034+ blacklist = ['requirements', core_project]
2035+ _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
2036+ update_requirements=True)
2037+
2038+ # clone/install the core project
2039+ whitelist = [core_project]
2040+ installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
2041+ update_requirements=True)
2042+ if core_project not in installed:
2043+ error_out('{} git repository must be specified'.format(core_project))
2044+
2045+
2046+def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
2047+ update_requirements=False):
2048+ """Clone/install subset of OpenStack repos specified in yaml config file."""
2049+ global requirements_dir
2050+ installed = []
2051+
2052+ with open(yaml_file, 'r') as fd:
2053+ projects = yaml.load(fd)
2054+ for proj, val in projects.items():
2055+ # The project subset is chosen based on the following 3 rules:
2056+ # 1) If project is in blacklist, we don't clone/install it, period.
2057+ # 2) If whitelist is empty, we clone/install everything else.
2058+ # 3) If whitelist is not empty, we clone/install everything in the
2059+ # whitelist.
2060+ if proj in blacklist:
2061+ continue
2062+ if whitelist and proj not in whitelist:
2063+ continue
2064+ repo = val['repository']
2065+ branch = val['branch']
2066+ repo_dir = _git_clone_and_install_single(repo, branch,
2067+ update_requirements)
2068+ if proj == 'requirements':
2069+ requirements_dir = repo_dir
2070+ installed.append(proj)
2071+ return installed
2072+
2073+
2074+def _git_clone_and_install_single(repo, branch, update_requirements=False):
2075+ """Clone and install a single git repository."""
2076+ dest_parent_dir = "/mnt/openstack-git/"
2077+ dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
2078+
2079+ if not os.path.exists(dest_parent_dir):
2080+ juju_log('Host dir not mounted at {}. '
2081+ 'Creating directory there instead.'.format(dest_parent_dir))
2082+ os.mkdir(dest_parent_dir)
2083+
2084+ if not os.path.exists(dest_dir):
2085+ juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
2086+ repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
2087+ else:
2088+ repo_dir = dest_dir
2089+
2090+ if update_requirements:
2091+ if not requirements_dir:
2092+ error_out('requirements repo must be cloned before '
2093+ 'updating from global requirements.')
2094+ _git_update_requirements(repo_dir, requirements_dir)
2095+
2096+ juju_log('Installing git repo from dir: {}'.format(repo_dir))
2097+ pip_install(repo_dir)
2098+
2099+ return repo_dir
2100+
2101+
2102+def _git_update_requirements(package_dir, reqs_dir):
2103+ """Update from global requirements.
2104+
2105+ Update an OpenStack git directory's requirements.txt and
2106+ test-requirements.txt from global-requirements.txt."""
2107+ orig_dir = os.getcwd()
2108+ os.chdir(reqs_dir)
2109+ cmd = "python update.py {}".format(package_dir)
2110+ try:
2111+ subprocess.check_call(cmd.split(' '))
2112+ except subprocess.CalledProcessError:
2113+ package = os.path.basename(package_dir)
2114+ error_out("Error updating {} from global-requirements.txt".format(package))
2115+ os.chdir(orig_dir)
2116
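git_clone_and_install() above expects a YAML file in the charm directory that maps project names to 'repository' and 'branch' keys; the 'requirements' entry is mandatory and is cloned first, while the named core project is cloned and pip-installed last. A hedged sketch of the file shape implied by the parsing code (file name, project set and repository URLs are illustrative only):

    # e.g. openstack-origin-git.yaml, passed in as file_name
    requirements:
      repository: https://github.com/openstack/requirements
      branch: stable/icehouse
    nova:
      repository: https://github.com/openstack/nova
      branch: stable/icehouse

An install hook would then call something like git_clone_and_install('openstack-origin-git.yaml', core_project='nova').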
2117=== added directory 'hooks/charmhelpers/contrib/python'
2118=== added file 'hooks/charmhelpers/contrib/python/__init__.py'
2119=== added file 'hooks/charmhelpers/contrib/python/packages.py'
2120--- hooks/charmhelpers/contrib/python/packages.py 1970-01-01 00:00:00 +0000
2121+++ hooks/charmhelpers/contrib/python/packages.py 2014-12-11 17:56:59 +0000
2122@@ -0,0 +1,77 @@
2123+#!/usr/bin/env python
2124+# coding: utf-8
2125+
2126+__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
2127+
2128+from charmhelpers.fetch import apt_install, apt_update
2129+from charmhelpers.core.hookenv import log
2130+
2131+try:
2132+ from pip import main as pip_execute
2133+except ImportError:
2134+ apt_update()
2135+ apt_install('python-pip')
2136+ from pip import main as pip_execute
2137+
2138+
2139+def parse_options(given, available):
2140+ """Given a set of options, check if available"""
2141+ for key, value in sorted(given.items()):
2142+ if key in available:
2143+ yield "--{0}={1}".format(key, value)
2144+
2145+
2146+def pip_install_requirements(requirements, **options):
2147+ """Install a requirements file """
2148+ command = ["install"]
2149+
2150+ available_options = ('proxy', 'src', 'log', )
2151+ for option in parse_options(options, available_options):
2152+ command.append(option)
2153+
2154+ command.append("-r {0}".format(requirements))
2155+ log("Installing from file: {} with options: {}".format(requirements,
2156+ command))
2157+ pip_execute(command)
2158+
2159+
2160+def pip_install(package, fatal=False, **options):
2161+ """Install a python package"""
2162+ command = ["install"]
2163+
2164+ available_options = ('proxy', 'src', 'log', "index-url", )
2165+ for option in parse_options(options, available_options):
2166+ command.append(option)
2167+
2168+ if isinstance(package, list):
2169+ command.extend(package)
2170+ else:
2171+ command.append(package)
2172+
2173+ log("Installing {} package with options: {}".format(package,
2174+ command))
2175+ pip_execute(command)
2176+
2177+
2178+def pip_uninstall(package, **options):
2179+ """Uninstall a python package"""
2180+ command = ["uninstall", "-q", "-y"]
2181+
2182+ available_options = ('proxy', 'log', )
2183+ for option in parse_options(options, available_options):
2184+ command.append(option)
2185+
2186+ if isinstance(package, list):
2187+ command.extend(package)
2188+ else:
2189+ command.append(package)
2190+
2191+ log("Uninstalling {} package with options: {}".format(package,
2192+ command))
2193+ pip_execute(command)
2194+
2195+
2196+def pip_list():
2197+ """Returns the list of current python installed packages
2198+ """
2199+ return pip_execute(["list"])
2200
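The new contrib.python.packages helpers wrap pip's in-process main(), installing python-pip via apt on first use. A short usage sketch (package names, proxy and log paths are examples only):

    from charmhelpers.contrib.python.packages import (
        pip_install,
        pip_install_requirements,
        pip_uninstall,
    )

    # Only options named in each helper's available_options tuple are passed
    # through, as --<option>=<value>.
    pip_install('pbr', proxy='http://squid.internal:3128')
    pip_install(['oslo.config', 'six'])        # lists are accepted too
    pip_install_requirements('requirements.txt', log='/var/log/pip.log')
    pip_uninstall('pbr')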
2201=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
2202--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-23 17:30:13 +0000
2203+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-12-11 17:56:59 +0000
2204@@ -16,19 +16,18 @@
2205 from subprocess import (
2206 check_call,
2207 check_output,
2208- CalledProcessError
2209+ CalledProcessError,
2210 )
2211-
2212 from charmhelpers.core.hookenv import (
2213 relation_get,
2214 relation_ids,
2215 related_units,
2216 log,
2217+ DEBUG,
2218 INFO,
2219 WARNING,
2220- ERROR
2221+ ERROR,
2222 )
2223-
2224 from charmhelpers.core.host import (
2225 mount,
2226 mounts,
2227@@ -37,7 +36,6 @@
2228 service_running,
2229 umount,
2230 )
2231-
2232 from charmhelpers.fetch import (
2233 apt_install,
2234 )
2235@@ -56,99 +54,85 @@
2236
2237
2238 def install():
2239- ''' Basic Ceph client installation '''
2240+ """Basic Ceph client installation."""
2241 ceph_dir = "/etc/ceph"
2242 if not os.path.exists(ceph_dir):
2243 os.mkdir(ceph_dir)
2244+
2245 apt_install('ceph-common', fatal=True)
2246
2247
2248 def rbd_exists(service, pool, rbd_img):
2249- ''' Check to see if a RADOS block device exists '''
2250+ """Check to see if a RADOS block device exists."""
2251 try:
2252- out = check_output(['rbd', 'list', '--id', service,
2253- '--pool', pool])
2254+ out = check_output(['rbd', 'list', '--id',
2255+ service, '--pool', pool]).decode('UTF-8')
2256 except CalledProcessError:
2257 return False
2258- else:
2259- return rbd_img in out
2260+
2261+ return rbd_img in out
2262
2263
2264 def create_rbd_image(service, pool, image, sizemb):
2265- ''' Create a new RADOS block device '''
2266- cmd = [
2267- 'rbd',
2268- 'create',
2269- image,
2270- '--size',
2271- str(sizemb),
2272- '--id',
2273- service,
2274- '--pool',
2275- pool
2276- ]
2277+ """Create a new RADOS block device."""
2278+ cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
2279+ '--pool', pool]
2280 check_call(cmd)
2281
2282
2283 def pool_exists(service, name):
2284- ''' Check to see if a RADOS pool already exists '''
2285+ """Check to see if a RADOS pool already exists."""
2286 try:
2287- out = check_output(['rados', '--id', service, 'lspools'])
2288+ out = check_output(['rados', '--id', service,
2289+ 'lspools']).decode('UTF-8')
2290 except CalledProcessError:
2291 return False
2292- else:
2293- return name in out
2294+
2295+ return name in out
2296
2297
2298 def get_osds(service):
2299- '''
2300- Return a list of all Ceph Object Storage Daemons
2301- currently in the cluster
2302- '''
2303+ """Return a list of all Ceph Object Storage Daemons currently in the
2304+ cluster.
2305+ """
2306 version = ceph_version()
2307 if version and version >= '0.56':
2308 return json.loads(check_output(['ceph', '--id', service,
2309- 'osd', 'ls', '--format=json']))
2310- else:
2311- return None
2312+ 'osd', 'ls',
2313+ '--format=json']).decode('UTF-8'))
2314+
2315+ return None
2316
2317
2318 def create_pool(service, name, replicas=3):
2319- ''' Create a new RADOS pool '''
2320+ """Create a new RADOS pool."""
2321 if pool_exists(service, name):
2322 log("Ceph pool {} already exists, skipping creation".format(name),
2323 level=WARNING)
2324 return
2325+
2326 # Calculate the number of placement groups based
2327 # on upstream recommended best practices.
2328 osds = get_osds(service)
2329 if osds:
2330- pgnum = (len(osds) * 100 / replicas)
2331+ pgnum = (len(osds) * 100 // replicas)
2332 else:
2333 # NOTE(james-page): Default to 200 for older ceph versions
2334 # which don't support OSD query from cli
2335 pgnum = 200
2336- cmd = [
2337- 'ceph', '--id', service,
2338- 'osd', 'pool', 'create',
2339- name, str(pgnum)
2340- ]
2341+
2342+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
2343 check_call(cmd)
2344- cmd = [
2345- 'ceph', '--id', service,
2346- 'osd', 'pool', 'set', name,
2347- 'size', str(replicas)
2348- ]
2349+
2350+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
2351+ str(replicas)]
2352 check_call(cmd)
2353
2354
2355 def delete_pool(service, name):
2356- ''' Delete a RADOS pool from ceph '''
2357- cmd = [
2358- 'ceph', '--id', service,
2359- 'osd', 'pool', 'delete',
2360- name, '--yes-i-really-really-mean-it'
2361- ]
2362+ """Delete a RADOS pool from ceph."""
2363+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
2364+ '--yes-i-really-really-mean-it']
2365 check_call(cmd)
2366
2367
2368@@ -161,44 +145,43 @@
2369
2370
2371 def create_keyring(service, key):
2372- ''' Create a new Ceph keyring containing key'''
2373+ """Create a new Ceph keyring containing key."""
2374 keyring = _keyring_path(service)
2375 if os.path.exists(keyring):
2376- log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
2377+ log('Ceph keyring exists at %s.' % keyring, level=WARNING)
2378 return
2379- cmd = [
2380- 'ceph-authtool',
2381- keyring,
2382- '--create-keyring',
2383- '--name=client.{}'.format(service),
2384- '--add-key={}'.format(key)
2385- ]
2386+
2387+ cmd = ['ceph-authtool', keyring, '--create-keyring',
2388+ '--name=client.{}'.format(service), '--add-key={}'.format(key)]
2389 check_call(cmd)
2390- log('ceph: Created new ring at %s.' % keyring, level=INFO)
2391+ log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
2392
2393
2394 def create_key_file(service, key):
2395- ''' Create a file containing key '''
2396+ """Create a file containing key."""
2397 keyfile = _keyfile_path(service)
2398 if os.path.exists(keyfile):
2399- log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
2400+ log('Keyfile exists at %s.' % keyfile, level=WARNING)
2401 return
2402+
2403 with open(keyfile, 'w') as fd:
2404 fd.write(key)
2405- log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
2406+
2407+ log('Created new keyfile at %s.' % keyfile, level=INFO)
2408
2409
2410 def get_ceph_nodes():
2411- ''' Query named relation 'ceph' to detemine current nodes '''
2412+ """Query named relation 'ceph' to determine current nodes."""
2413 hosts = []
2414 for r_id in relation_ids('ceph'):
2415 for unit in related_units(r_id):
2416 hosts.append(relation_get('private-address', unit=unit, rid=r_id))
2417+
2418 return hosts
2419
2420
2421 def configure(service, key, auth, use_syslog):
2422- ''' Perform basic configuration of Ceph '''
2423+ """Perform basic configuration of Ceph."""
2424 create_keyring(service, key)
2425 create_key_file(service, key)
2426 hosts = get_ceph_nodes()
2427@@ -211,17 +194,17 @@
2428
2429
2430 def image_mapped(name):
2431- ''' Determine whether a RADOS block device is mapped locally '''
2432+ """Determine whether a RADOS block device is mapped locally."""
2433 try:
2434- out = check_output(['rbd', 'showmapped'])
2435+ out = check_output(['rbd', 'showmapped']).decode('UTF-8')
2436 except CalledProcessError:
2437 return False
2438- else:
2439- return name in out
2440+
2441+ return name in out
2442
2443
2444 def map_block_storage(service, pool, image):
2445- ''' Map a RADOS block device for local use '''
2446+ """Map a RADOS block device for local use."""
2447 cmd = [
2448 'rbd',
2449 'map',
2450@@ -235,31 +218,32 @@
2451
2452
2453 def filesystem_mounted(fs):
2454- ''' Determine whether a filesytems is already mounted '''
2455+ """Determine whether a filesytems is already mounted."""
2456 return fs in [f for f, m in mounts()]
2457
2458
2459 def make_filesystem(blk_device, fstype='ext4', timeout=10):
2460- ''' Make a new filesystem on the specified block device '''
2461+ """Make a new filesystem on the specified block device."""
2462 count = 0
2463 e_noent = os.errno.ENOENT
2464 while not os.path.exists(blk_device):
2465 if count >= timeout:
2466- log('ceph: gave up waiting on block device %s' % blk_device,
2467+ log('Gave up waiting on block device %s' % blk_device,
2468 level=ERROR)
2469 raise IOError(e_noent, os.strerror(e_noent), blk_device)
2470- log('ceph: waiting for block device %s to appear' % blk_device,
2471- level=INFO)
2472+
2473+ log('Waiting for block device %s to appear' % blk_device,
2474+ level=DEBUG)
2475 count += 1
2476 time.sleep(1)
2477 else:
2478- log('ceph: Formatting block device %s as filesystem %s.' %
2479+ log('Formatting block device %s as filesystem %s.' %
2480 (blk_device, fstype), level=INFO)
2481 check_call(['mkfs', '-t', fstype, blk_device])
2482
2483
2484 def place_data_on_block_device(blk_device, data_src_dst):
2485- ''' Migrate data in data_src_dst to blk_device and then remount '''
2486+ """Migrate data in data_src_dst to blk_device and then remount."""
2487 # mount block device into /mnt
2488 mount(blk_device, '/mnt')
2489 # copy data to /mnt
2490@@ -279,8 +263,8 @@
2491
2492 # TODO: re-use
2493 def modprobe(module):
2494- ''' Load a kernel module and configure for auto-load on reboot '''
2495- log('ceph: Loading kernel module', level=INFO)
2496+ """Load a kernel module and configure for auto-load on reboot."""
2497+ log('Loading kernel module', level=INFO)
2498 cmd = ['modprobe', module]
2499 check_call(cmd)
2500 with open('/etc/modules', 'r+') as modules:
2501@@ -289,7 +273,7 @@
2502
2503
2504 def copy_files(src, dst, symlinks=False, ignore=None):
2505- ''' Copy files from src to dst '''
2506+ """Copy files from src to dst."""
2507 for item in os.listdir(src):
2508 s = os.path.join(src, item)
2509 d = os.path.join(dst, item)
2510@@ -302,8 +286,7 @@
2511 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
2512 blk_device, fstype, system_services=[],
2513 replicas=3):
2514- """
2515- NOTE: This function must only be called from a single service unit for
2516+ """NOTE: This function must only be called from a single service unit for
2517 the same rbd_img otherwise data loss will occur.
2518
2519 Ensures given pool and RBD image exists, is mapped to a block device,
2520@@ -317,15 +300,16 @@
2521 """
2522 # Ensure pool, RBD image, RBD mappings are in place.
2523 if not pool_exists(service, pool):
2524- log('ceph: Creating new pool {}.'.format(pool))
2525+ log('Creating new pool {}.'.format(pool), level=INFO)
2526 create_pool(service, pool, replicas=replicas)
2527
2528 if not rbd_exists(service, pool, rbd_img):
2529- log('ceph: Creating RBD image ({}).'.format(rbd_img))
2530+ log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
2531 create_rbd_image(service, pool, rbd_img, sizemb)
2532
2533 if not image_mapped(rbd_img):
2534- log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
2535+ log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
2536+ level=INFO)
2537 map_block_storage(service, pool, rbd_img)
2538
2539 # make file system
2540@@ -340,45 +324,47 @@
2541
2542 for svc in system_services:
2543 if service_running(svc):
2544- log('ceph: Stopping services {} prior to migrating data.'
2545- .format(svc))
2546+ log('Stopping services {} prior to migrating data.'
2547+ .format(svc), level=DEBUG)
2548 service_stop(svc)
2549
2550 place_data_on_block_device(blk_device, mount_point)
2551
2552 for svc in system_services:
2553- log('ceph: Starting service {} after migrating data.'
2554- .format(svc))
2555+ log('Starting service {} after migrating data.'
2556+ .format(svc), level=DEBUG)
2557 service_start(svc)
2558
2559
2560 def ensure_ceph_keyring(service, user=None, group=None):
2561- '''
2562- Ensures a ceph keyring is created for a named service
2563- and optionally ensures user and group ownership.
2564+ """Ensures a ceph keyring is created for a named service and optionally
2565+ ensures user and group ownership.
2566
2567 Returns False if no ceph key is available in relation state.
2568- '''
2569+ """
2570 key = None
2571 for rid in relation_ids('ceph'):
2572 for unit in related_units(rid):
2573 key = relation_get('key', rid=rid, unit=unit)
2574 if key:
2575 break
2576+
2577 if not key:
2578 return False
2579+
2580 create_keyring(service=service, key=key)
2581 keyring = _keyring_path(service)
2582 if user and group:
2583 check_call(['chown', '%s.%s' % (user, group), keyring])
2584+
2585 return True
2586
2587
2588 def ceph_version():
2589- ''' Retrieve the local version of ceph '''
2590+ """Retrieve the local version of ceph."""
2591 if os.path.exists('/usr/bin/ceph'):
2592 cmd = ['ceph', '-v']
2593- output = check_output(cmd)
2594+ output = check_output(cmd).decode('US-ASCII')
2595 output = output.split()
2596 if len(output) > 3:
2597 return output[2]
2598
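The .decode('UTF-8') calls added to the check_output() results above are what make these helpers safe on Python 3: subprocess returns bytes there, and the subsequent membership tests and split() calls need text. A minimal illustration of the pattern (the service name is an example):

    from subprocess import check_output

    # bytes on Python 3, str on Python 2; decoding yields text on both.
    out = check_output(['rados', '--id', 'cinder', 'lspools']).decode('UTF-8')
    if 'cinder' in out.split():
        pass  # pool already exists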
2599=== modified file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
2600--- hooks/charmhelpers/contrib/storage/linux/loopback.py 2013-08-12 21:48:24 +0000
2601+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2014-12-11 17:56:59 +0000
2602@@ -1,12 +1,12 @@
2603-
2604 import os
2605 import re
2606-
2607 from subprocess import (
2608 check_call,
2609 check_output,
2610 )
2611
2612+import six
2613+
2614
2615 ##################################################
2616 # loopback device helpers.
2617@@ -37,7 +37,7 @@
2618 '''
2619 file_path = os.path.abspath(file_path)
2620 check_call(['losetup', '--find', file_path])
2621- for d, f in loopback_devices().iteritems():
2622+ for d, f in six.iteritems(loopback_devices()):
2623 if f == file_path:
2624 return d
2625
2626@@ -51,7 +51,7 @@
2627
2628 :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
2629 '''
2630- for d, f in loopback_devices().iteritems():
2631+ for d, f in six.iteritems(loopback_devices()):
2632 if f == path:
2633 return d
2634
2635
2636=== modified file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
2637--- hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-05-19 11:41:02 +0000
2638+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-12-11 17:56:59 +0000
2639@@ -61,6 +61,7 @@
2640 vg = None
2641 pvd = check_output(['pvdisplay', block_device]).splitlines()
2642 for l in pvd:
2643+ l = l.decode('UTF-8')
2644 if l.strip().startswith('VG Name'):
2645 vg = ' '.join(l.strip().split()[2:])
2646 return vg
2647
2648=== modified file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
2649--- hooks/charmhelpers/contrib/storage/linux/utils.py 2014-10-23 17:30:13 +0000
2650+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-12-11 17:56:59 +0000
2651@@ -30,7 +30,8 @@
2652 # sometimes sgdisk exits non-zero; this is OK, dd will clean up
2653 call(['sgdisk', '--zap-all', '--mbrtogpt',
2654 '--clear', block_device])
2655- dev_end = check_output(['blockdev', '--getsz', block_device])
2656+ dev_end = check_output(['blockdev', '--getsz',
2657+ block_device]).decode('UTF-8')
2658 gpt_end = int(dev_end.split()[0]) - 100
2659 check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
2660 'bs=1M', 'count=1'])
2661@@ -47,7 +48,7 @@
2662 it doesn't.
2663 '''
2664 is_partition = bool(re.search(r".*[0-9]+\b", device))
2665- out = check_output(['mount'])
2666+ out = check_output(['mount']).decode('UTF-8')
2667 if is_partition:
2668 return bool(re.search(device + r"\b", out))
2669 return bool(re.search(device + r"[0-9]+\b", out))
2670
2671=== modified file 'hooks/charmhelpers/core/fstab.py'
2672--- hooks/charmhelpers/core/fstab.py 2014-10-23 17:30:13 +0000
2673+++ hooks/charmhelpers/core/fstab.py 2014-12-11 17:56:59 +0000
2674@@ -3,10 +3,11 @@
2675
2676 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
2677
2678+import io
2679 import os
2680
2681
2682-class Fstab(file):
2683+class Fstab(io.FileIO):
2684 """This class extends file in order to implement a file reader/writer
2685 for file `/etc/fstab`
2686 """
2687@@ -24,8 +25,8 @@
2688 options = "defaults"
2689
2690 self.options = options
2691- self.d = d
2692- self.p = p
2693+ self.d = int(d)
2694+ self.p = int(p)
2695
2696 def __eq__(self, o):
2697 return str(self) == str(o)
2698@@ -45,7 +46,7 @@
2699 self._path = path
2700 else:
2701 self._path = self.DEFAULT_PATH
2702- file.__init__(self, self._path, 'r+')
2703+ super(Fstab, self).__init__(self._path, 'rb+')
2704
2705 def _hydrate_entry(self, line):
2706 # NOTE: use split with no arguments to split on any
2707@@ -58,8 +59,9 @@
2708 def entries(self):
2709 self.seek(0)
2710 for line in self.readlines():
2711+ line = line.decode('us-ascii')
2712 try:
2713- if not line.startswith("#"):
2714+ if line.strip() and not line.startswith("#"):
2715 yield self._hydrate_entry(line)
2716 except ValueError:
2717 pass
2718@@ -75,14 +77,14 @@
2719 if self.get_entry_by_attr('device', entry.device):
2720 return False
2721
2722- self.write(str(entry) + '\n')
2723+ self.write((str(entry) + '\n').encode('us-ascii'))
2724 self.truncate()
2725 return entry
2726
2727 def remove_entry(self, entry):
2728 self.seek(0)
2729
2730- lines = self.readlines()
2731+ lines = [l.decode('us-ascii') for l in self.readlines()]
2732
2733 found = False
2734 for index, line in enumerate(lines):
2735@@ -97,7 +99,7 @@
2736 lines.remove(line)
2737
2738 self.seek(0)
2739- self.write(''.join(lines))
2740+ self.write(''.join(lines).encode('us-ascii'))
2741 self.truncate()
2742 return True
2743
2744
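The Fstab class is ported from the Python-2-only file builtin to io.FileIO, which always operates on bytes; that is why entries are now encoded and decoded as us-ascii and the dump/pass fields are cast to int. A small standalone illustration of the underlying io.FileIO behaviour (the temporary path is illustrative):

    import io

    path = '/tmp/fstab-demo'
    with io.FileIO(path, 'w') as f:
        f.write('/dev/vdb /srv ext4 defaults 0 0\n'.encode('us-ascii'))

    with io.FileIO(path, 'r') as f:
        entries = [line.decode('us-ascii') for line in f.readlines()]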
2745=== modified file 'hooks/charmhelpers/core/hookenv.py'
2746--- hooks/charmhelpers/core/hookenv.py 2014-10-23 17:30:13 +0000
2747+++ hooks/charmhelpers/core/hookenv.py 2014-12-11 17:56:59 +0000
2748@@ -9,9 +9,14 @@
2749 import yaml
2750 import subprocess
2751 import sys
2752-import UserDict
2753 from subprocess import CalledProcessError
2754
2755+import six
2756+if not six.PY3:
2757+ from UserDict import UserDict
2758+else:
2759+ from collections import UserDict
2760+
2761 CRITICAL = "CRITICAL"
2762 ERROR = "ERROR"
2763 WARNING = "WARNING"
2764@@ -63,16 +68,18 @@
2765 command = ['juju-log']
2766 if level:
2767 command += ['-l', level]
2768+ if not isinstance(message, six.string_types):
2769+ message = repr(message)
2770 command += [message]
2771 subprocess.call(command)
2772
2773
2774-class Serializable(UserDict.IterableUserDict):
2775+class Serializable(UserDict):
2776 """Wrapper, an object that can be serialized to yaml or json"""
2777
2778 def __init__(self, obj):
2779 # wrap the object
2780- UserDict.IterableUserDict.__init__(self)
2781+ UserDict.__init__(self)
2782 self.data = obj
2783
2784 def __getattr__(self, attr):
2785@@ -218,7 +225,7 @@
2786 prev_keys = []
2787 if self._prev_dict is not None:
2788 prev_keys = self._prev_dict.keys()
2789- return list(set(prev_keys + dict.keys(self)))
2790+ return list(set(prev_keys + list(dict.keys(self))))
2791
2792 def load_previous(self, path=None):
2793 """Load previous copy of config from disk.
2794@@ -269,7 +276,7 @@
2795
2796 """
2797 if self._prev_dict:
2798- for k, v in self._prev_dict.iteritems():
2799+ for k, v in six.iteritems(self._prev_dict):
2800 if k not in self:
2801 self[k] = v
2802 with open(self.path, 'w') as f:
2803@@ -284,7 +291,8 @@
2804 config_cmd_line.append(scope)
2805 config_cmd_line.append('--format=json')
2806 try:
2807- config_data = json.loads(subprocess.check_output(config_cmd_line))
2808+ config_data = json.loads(
2809+ subprocess.check_output(config_cmd_line).decode('UTF-8'))
2810 if scope is not None:
2811 return config_data
2812 return Config(config_data)
2813@@ -303,10 +311,10 @@
2814 if unit:
2815 _args.append(unit)
2816 try:
2817- return json.loads(subprocess.check_output(_args))
2818+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2819 except ValueError:
2820 return None
2821- except CalledProcessError, e:
2822+ except CalledProcessError as e:
2823 if e.returncode == 2:
2824 return None
2825 raise
2826@@ -318,7 +326,7 @@
2827 relation_cmd_line = ['relation-set']
2828 if relation_id is not None:
2829 relation_cmd_line.extend(('-r', relation_id))
2830- for k, v in (relation_settings.items() + kwargs.items()):
2831+ for k, v in (list(relation_settings.items()) + list(kwargs.items())):
2832 if v is None:
2833 relation_cmd_line.append('{}='.format(k))
2834 else:
2835@@ -335,7 +343,8 @@
2836 relid_cmd_line = ['relation-ids', '--format=json']
2837 if reltype is not None:
2838 relid_cmd_line.append(reltype)
2839- return json.loads(subprocess.check_output(relid_cmd_line)) or []
2840+ return json.loads(
2841+ subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
2842 return []
2843
2844
2845@@ -346,7 +355,8 @@
2846 units_cmd_line = ['relation-list', '--format=json']
2847 if relid is not None:
2848 units_cmd_line.extend(('-r', relid))
2849- return json.loads(subprocess.check_output(units_cmd_line)) or []
2850+ return json.loads(
2851+ subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
2852
2853
2854 @cached
2855@@ -386,21 +396,31 @@
2856
2857
2858 @cached
2859+def metadata():
2860+ """Get the current charm metadata.yaml contents as a python object"""
2861+ with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
2862+ return yaml.safe_load(md)
2863+
2864+
2865+@cached
2866 def relation_types():
2867 """Get a list of relation types supported by this charm"""
2868- charmdir = os.environ.get('CHARM_DIR', '')
2869- mdf = open(os.path.join(charmdir, 'metadata.yaml'))
2870- md = yaml.safe_load(mdf)
2871 rel_types = []
2872+ md = metadata()
2873 for key in ('provides', 'requires', 'peers'):
2874 section = md.get(key)
2875 if section:
2876 rel_types.extend(section.keys())
2877- mdf.close()
2878 return rel_types
2879
2880
2881 @cached
2882+def charm_name():
2883+ """Get the name of the current charm as is specified on metadata.yaml"""
2884+ return metadata().get('name')
2885+
2886+
2887+@cached
2888 def relations():
2889 """Get a nested dictionary of relation data for all related units"""
2890 rels = {}
2891@@ -455,7 +475,7 @@
2892 """Get the unit ID for the remote unit"""
2893 _args = ['unit-get', '--format=json', attribute]
2894 try:
2895- return json.loads(subprocess.check_output(_args))
2896+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
2897 except ValueError:
2898 return None
2899
2900
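Besides the Python 3 fixes, hookenv gains two small cached helpers: metadata() loads the charm's metadata.yaml once, and charm_name() returns its 'name' field; relation_types() is refactored on top of metadata(). A brief sketch of their use:

    from charmhelpers.core.hookenv import metadata, charm_name, relation_types

    md = metadata()               # metadata.yaml as a dict
    name = charm_name()           # shortcut for metadata().get('name')
    rel_types = relation_types()  # keys of provides/requires/peers sections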
2901=== modified file 'hooks/charmhelpers/core/host.py'
2902--- hooks/charmhelpers/core/host.py 2014-10-23 17:30:13 +0000
2903+++ hooks/charmhelpers/core/host.py 2014-12-11 17:56:59 +0000
2904@@ -14,11 +14,12 @@
2905 import subprocess
2906 import hashlib
2907 from contextlib import contextmanager
2908-
2909 from collections import OrderedDict
2910
2911-from hookenv import log
2912-from fstab import Fstab
2913+import six
2914+
2915+from .hookenv import log
2916+from .fstab import Fstab
2917
2918
2919 def service_start(service_name):
2920@@ -54,7 +55,9 @@
2921 def service_running(service):
2922 """Determine whether a system service is running"""
2923 try:
2924- output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
2925+ output = subprocess.check_output(
2926+ ['service', service, 'status'],
2927+ stderr=subprocess.STDOUT).decode('UTF-8')
2928 except subprocess.CalledProcessError:
2929 return False
2930 else:
2931@@ -67,7 +70,9 @@
2932 def service_available(service_name):
2933 """Determine whether a system service is available"""
2934 try:
2935- subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
2936+ subprocess.check_output(
2937+ ['service', service_name, 'status'],
2938+ stderr=subprocess.STDOUT).decode('UTF-8')
2939 except subprocess.CalledProcessError as e:
2940 return 'unrecognized service' not in e.output
2941 else:
2942@@ -96,6 +101,26 @@
2943 return user_info
2944
2945
2946+def add_group(group_name, system_group=False):
2947+ """Add a group to the system"""
2948+ try:
2949+ group_info = grp.getgrnam(group_name)
2950+ log('group {0} already exists!'.format(group_name))
2951+ except KeyError:
2952+ log('creating group {0}'.format(group_name))
2953+ cmd = ['addgroup']
2954+ if system_group:
2955+ cmd.append('--system')
2956+ else:
2957+ cmd.extend([
2958+ '--group',
2959+ ])
2960+ cmd.append(group_name)
2961+ subprocess.check_call(cmd)
2962+ group_info = grp.getgrnam(group_name)
2963+ return group_info
2964+
2965+
2966 def add_user_to_group(username, group):
2967 """Add a user to a group"""
2968 cmd = [
2969@@ -115,7 +140,7 @@
2970 cmd.append(from_path)
2971 cmd.append(to_path)
2972 log(" ".join(cmd))
2973- return subprocess.check_output(cmd).strip()
2974+ return subprocess.check_output(cmd).decode('UTF-8').strip()
2975
2976
2977 def symlink(source, destination):
2978@@ -130,7 +155,7 @@
2979 subprocess.check_call(cmd)
2980
2981
2982-def mkdir(path, owner='root', group='root', perms=0555, force=False):
2983+def mkdir(path, owner='root', group='root', perms=0o555, force=False):
2984 """Create a directory"""
2985 log("Making dir {} {}:{} {:o}".format(path, owner, group,
2986 perms))
2987@@ -146,7 +171,7 @@
2988 os.chown(realpath, uid, gid)
2989
2990
2991-def write_file(path, content, owner='root', group='root', perms=0444):
2992+def write_file(path, content, owner='root', group='root', perms=0o444):
2993 """Create or overwrite a file with the contents of a string"""
2994 log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
2995 uid = pwd.getpwnam(owner).pw_uid
2996@@ -177,7 +202,7 @@
2997 cmd_args.extend([device, mountpoint])
2998 try:
2999 subprocess.check_output(cmd_args)
3000- except subprocess.CalledProcessError, e:
3001+ except subprocess.CalledProcessError as e:
3002 log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
3003 return False
3004
3005@@ -191,7 +216,7 @@
3006 cmd_args = ['umount', mountpoint]
3007 try:
3008 subprocess.check_output(cmd_args)
3009- except subprocess.CalledProcessError, e:
3010+ except subprocess.CalledProcessError as e:
3011 log('Error unmounting {}\n{}'.format(mountpoint, e.output))
3012 return False
3013
3014@@ -218,8 +243,8 @@
3015 """
3016 if os.path.exists(path):
3017 h = getattr(hashlib, hash_type)()
3018- with open(path, 'r') as source:
3019- h.update(source.read()) # IGNORE:E1101 - it does have update
3020+ with open(path, 'rb') as source:
3021+ h.update(source.read())
3022 return h.hexdigest()
3023 else:
3024 return None
3025@@ -297,7 +322,7 @@
3026 if length is None:
3027 length = random.choice(range(35, 45))
3028 alphanumeric_chars = [
3029- l for l in (string.letters + string.digits)
3030+ l for l in (string.ascii_letters + string.digits)
3031 if l not in 'l0QD1vAEIOUaeiou']
3032 random_chars = [
3033 random.choice(alphanumeric_chars) for _ in range(length)]
3034@@ -306,14 +331,14 @@
3035
3036 def list_nics(nic_type):
3037 '''Return a list of nics of given type(s)'''
3038- if isinstance(nic_type, basestring):
3039+ if isinstance(nic_type, six.string_types):
3040 int_types = [nic_type]
3041 else:
3042 int_types = nic_type
3043 interfaces = []
3044 for int_type in int_types:
3045 cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
3046- ip_output = subprocess.check_output(cmd).split('\n')
3047+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3048 ip_output = (line for line in ip_output if line)
3049 for line in ip_output:
3050 if line.split()[1].startswith(int_type):
3051@@ -335,7 +360,7 @@
3052
3053 def get_nic_mtu(nic):
3054 cmd = ['ip', 'addr', 'show', nic]
3055- ip_output = subprocess.check_output(cmd).split('\n')
3056+ ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
3057 mtu = ""
3058 for line in ip_output:
3059 words = line.split()
3060@@ -346,7 +371,7 @@
3061
3062 def get_nic_hwaddr(nic):
3063 cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
3064- ip_output = subprocess.check_output(cmd)
3065+ ip_output = subprocess.check_output(cmd).decode('UTF-8')
3066 hwaddr = ""
3067 words = ip_output.split()
3068 if 'link/ether' in words:
3069@@ -363,8 +388,8 @@
3070
3071 '''
3072 import apt_pkg
3073- from charmhelpers.fetch import apt_cache
3074 if not pkgcache:
3075+ from charmhelpers.fetch import apt_cache
3076 pkgcache = apt_cache()
3077 pkg = pkgcache[package]
3078 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
3079
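host.py gains add_group(), which mirrors the existing user helpers: it returns the existing grp entry when the group is already present and otherwise shells out to addgroup, optionally as a system group. A short usage sketch (group and user names are examples):

    from charmhelpers.core.host import add_group, add_user_to_group

    group_info = add_group('ceilometer', system_group=True)
    add_user_to_group('ubuntu', 'ceilometer')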
3080=== modified file 'hooks/charmhelpers/core/services/helpers.py'
3081--- hooks/charmhelpers/core/services/helpers.py 2014-10-23 17:30:13 +0000
3082+++ hooks/charmhelpers/core/services/helpers.py 2014-12-11 17:56:59 +0000
3083@@ -196,7 +196,7 @@
3084 if not os.path.isabs(file_name):
3085 file_name = os.path.join(hookenv.charm_dir(), file_name)
3086 with open(file_name, 'w') as file_stream:
3087- os.fchmod(file_stream.fileno(), 0600)
3088+ os.fchmod(file_stream.fileno(), 0o600)
3089 yaml.dump(config_data, file_stream)
3090
3091 def read_context(self, file_name):
3092@@ -211,15 +211,19 @@
3093
3094 class TemplateCallback(ManagerCallback):
3095 """
3096- Callback class that will render a Jinja2 template, for use as a ready action.
3097-
3098- :param str source: The template source file, relative to `$CHARM_DIR/templates`
3099+ Callback class that will render a Jinja2 template, for use as a ready
3100+ action.
3101+
3102+ :param str source: The template source file, relative to
3103+ `$CHARM_DIR/templates`
3104+
3105 :param str target: The target to write the rendered template to
3106 :param str owner: The owner of the rendered file
3107 :param str group: The group of the rendered file
3108 :param int perms: The permissions of the rendered file
3109 """
3110- def __init__(self, source, target, owner='root', group='root', perms=0444):
3111+ def __init__(self, source, target,
3112+ owner='root', group='root', perms=0o444):
3113 self.source = source
3114 self.target = target
3115 self.owner = owner
3116
3117=== modified file 'hooks/charmhelpers/core/templating.py'
3118--- hooks/charmhelpers/core/templating.py 2014-10-23 17:30:13 +0000
3119+++ hooks/charmhelpers/core/templating.py 2014-12-11 17:56:59 +0000
3120@@ -4,7 +4,8 @@
3121 from charmhelpers.core import hookenv
3122
3123
3124-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
3125+def render(source, target, context, owner='root', group='root',
3126+ perms=0o444, templates_dir=None):
3127 """
3128 Render a template.
3129
3130
3131=== modified file 'hooks/charmhelpers/fetch/__init__.py'
3132--- hooks/charmhelpers/fetch/__init__.py 2014-10-23 17:30:13 +0000
3133+++ hooks/charmhelpers/fetch/__init__.py 2014-12-11 17:56:59 +0000
3134@@ -5,10 +5,6 @@
3135 from charmhelpers.core.host import (
3136 lsb_release
3137 )
3138-from urlparse import (
3139- urlparse,
3140- urlunparse,
3141-)
3142 import subprocess
3143 from charmhelpers.core.hookenv import (
3144 config,
3145@@ -16,6 +12,12 @@
3146 )
3147 import os
3148
3149+import six
3150+if six.PY3:
3151+ from urllib.parse import urlparse, urlunparse
3152+else:
3153+ from urlparse import urlparse, urlunparse
3154+
3155
3156 CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
3157 deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
3158@@ -149,7 +151,7 @@
3159 cmd = ['apt-get', '--assume-yes']
3160 cmd.extend(options)
3161 cmd.append('install')
3162- if isinstance(packages, basestring):
3163+ if isinstance(packages, six.string_types):
3164 cmd.append(packages)
3165 else:
3166 cmd.extend(packages)
3167@@ -182,7 +184,7 @@
3168 def apt_purge(packages, fatal=False):
3169 """Purge one or more packages"""
3170 cmd = ['apt-get', '--assume-yes', 'purge']
3171- if isinstance(packages, basestring):
3172+ if isinstance(packages, six.string_types):
3173 cmd.append(packages)
3174 else:
3175 cmd.extend(packages)
3176@@ -193,7 +195,7 @@
3177 def apt_hold(packages, fatal=False):
3178 """Hold one or more packages"""
3179 cmd = ['apt-mark', 'hold']
3180- if isinstance(packages, basestring):
3181+ if isinstance(packages, six.string_types):
3182 cmd.append(packages)
3183 else:
3184 cmd.extend(packages)
3185@@ -260,7 +262,7 @@
3186
3187 if key:
3188 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
3189- with NamedTemporaryFile() as key_file:
3190+ with NamedTemporaryFile('w+') as key_file:
3191 key_file.write(key)
3192 key_file.flush()
3193 key_file.seek(0)
3194@@ -297,14 +299,14 @@
3195 sources = safe_load((config(sources_var) or '').strip()) or []
3196 keys = safe_load((config(keys_var) or '').strip()) or None
3197
3198- if isinstance(sources, basestring):
3199+ if isinstance(sources, six.string_types):
3200 sources = [sources]
3201
3202 if keys is None:
3203 for source in sources:
3204 add_source(source, None)
3205 else:
3206- if isinstance(keys, basestring):
3207+ if isinstance(keys, six.string_types):
3208 keys = [keys]
3209
3210 if len(sources) != len(keys):
3211@@ -401,7 +403,7 @@
3212 while result is None or result == APT_NO_LOCK:
3213 try:
3214 result = subprocess.check_call(cmd, env=env)
3215- except subprocess.CalledProcessError, e:
3216+ except subprocess.CalledProcessError as e:
3217 retry_count = retry_count + 1
3218 if retry_count > APT_NO_LOCK_RETRY_COUNT:
3219 raise
3220
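The basestring checks in the apt helpers become isinstance(x, six.string_types), the portable way to accept either a single package name or a list on both Python 2 and 3. The pattern in isolation:

    import six

    def as_package_list(packages):
        # str/unicode on Python 2, str on Python 3
        if isinstance(packages, six.string_types):
            return [packages]
        return list(packages)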
3221=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
3222--- hooks/charmhelpers/fetch/archiveurl.py 2014-10-23 17:30:13 +0000
3223+++ hooks/charmhelpers/fetch/archiveurl.py 2014-12-11 17:56:59 +0000
3224@@ -1,8 +1,23 @@
3225 import os
3226-import urllib2
3227-from urllib import urlretrieve
3228-import urlparse
3229 import hashlib
3230+import re
3231+
3232+import six
3233+if six.PY3:
3234+ from urllib.request import (
3235+ build_opener, install_opener, urlopen, urlretrieve,
3236+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
3237+ )
3238+ from urllib.parse import urlparse, urlunparse, parse_qs
3239+ from urllib.error import URLError
3240+else:
3241+ from urllib import urlretrieve
3242+ from urllib2 import (
3243+ build_opener, install_opener, urlopen,
3244+ HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
3245+ URLError
3246+ )
3247+ from urlparse import urlparse, urlunparse, parse_qs
3248
3249 from charmhelpers.fetch import (
3250 BaseFetchHandler,
3251@@ -15,6 +30,24 @@
3252 from charmhelpers.core.host import mkdir, check_hash
3253
3254
3255+def splituser(host):
3256+ '''urllib.splituser(), but six's support of this seems broken'''
3257+ _userprog = re.compile('^(.*)@(.*)$')
3258+ match = _userprog.match(host)
3259+ if match:
3260+ return match.group(1, 2)
3261+ return None, host
3262+
3263+
3264+def splitpasswd(user):
3265+ '''urllib.splitpasswd(), but six's support of this is missing'''
3266+ _passwdprog = re.compile('^([^:]*):(.*)$', re.S)
3267+ match = _passwdprog.match(user)
3268+ if match:
3269+ return match.group(1, 2)
3270+ return user, None
3271+
3272+
3273 class ArchiveUrlFetchHandler(BaseFetchHandler):
3274 """
3275 Handler to download archive files from arbitrary URLs.
3276@@ -42,20 +75,20 @@
3277 """
3278 # propogate all exceptions
3279 # URLError, OSError, etc
3280- proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
3281+ proto, netloc, path, params, query, fragment = urlparse(source)
3282 if proto in ('http', 'https'):
3283- auth, barehost = urllib2.splituser(netloc)
3284+ auth, barehost = splituser(netloc)
3285 if auth is not None:
3286- source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
3287- username, password = urllib2.splitpasswd(auth)
3288- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
3289+ source = urlunparse((proto, barehost, path, params, query, fragment))
3290+ username, password = splitpasswd(auth)
3291+ passman = HTTPPasswordMgrWithDefaultRealm()
3292 # Realm is set to None in add_password to force the username and password
3293 # to be used whatever the realm
3294 passman.add_password(None, source, username, password)
3295- authhandler = urllib2.HTTPBasicAuthHandler(passman)
3296- opener = urllib2.build_opener(authhandler)
3297- urllib2.install_opener(opener)
3298- response = urllib2.urlopen(source)
3299+ authhandler = HTTPBasicAuthHandler(passman)
3300+ opener = build_opener(authhandler)
3301+ install_opener(opener)
3302+ response = urlopen(source)
3303 try:
3304 with open(dest, 'w') as dest_file:
3305 dest_file.write(response.read())
3306@@ -91,17 +124,21 @@
3307 url_parts = self.parse_url(source)
3308 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
3309 if not os.path.exists(dest_dir):
3310- mkdir(dest_dir, perms=0755)
3311+ mkdir(dest_dir, perms=0o755)
3312 dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
3313 try:
3314 self.download(source, dld_file)
3315- except urllib2.URLError as e:
3316+ except URLError as e:
3317 raise UnhandledSource(e.reason)
3318 except OSError as e:
3319 raise UnhandledSource(e.strerror)
3320- options = urlparse.parse_qs(url_parts.fragment)
3321+ options = parse_qs(url_parts.fragment)
3322 for key, value in options.items():
3323- if key in hashlib.algorithms:
3324+ if not six.PY3:
3325+ algorithms = hashlib.algorithms
3326+ else:
3327+ algorithms = hashlib.algorithms_available
3328+ if key in algorithms:
3329 check_hash(dld_file, value, key)
3330 if checksum:
3331 check_hash(dld_file, checksum, hash_type)
3332
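The local splituser()/splitpasswd() helpers replace the urllib2 equivalents that are awkward to reach portably through six; they split the credentials out of an authenticated archive URL's netloc. What they return (values illustrative):

    from charmhelpers.fetch.archiveurl import splituser, splitpasswd

    auth, barehost = splituser('jenkins:s3cret@example.com')
    # auth == 'jenkins:s3cret', barehost == 'example.com'
    username, password = splitpasswd(auth)
    # username == 'jenkins', password == 's3cret'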
3333=== modified file 'hooks/charmhelpers/fetch/bzrurl.py'
3334--- hooks/charmhelpers/fetch/bzrurl.py 2014-06-10 12:45:45 +0000
3335+++ hooks/charmhelpers/fetch/bzrurl.py 2014-12-11 17:56:59 +0000
3336@@ -5,6 +5,10 @@
3337 )
3338 from charmhelpers.core.host import mkdir
3339
3340+import six
3341+if six.PY3:
3342+ raise ImportError('bzrlib does not support Python3')
3343+
3344 try:
3345 from bzrlib.branch import Branch
3346 except ImportError:
3347@@ -42,7 +46,7 @@
3348 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3349 branch_name)
3350 if not os.path.exists(dest_dir):
3351- mkdir(dest_dir, perms=0755)
3352+ mkdir(dest_dir, perms=0o755)
3353 try:
3354 self.branch(source, dest_dir)
3355 except OSError as e:
3356
3357=== modified file 'hooks/charmhelpers/fetch/giturl.py'
3358--- hooks/charmhelpers/fetch/giturl.py 2014-10-23 17:30:13 +0000
3359+++ hooks/charmhelpers/fetch/giturl.py 2014-12-11 17:56:59 +0000
3360@@ -5,6 +5,10 @@
3361 )
3362 from charmhelpers.core.host import mkdir
3363
3364+import six
3365+if six.PY3:
3366+ raise ImportError('GitPython does not support Python 3')
3367+
3368 try:
3369 from git import Repo
3370 except ImportError:
3371@@ -17,7 +21,7 @@
3372 """Handler for git branches via generic and github URLs"""
3373 def can_handle(self, source):
3374 url_parts = self.parse_url(source)
3375- #TODO (mattyw) no support for ssh git@ yet
3376+ # TODO (mattyw) no support for ssh git@ yet
3377 if url_parts.scheme not in ('http', 'https', 'git'):
3378 return False
3379 else:
3380@@ -30,13 +34,16 @@
3381 repo = Repo.clone_from(source, dest)
3382 repo.git.checkout(branch)
3383
3384- def install(self, source, branch="master"):
3385+ def install(self, source, branch="master", dest=None):
3386 url_parts = self.parse_url(source)
3387 branch_name = url_parts.path.strip("/").split("/")[-1]
3388- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3389- branch_name)
3390+ if dest:
3391+ dest_dir = os.path.join(dest, branch_name)
3392+ else:
3393+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3394+ branch_name)
3395 if not os.path.exists(dest_dir):
3396- mkdir(dest_dir, perms=0755)
3397+ mkdir(dest_dir, perms=0o755)
3398 try:
3399 self.clone(source, dest_dir, branch)
3400 except OSError as e:
