Merge lp:~james-page/charms/trusty/rabbitmq-server/fixup-resync into lp:charms/trusty/rabbitmq-server

Proposed by James Page
Status: Merged
Merged at revision: 64
Proposed branch: lp:~james-page/charms/trusty/rabbitmq-server/fixup-resync
Merge into: lp:charms/trusty/rabbitmq-server
Diff against target: 3461 lines (+675/-1233)
20 files modified
charm-helpers.yaml (+1/-0)
hooks/charmhelpers/contrib/network/ip.py (+347/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+0/-94)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+0/-276)
hooks/charmhelpers/contrib/openstack/context.py (+166/-49)
hooks/charmhelpers/contrib/openstack/ip.py (+0/-79)
hooks/charmhelpers/contrib/openstack/utils.py (+52/-1)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+4/-3)
hooks/charmhelpers/core/fstab.py (+0/-116)
hooks/charmhelpers/core/hookenv.py (+6/-0)
hooks/charmhelpers/core/host.py (+8/-2)
hooks/charmhelpers/core/services/__init__.py (+0/-2)
hooks/charmhelpers/core/services/base.py (+0/-313)
hooks/charmhelpers/core/services/helpers.py (+0/-239)
hooks/charmhelpers/core/sysctl.py (+34/-0)
hooks/charmhelpers/core/templating.py (+0/-51)
hooks/charmhelpers/fetch/__init__.py (+5/-1)
hooks/charmhelpers/fetch/giturl.py (+44/-0)
hooks/rabbit_utils.py (+3/-2)
hooks/test_rabbitmq_server_relations.py (+5/-5)
To merge this branch: bzr merge lp:~james-page/charms/trusty/rabbitmq-server/fixup-resync
Reviewer                      Review Type          Date Requested   Status
Review Queue (community)      automated testing                     Needs Fixing
Juan L. Negron (community)                                          Approve
Review via email: mp+239998@code.launchpad.net

Description of the change

Resync charm-helpers with the changes from the precise charm.

Revision history for this message
Juan L. Negron (negronjl) wrote :

Testing now.

-Juan

65. By James Page

Fixup tests for merge

Revision history for this message
Ryan Beisner (1chb1n) wrote :

UOSCI bot says:
charm_lint_check #815 trusty-rabbitmq-server for james-page mp239998
    LINT OK: believed to pass, but you should confirm results

LINT Results (max last 4 lines) from
/var/lib/jenkins/workspace/charm_lint_check/make-lint.815:
I: config.yaml: option key has no default value
I: config.yaml: option ssl_cert has no default value
I: config.yaml: option ssl_ca has no default value
I: config.yaml: option source has no default value

Full lint output: http://paste.ubuntu.com/8733779/
Build: http://10.98.191.181:8080/job/charm_lint_check/815/

Revision history for this message
Ryan Beisner (1chb1n) wrote :

UOSCI bot says:
charm_unit_test #623 trusty-rabbitmq-server for james-page mp239998
    UNIT OK: believed to pass, but you should confirm results

UNIT Results (max last 4 lines) from
/var/lib/jenkins/workspace/charm_unit_test/unit-test.623:
----------------------------------------------------------------------
Ran 1 test in 0.134s

OK

Full unit output: http://paste.ubuntu.com/8733780/
Build: http://10.98.191.181:8080/job/charm_unit_test/623/

Revision history for this message
Juan L. Negron (negronjl) wrote :

This works for me.

-Juan

review: Approve
Revision history for this message
Review Queue (review-queue) wrote :

This item has failed automated testing. Results are available here: http://reports.vapour.ws/charm-tests/charm-bundle-test-1361-results

review: Needs Fixing (automated testing)

Preview Diff

1=== modified file 'charm-helpers.yaml'
2--- charm-helpers.yaml 2014-03-28 10:22:08 +0000
3+++ charm-helpers.yaml 2014-10-29 14:25:04 +0000
4@@ -9,3 +9,4 @@
5 - contrib.peerstorage
6 - contrib.ssl
7 - contrib.hahelpers.cluster
8+ - contrib.network.ip
9
10=== added directory 'hooks/charmhelpers/contrib/network'
11=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
12=== added file 'hooks/charmhelpers/contrib/network/ip.py'
13--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
14+++ hooks/charmhelpers/contrib/network/ip.py 2014-10-29 14:25:04 +0000
15@@ -0,0 +1,347 @@
16+import glob
17+import re
18+import subprocess
19+import sys
20+
21+from functools import partial
22+
23+from charmhelpers.core.hookenv import unit_get
24+from charmhelpers.fetch import apt_install
25+from charmhelpers.core.hookenv import (
26+ ERROR,
27+ log
28+)
29+
30+try:
31+ import netifaces
32+except ImportError:
33+ apt_install('python-netifaces')
34+ import netifaces
35+
36+try:
37+ import netaddr
38+except ImportError:
39+ apt_install('python-netaddr')
40+ import netaddr
41+
42+
43+def _validate_cidr(network):
44+ try:
45+ netaddr.IPNetwork(network)
46+ except (netaddr.core.AddrFormatError, ValueError):
47+ raise ValueError("Network (%s) is not in CIDR presentation format" %
48+ network)
49+
50+
51+def get_address_in_network(network, fallback=None, fatal=False):
52+ """
53+ Get an IPv4 or IPv6 address within the network from the host.
54+
55+ :param network (str): CIDR presentation format. For example,
56+ '192.168.1.0/24'.
57+ :param fallback (str): If no address is found, return fallback.
58+ :param fatal (boolean): If no address is found, fallback is not
59+ set and fatal is True then exit(1).
60+
61+ """
62+
63+ def not_found_error_out():
64+ log("No IP address found in network: %s" % network,
65+ level=ERROR)
66+ sys.exit(1)
67+
68+ if network is None:
69+ if fallback is not None:
70+ return fallback
71+ else:
72+ if fatal:
73+ not_found_error_out()
74+ else:
75+ return None
76+
77+ _validate_cidr(network)
78+ network = netaddr.IPNetwork(network)
79+ for iface in netifaces.interfaces():
80+ addresses = netifaces.ifaddresses(iface)
81+ if network.version == 4 and netifaces.AF_INET in addresses:
82+ addr = addresses[netifaces.AF_INET][0]['addr']
83+ netmask = addresses[netifaces.AF_INET][0]['netmask']
84+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
85+ if cidr in network:
86+ return str(cidr.ip)
87+ if network.version == 6 and netifaces.AF_INET6 in addresses:
88+ for addr in addresses[netifaces.AF_INET6]:
89+ if not addr['addr'].startswith('fe80'):
90+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
91+ addr['netmask']))
92+ if cidr in network:
93+ return str(cidr.ip)
94+
95+ if fallback is not None:
96+ return fallback
97+
98+ if fatal:
99+ not_found_error_out()
100+
101+ return None
102+
103+
104+def is_ipv6(address):
105+ '''Determine whether provided address is IPv6 or not'''
106+ try:
107+ address = netaddr.IPAddress(address)
108+ except netaddr.AddrFormatError:
109+ # probably a hostname - so not an address at all!
110+ return False
111+ else:
112+ return address.version == 6
113+
114+
115+def is_address_in_network(network, address):
116+ """
117+ Determine whether the provided address is within a network range.
118+
119+ :param network (str): CIDR presentation format. For example,
120+ '192.168.1.0/24'.
121+ :param address: An individual IPv4 or IPv6 address without a net
122+ mask or subnet prefix. For example, '192.168.1.1'.
123+ :returns boolean: Flag indicating whether address is in network.
124+ """
125+ try:
126+ network = netaddr.IPNetwork(network)
127+ except (netaddr.core.AddrFormatError, ValueError):
128+ raise ValueError("Network (%s) is not in CIDR presentation format" %
129+ network)
130+ try:
131+ address = netaddr.IPAddress(address)
132+ except (netaddr.core.AddrFormatError, ValueError):
133+ raise ValueError("Address (%s) is not in correct presentation format" %
134+ address)
135+ if address in network:
136+ return True
137+ else:
138+ return False
139+
140+
141+def _get_for_address(address, key):
142+ """Retrieve an attribute of or the physical interface that
143+ the IP address provided could be bound to.
144+
145+ :param address (str): An individual IPv4 or IPv6 address without a net
146+ mask or subnet prefix. For example, '192.168.1.1'.
147+ :param key: 'iface' for the physical interface name or an attribute
148+ of the configured interface, for example 'netmask'.
149+ :returns str: Requested attribute or None if address is not bindable.
150+ """
151+ address = netaddr.IPAddress(address)
152+ for iface in netifaces.interfaces():
153+ addresses = netifaces.ifaddresses(iface)
154+ if address.version == 4 and netifaces.AF_INET in addresses:
155+ addr = addresses[netifaces.AF_INET][0]['addr']
156+ netmask = addresses[netifaces.AF_INET][0]['netmask']
157+ network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
158+ cidr = network.cidr
159+ if address in cidr:
160+ if key == 'iface':
161+ return iface
162+ else:
163+ return addresses[netifaces.AF_INET][0][key]
164+ if address.version == 6 and netifaces.AF_INET6 in addresses:
165+ for addr in addresses[netifaces.AF_INET6]:
166+ if not addr['addr'].startswith('fe80'):
167+ network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
168+ addr['netmask']))
169+ cidr = network.cidr
170+ if address in cidr:
171+ if key == 'iface':
172+ return iface
173+ elif key == 'netmask' and cidr:
174+ return str(cidr).split('/')[1]
175+ else:
176+ return addr[key]
177+ return None
178+
179+
180+get_iface_for_address = partial(_get_for_address, key='iface')
181+
182+get_netmask_for_address = partial(_get_for_address, key='netmask')
183+
184+
185+def format_ipv6_addr(address):
186+ """
187+ IPv6 needs to be wrapped with [] in url link to parse correctly.
188+ """
189+ if is_ipv6(address):
190+ address = "[%s]" % address
191+ else:
192+ address = None
193+
194+ return address
195+
196+
197+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
198+ fatal=True, exc_list=None):
199+ """
200+ Return the assigned IP address for a given interface, if any, or [].
201+ """
202+ # Extract nic if passed /dev/ethX
203+ if '/' in iface:
204+ iface = iface.split('/')[-1]
205+ if not exc_list:
206+ exc_list = []
207+ try:
208+ inet_num = getattr(netifaces, inet_type)
209+ except AttributeError:
210+ raise Exception('Unknown inet type ' + str(inet_type))
211+
212+ interfaces = netifaces.interfaces()
213+ if inc_aliases:
214+ ifaces = []
215+ for _iface in interfaces:
216+ if iface == _iface or _iface.split(':')[0] == iface:
217+ ifaces.append(_iface)
218+ if fatal and not ifaces:
219+ raise Exception("Invalid interface '%s'" % iface)
220+ ifaces.sort()
221+ else:
222+ if iface not in interfaces:
223+ if fatal:
224+ raise Exception("%s not found " % (iface))
225+ else:
226+ return []
227+ else:
228+ ifaces = [iface]
229+
230+ addresses = []
231+ for netiface in ifaces:
232+ net_info = netifaces.ifaddresses(netiface)
233+ if inet_num in net_info:
234+ for entry in net_info[inet_num]:
235+ if 'addr' in entry and entry['addr'] not in exc_list:
236+ addresses.append(entry['addr'])
237+ if fatal and not addresses:
238+ raise Exception("Interface '%s' doesn't have any %s addresses." %
239+ (iface, inet_type))
240+ return addresses
241+
242+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
243+
244+
245+def get_iface_from_addr(addr):
246+ """Work out on which interface the provided address is configured."""
247+ for iface in netifaces.interfaces():
248+ addresses = netifaces.ifaddresses(iface)
249+ for inet_type in addresses:
250+ for _addr in addresses[inet_type]:
251+ _addr = _addr['addr']
252+ # link local
253+ ll_key = re.compile("(.+)%.*")
254+ raw = re.match(ll_key, _addr)
255+ if raw:
256+ _addr = raw.group(1)
257+ if _addr == addr:
258+ log("Address '%s' is configured on iface '%s'" %
259+ (addr, iface))
260+ return iface
261+
262+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
263+ raise Exception(msg)
264+
265+
266+def sniff_iface(f):
267+ """If no iface provided, inject net iface inferred from unit private
268+ address.
269+ """
270+ def iface_sniffer(*args, **kwargs):
271+ if not kwargs.get('iface', None):
272+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
273+
274+ return f(*args, **kwargs)
275+
276+ return iface_sniffer
277+
278+
279+@sniff_iface
280+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
281+ dynamic_only=True):
282+ """Get assigned IPv6 address for a given interface.
283+
284+ Returns list of addresses found. If no address found, returns empty list.
285+
286+ If iface is None, we infer the current primary interface by doing a reverse
287+ lookup on the unit private-address.
288+
289+ We currently only support scope global IPv6 addresses i.e. non-temporary
290+ addresses. If no global IPv6 address is found, return the first one found
291+ in the ipv6 address list.
292+ """
293+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
294+ inc_aliases=inc_aliases, fatal=fatal,
295+ exc_list=exc_list)
296+
297+ if addresses:
298+ global_addrs = []
299+ for addr in addresses:
300+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
301+ m = re.match(key_scope_link_local, addr)
302+ if m:
303+ eui_64_mac = m.group(1)
304+ iface = m.group(2)
305+ else:
306+ global_addrs.append(addr)
307+
308+ if global_addrs:
309+ # Make sure any found global addresses are not temporary
310+ cmd = ['ip', 'addr', 'show', iface]
311+ out = subprocess.check_output(cmd)
312+ if dynamic_only:
313+ key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
314+ else:
315+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
316+
317+ addrs = []
318+ for line in out.split('\n'):
319+ line = line.strip()
320+ m = re.match(key, line)
321+ if m and 'temporary' not in line:
322+ # Return the first valid address we find
323+ for addr in global_addrs:
324+ if m.group(1) == addr:
325+ if not dynamic_only or \
326+ m.group(1).endswith(eui_64_mac):
327+ addrs.append(addr)
328+
329+ if addrs:
330+ return addrs
331+
332+ if fatal:
333+ raise Exception("Interface '%s' doesn't have a scope global "
334+ "non-temporary ipv6 address." % iface)
335+
336+ return []
337+
338+
339+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
340+ """
341+ Return a list of bridges on the system or []
342+ """
343+ b_rgex = vnic_dir + '/*/bridge'
344+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
345+
346+
347+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
348+ """
349+ Return a list of nics comprising a given bridge on the system or []
350+ """
351+ brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
352+ return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
353+
354+
355+def is_bridge_member(nic):
356+ """
357+ Check if a given nic is a member of a bridge
358+ """
359+ for bridge in get_bridges():
360+ if nic in get_bridge_nics(bridge):
361+ return True
362+ return False
363
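For reference, a minimal usage sketch (illustrative only, not part of the merge) of the network helpers added in this file; the CIDR and addresses below are hypothetical:

# Illustrative usage of the new network helpers; addresses are hypothetical.
from charmhelpers.contrib.network.ip import (
    format_ipv6_addr,
    get_address_in_network,
    get_netmask_for_address,
    is_address_in_network,
)

# Pick whichever local address falls inside the configured CIDR, falling
# back to a known unit address if no interface matches.
addr = get_address_in_network('192.168.1.0/24', fallback='10.0.0.5')

# Netmask of the interface the address is bound to (None if not bound).
netmask = get_netmask_for_address(addr)

# IPv6 addresses must be bracketed before use in URLs; non-IPv6 input
# returns None, so fall back to the IPv4 address.
host = format_ipv6_addr('2001:db8::1') or addr

# Simple membership test used throughout the OpenStack contexts.
assert is_address_in_network('192.168.1.0/24', '192.168.1.1')
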
364=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
365=== removed directory 'hooks/charmhelpers/contrib/openstack/amulet'
366=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
367=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
368=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
369--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
370+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-29 14:25:04 +0000
371@@ -0,0 +1,91 @@
372+from charmhelpers.contrib.amulet.deployment import (
373+ AmuletDeployment
374+)
375+
376+
377+class OpenStackAmuletDeployment(AmuletDeployment):
378+ """OpenStack amulet deployment.
379+
380+ This class inherits from AmuletDeployment and has additional support
381+ that is specifically for use by OpenStack charms.
382+ """
383+
384+ def __init__(self, series=None, openstack=None, source=None, stable=True):
385+ """Initialize the deployment environment."""
386+ super(OpenStackAmuletDeployment, self).__init__(series)
387+ self.openstack = openstack
388+ self.source = source
389+ self.stable = stable
390+ # Note(coreycb): this needs to be changed when new next branches come
391+ # out.
392+ self.current_next = "trusty"
393+
394+ def _determine_branch_locations(self, other_services):
395+ """Determine the branch locations for the other services.
396+
397+ Determine if the local branch being tested is derived from its
398+ stable or next (dev) branch, and based on this, use the corresonding
399+ stable or next branches for the other_services."""
400+ base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
401+
402+ if self.stable:
403+ for svc in other_services:
404+ temp = 'lp:charms/{}'
405+ svc['location'] = temp.format(svc['name'])
406+ else:
407+ for svc in other_services:
408+ if svc['name'] in base_charms:
409+ temp = 'lp:charms/{}'
410+ svc['location'] = temp.format(svc['name'])
411+ else:
412+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
413+ svc['location'] = temp.format(self.current_next,
414+ svc['name'])
415+ return other_services
416+
417+ def _add_services(self, this_service, other_services):
418+ """Add services to the deployment and set openstack-origin/source."""
419+ other_services = self._determine_branch_locations(other_services)
420+
421+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
422+ other_services)
423+
424+ services = other_services
425+ services.append(this_service)
426+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
427+ 'ceph-osd', 'ceph-radosgw']
428+
429+ if self.openstack:
430+ for svc in services:
431+ if svc['name'] not in use_source:
432+ config = {'openstack-origin': self.openstack}
433+ self.d.configure(svc['name'], config)
434+
435+ if self.source:
436+ for svc in services:
437+ if svc['name'] in use_source:
438+ config = {'source': self.source}
439+ self.d.configure(svc['name'], config)
440+
441+ def _configure_services(self, configs):
442+ """Configure all of the services."""
443+ for service, config in configs.iteritems():
444+ self.d.configure(service, config)
445+
446+ def _get_openstack_release(self):
447+ """Get openstack release.
448+
449+ Return an integer representing the enum value of the openstack
450+ release.
451+ """
452+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
453+ self.precise_havana, self.precise_icehouse,
454+ self.trusty_icehouse) = range(6)
455+ releases = {
456+ ('precise', None): self.precise_essex,
457+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
458+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
459+ ('precise', 'cloud:precise-havana'): self.precise_havana,
460+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
461+ ('trusty', None): self.trusty_icehouse}
462+ return releases[(self.series, self.openstack)]
463
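For reference, a hypothetical amulet test skeleton showing how the reworked OpenStackAmuletDeployment is typically driven: other_services entries are now dicts keyed by 'name', and the new stable flag selects charm-store versus next branches. The service names and config below are placeholders, not taken from this charm's tests:

# Hypothetical amulet test skeleton; service names and configs are illustrative.
from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)


class BasicDeployment(OpenStackAmuletDeployment):
    def __init__(self, series='trusty', openstack=None, source=None,
                 stable=False):
        super(BasicDeployment, self).__init__(series, openstack, source,
                                              stable)
        # Services are now passed as dicts keyed by 'name'; branch
        # locations are filled in by _determine_branch_locations().
        this_service = {'name': 'rabbitmq-server'}
        other_services = [{'name': 'cinder'}]
        self._add_services(this_service, other_services)
        self._configure_services({'cinder': {'block-device': 'None'}})
        self._deploy()  # assumed to come from the base AmuletDeployment class
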
464=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
465--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-26 08:06:25 +0000
466+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
467@@ -1,94 +0,0 @@
468-from bzrlib.branch import Branch
469-import os
470-import re
471-from charmhelpers.contrib.amulet.deployment import (
472- AmuletDeployment
473-)
474-
475-
476-class OpenStackAmuletDeployment(AmuletDeployment):
477- """OpenStack amulet deployment.
478-
479- This class inherits from AmuletDeployment and has additional support
480- that is specifically for use by OpenStack charms.
481- """
482-
483- def __init__(self, series=None, openstack=None, source=None):
484- """Initialize the deployment environment."""
485- super(OpenStackAmuletDeployment, self).__init__(series)
486- self.openstack = openstack
487- self.source = source
488-
489- def _is_dev_branch(self):
490- """Determine if branch being tested is a dev (i.e. next) branch."""
491- branch = Branch.open(os.getcwd())
492- parent = branch.get_parent()
493- pattern = re.compile("^.*/next/$")
494- if (pattern.match(parent)):
495- return True
496- else:
497- return False
498-
499- def _determine_branch_locations(self, other_services):
500- """Determine the branch locations for the other services.
501-
502- If the branch being tested is a dev branch, then determine the
503- development branch locations for the other services. Otherwise,
504- the default charm store branches will be used."""
505- name = 0
506- if self._is_dev_branch():
507- updated_services = []
508- for svc in other_services:
509- if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
510- location = 'lp:charms/{}'.format(svc[name])
511- else:
512- temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
513- location = temp.format(svc[name])
514- updated_services.append(svc + (location,))
515- other_services = updated_services
516- return other_services
517-
518- def _add_services(self, this_service, other_services):
519- """Add services to the deployment and set openstack-origin/source."""
520- name = 0
521- other_services = self._determine_branch_locations(other_services)
522- super(OpenStackAmuletDeployment, self)._add_services(this_service,
523- other_services)
524- services = other_services
525- services.append(this_service)
526- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
527-
528- if self.openstack:
529- for svc in services:
530- if svc[name] not in use_source:
531- config = {'openstack-origin': self.openstack}
532- self.d.configure(svc[name], config)
533-
534- if self.source:
535- for svc in services:
536- if svc[name] in use_source:
537- config = {'source': self.source}
538- self.d.configure(svc[name], config)
539-
540- def _configure_services(self, configs):
541- """Configure all of the services."""
542- for service, config in configs.iteritems():
543- self.d.configure(service, config)
544-
545- def _get_openstack_release(self):
546- """Get openstack release.
547-
548- Return an integer representing the enum value of the openstack
549- release.
550- """
551- (self.precise_essex, self.precise_folsom, self.precise_grizzly,
552- self.precise_havana, self.precise_icehouse,
553- self.trusty_icehouse) = range(6)
554- releases = {
555- ('precise', None): self.precise_essex,
556- ('precise', 'cloud:precise-folsom'): self.precise_folsom,
557- ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
558- ('precise', 'cloud:precise-havana'): self.precise_havana,
559- ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
560- ('trusty', None): self.trusty_icehouse}
561- return releases[(self.series, self.openstack)]
562
563=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
564--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
565+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-29 14:25:04 +0000
566@@ -0,0 +1,276 @@
567+import logging
568+import os
569+import time
570+import urllib
571+
572+import glanceclient.v1.client as glance_client
573+import keystoneclient.v2_0 as keystone_client
574+import novaclient.v1_1.client as nova_client
575+
576+from charmhelpers.contrib.amulet.utils import (
577+ AmuletUtils
578+)
579+
580+DEBUG = logging.DEBUG
581+ERROR = logging.ERROR
582+
583+
584+class OpenStackAmuletUtils(AmuletUtils):
585+ """OpenStack amulet utilities.
586+
587+ This class inherits from AmuletUtils and has additional support
588+ that is specifically for use by OpenStack charms.
589+ """
590+
591+ def __init__(self, log_level=ERROR):
592+ """Initialize the deployment environment."""
593+ super(OpenStackAmuletUtils, self).__init__(log_level)
594+
595+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
596+ public_port, expected):
597+ """Validate endpoint data.
598+
599+ Validate actual endpoint data vs expected endpoint data. The ports
600+ are used to find the matching endpoint.
601+ """
602+ found = False
603+ for ep in endpoints:
604+ self.log.debug('endpoint: {}'.format(repr(ep)))
605+ if (admin_port in ep.adminurl and
606+ internal_port in ep.internalurl and
607+ public_port in ep.publicurl):
608+ found = True
609+ actual = {'id': ep.id,
610+ 'region': ep.region,
611+ 'adminurl': ep.adminurl,
612+ 'internalurl': ep.internalurl,
613+ 'publicurl': ep.publicurl,
614+ 'service_id': ep.service_id}
615+ ret = self._validate_dict_data(expected, actual)
616+ if ret:
617+ return 'unexpected endpoint data - {}'.format(ret)
618+
619+ if not found:
620+ return 'endpoint not found'
621+
622+ def validate_svc_catalog_endpoint_data(self, expected, actual):
623+ """Validate service catalog endpoint data.
624+
625+ Validate a list of actual service catalog endpoints vs a list of
626+ expected service catalog endpoints.
627+ """
628+ self.log.debug('actual: {}'.format(repr(actual)))
629+ for k, v in expected.iteritems():
630+ if k in actual:
631+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
632+ if ret:
633+ return self.endpoint_error(k, ret)
634+ else:
635+ return "endpoint {} does not exist".format(k)
636+ return ret
637+
638+ def validate_tenant_data(self, expected, actual):
639+ """Validate tenant data.
640+
641+ Validate a list of actual tenant data vs list of expected tenant
642+ data.
643+ """
644+ self.log.debug('actual: {}'.format(repr(actual)))
645+ for e in expected:
646+ found = False
647+ for act in actual:
648+ a = {'enabled': act.enabled, 'description': act.description,
649+ 'name': act.name, 'id': act.id}
650+ if e['name'] == a['name']:
651+ found = True
652+ ret = self._validate_dict_data(e, a)
653+ if ret:
654+ return "unexpected tenant data - {}".format(ret)
655+ if not found:
656+ return "tenant {} does not exist".format(e['name'])
657+ return ret
658+
659+ def validate_role_data(self, expected, actual):
660+ """Validate role data.
661+
662+ Validate a list of actual role data vs a list of expected role
663+ data.
664+ """
665+ self.log.debug('actual: {}'.format(repr(actual)))
666+ for e in expected:
667+ found = False
668+ for act in actual:
669+ a = {'name': act.name, 'id': act.id}
670+ if e['name'] == a['name']:
671+ found = True
672+ ret = self._validate_dict_data(e, a)
673+ if ret:
674+ return "unexpected role data - {}".format(ret)
675+ if not found:
676+ return "role {} does not exist".format(e['name'])
677+ return ret
678+
679+ def validate_user_data(self, expected, actual):
680+ """Validate user data.
681+
682+ Validate a list of actual user data vs a list of expected user
683+ data.
684+ """
685+ self.log.debug('actual: {}'.format(repr(actual)))
686+ for e in expected:
687+ found = False
688+ for act in actual:
689+ a = {'enabled': act.enabled, 'name': act.name,
690+ 'email': act.email, 'tenantId': act.tenantId,
691+ 'id': act.id}
692+ if e['name'] == a['name']:
693+ found = True
694+ ret = self._validate_dict_data(e, a)
695+ if ret:
696+ return "unexpected user data - {}".format(ret)
697+ if not found:
698+ return "user {} does not exist".format(e['name'])
699+ return ret
700+
701+ def validate_flavor_data(self, expected, actual):
702+ """Validate flavor data.
703+
704+ Validate a list of actual flavors vs a list of expected flavors.
705+ """
706+ self.log.debug('actual: {}'.format(repr(actual)))
707+ act = [a.name for a in actual]
708+ return self._validate_list_data(expected, act)
709+
710+ def tenant_exists(self, keystone, tenant):
711+ """Return True if tenant exists."""
712+ return tenant in [t.name for t in keystone.tenants.list()]
713+
714+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
715+ tenant):
716+ """Authenticates admin user with the keystone admin endpoint."""
717+ unit = keystone_sentry
718+ service_ip = unit.relation('shared-db',
719+ 'mysql:shared-db')['private-address']
720+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
721+ return keystone_client.Client(username=user, password=password,
722+ tenant_name=tenant, auth_url=ep)
723+
724+ def authenticate_keystone_user(self, keystone, user, password, tenant):
725+ """Authenticates a regular user with the keystone public endpoint."""
726+ ep = keystone.service_catalog.url_for(service_type='identity',
727+ endpoint_type='publicURL')
728+ return keystone_client.Client(username=user, password=password,
729+ tenant_name=tenant, auth_url=ep)
730+
731+ def authenticate_glance_admin(self, keystone):
732+ """Authenticates admin user with glance."""
733+ ep = keystone.service_catalog.url_for(service_type='image',
734+ endpoint_type='adminURL')
735+ return glance_client.Client(ep, token=keystone.auth_token)
736+
737+ def authenticate_nova_user(self, keystone, user, password, tenant):
738+ """Authenticates a regular user with nova-api."""
739+ ep = keystone.service_catalog.url_for(service_type='identity',
740+ endpoint_type='publicURL')
741+ return nova_client.Client(username=user, api_key=password,
742+ project_id=tenant, auth_url=ep)
743+
744+ def create_cirros_image(self, glance, image_name):
745+ """Download the latest cirros image and upload it to glance."""
746+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
747+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
748+ if http_proxy:
749+ proxies = {'http': http_proxy}
750+ opener = urllib.FancyURLopener(proxies)
751+ else:
752+ opener = urllib.FancyURLopener()
753+
754+ f = opener.open("http://download.cirros-cloud.net/version/released")
755+ version = f.read().strip()
756+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
757+ local_path = os.path.join('tests', cirros_img)
758+
759+ if not os.path.exists(local_path):
760+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
761+ version, cirros_img)
762+ opener.retrieve(cirros_url, local_path)
763+ f.close()
764+
765+ with open(local_path) as f:
766+ image = glance.images.create(name=image_name, is_public=True,
767+ disk_format='qcow2',
768+ container_format='bare', data=f)
769+ count = 1
770+ status = image.status
771+ while status != 'active' and count < 10:
772+ time.sleep(3)
773+ image = glance.images.get(image.id)
774+ status = image.status
775+ self.log.debug('image status: {}'.format(status))
776+ count += 1
777+
778+ if status != 'active':
779+ self.log.error('image creation timed out')
780+ return None
781+
782+ return image
783+
784+ def delete_image(self, glance, image):
785+ """Delete the specified image."""
786+ num_before = len(list(glance.images.list()))
787+ glance.images.delete(image)
788+
789+ count = 1
790+ num_after = len(list(glance.images.list()))
791+ while num_after != (num_before - 1) and count < 10:
792+ time.sleep(3)
793+ num_after = len(list(glance.images.list()))
794+ self.log.debug('number of images: {}'.format(num_after))
795+ count += 1
796+
797+ if num_after != (num_before - 1):
798+ self.log.error('image deletion timed out')
799+ return False
800+
801+ return True
802+
803+ def create_instance(self, nova, image_name, instance_name, flavor):
804+ """Create the specified instance."""
805+ image = nova.images.find(name=image_name)
806+ flavor = nova.flavors.find(name=flavor)
807+ instance = nova.servers.create(name=instance_name, image=image,
808+ flavor=flavor)
809+
810+ count = 1
811+ status = instance.status
812+ while status != 'ACTIVE' and count < 60:
813+ time.sleep(3)
814+ instance = nova.servers.get(instance.id)
815+ status = instance.status
816+ self.log.debug('instance status: {}'.format(status))
817+ count += 1
818+
819+ if status != 'ACTIVE':
820+ self.log.error('instance creation timed out')
821+ return None
822+
823+ return instance
824+
825+ def delete_instance(self, nova, instance):
826+ """Delete the specified instance."""
827+ num_before = len(list(nova.servers.list()))
828+ nova.servers.delete(instance)
829+
830+ count = 1
831+ num_after = len(list(nova.servers.list()))
832+ while num_after != (num_before - 1) and count < 10:
833+ time.sleep(3)
834+ num_after = len(list(nova.servers.list()))
835+ self.log.debug('number of instances: {}'.format(num_after))
836+ count += 1
837+
838+ if num_after != (num_before - 1):
839+ self.log.error('instance deletion timed out')
840+ return False
841+
842+ return True
843
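For reference, a short illustrative fragment chaining the authentication and lifecycle helpers above; the sentry unit, credentials, image and flavor names are all placeholders:

# Hypothetical test fragment exercising OpenStackAmuletUtils; the sentry
# unit, credentials, image and flavor names are placeholders.
from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils


def smoke_test(keystone_sentry):
    u = OpenStackAmuletUtils()
    keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                             password='openstack',
                                             tenant='admin')
    glance = u.authenticate_glance_admin(keystone)
    nova = u.authenticate_nova_user(keystone, user='demo',
                                    password='password', tenant='demo')

    image = u.create_cirros_image(glance, 'cirros-test-image')
    instance = u.create_instance(nova, 'cirros-test-image',
                                 'test-instance', 'm1.tiny')
    u.delete_instance(nova, instance)
    u.delete_image(glance, image)
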
844=== removed file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
845--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-26 08:06:25 +0000
846+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
847@@ -1,276 +0,0 @@
848-import logging
849-import os
850-import time
851-import urllib
852-
853-import glanceclient.v1.client as glance_client
854-import keystoneclient.v2_0 as keystone_client
855-import novaclient.v1_1.client as nova_client
856-
857-from charmhelpers.contrib.amulet.utils import (
858- AmuletUtils
859-)
860-
861-DEBUG = logging.DEBUG
862-ERROR = logging.ERROR
863-
864-
865-class OpenStackAmuletUtils(AmuletUtils):
866- """OpenStack amulet utilities.
867-
868- This class inherits from AmuletUtils and has additional support
869- that is specifically for use by OpenStack charms.
870- """
871-
872- def __init__(self, log_level=ERROR):
873- """Initialize the deployment environment."""
874- super(OpenStackAmuletUtils, self).__init__(log_level)
875-
876- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
877- public_port, expected):
878- """Validate endpoint data.
879-
880- Validate actual endpoint data vs expected endpoint data. The ports
881- are used to find the matching endpoint.
882- """
883- found = False
884- for ep in endpoints:
885- self.log.debug('endpoint: {}'.format(repr(ep)))
886- if (admin_port in ep.adminurl and
887- internal_port in ep.internalurl and
888- public_port in ep.publicurl):
889- found = True
890- actual = {'id': ep.id,
891- 'region': ep.region,
892- 'adminurl': ep.adminurl,
893- 'internalurl': ep.internalurl,
894- 'publicurl': ep.publicurl,
895- 'service_id': ep.service_id}
896- ret = self._validate_dict_data(expected, actual)
897- if ret:
898- return 'unexpected endpoint data - {}'.format(ret)
899-
900- if not found:
901- return 'endpoint not found'
902-
903- def validate_svc_catalog_endpoint_data(self, expected, actual):
904- """Validate service catalog endpoint data.
905-
906- Validate a list of actual service catalog endpoints vs a list of
907- expected service catalog endpoints.
908- """
909- self.log.debug('actual: {}'.format(repr(actual)))
910- for k, v in expected.iteritems():
911- if k in actual:
912- ret = self._validate_dict_data(expected[k][0], actual[k][0])
913- if ret:
914- return self.endpoint_error(k, ret)
915- else:
916- return "endpoint {} does not exist".format(k)
917- return ret
918-
919- def validate_tenant_data(self, expected, actual):
920- """Validate tenant data.
921-
922- Validate a list of actual tenant data vs list of expected tenant
923- data.
924- """
925- self.log.debug('actual: {}'.format(repr(actual)))
926- for e in expected:
927- found = False
928- for act in actual:
929- a = {'enabled': act.enabled, 'description': act.description,
930- 'name': act.name, 'id': act.id}
931- if e['name'] == a['name']:
932- found = True
933- ret = self._validate_dict_data(e, a)
934- if ret:
935- return "unexpected tenant data - {}".format(ret)
936- if not found:
937- return "tenant {} does not exist".format(e['name'])
938- return ret
939-
940- def validate_role_data(self, expected, actual):
941- """Validate role data.
942-
943- Validate a list of actual role data vs a list of expected role
944- data.
945- """
946- self.log.debug('actual: {}'.format(repr(actual)))
947- for e in expected:
948- found = False
949- for act in actual:
950- a = {'name': act.name, 'id': act.id}
951- if e['name'] == a['name']:
952- found = True
953- ret = self._validate_dict_data(e, a)
954- if ret:
955- return "unexpected role data - {}".format(ret)
956- if not found:
957- return "role {} does not exist".format(e['name'])
958- return ret
959-
960- def validate_user_data(self, expected, actual):
961- """Validate user data.
962-
963- Validate a list of actual user data vs a list of expected user
964- data.
965- """
966- self.log.debug('actual: {}'.format(repr(actual)))
967- for e in expected:
968- found = False
969- for act in actual:
970- a = {'enabled': act.enabled, 'name': act.name,
971- 'email': act.email, 'tenantId': act.tenantId,
972- 'id': act.id}
973- if e['name'] == a['name']:
974- found = True
975- ret = self._validate_dict_data(e, a)
976- if ret:
977- return "unexpected user data - {}".format(ret)
978- if not found:
979- return "user {} does not exist".format(e['name'])
980- return ret
981-
982- def validate_flavor_data(self, expected, actual):
983- """Validate flavor data.
984-
985- Validate a list of actual flavors vs a list of expected flavors.
986- """
987- self.log.debug('actual: {}'.format(repr(actual)))
988- act = [a.name for a in actual]
989- return self._validate_list_data(expected, act)
990-
991- def tenant_exists(self, keystone, tenant):
992- """Return True if tenant exists."""
993- return tenant in [t.name for t in keystone.tenants.list()]
994-
995- def authenticate_keystone_admin(self, keystone_sentry, user, password,
996- tenant):
997- """Authenticates admin user with the keystone admin endpoint."""
998- unit = keystone_sentry
999- service_ip = unit.relation('shared-db',
1000- 'mysql:shared-db')['private-address']
1001- ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
1002- return keystone_client.Client(username=user, password=password,
1003- tenant_name=tenant, auth_url=ep)
1004-
1005- def authenticate_keystone_user(self, keystone, user, password, tenant):
1006- """Authenticates a regular user with the keystone public endpoint."""
1007- ep = keystone.service_catalog.url_for(service_type='identity',
1008- endpoint_type='publicURL')
1009- return keystone_client.Client(username=user, password=password,
1010- tenant_name=tenant, auth_url=ep)
1011-
1012- def authenticate_glance_admin(self, keystone):
1013- """Authenticates admin user with glance."""
1014- ep = keystone.service_catalog.url_for(service_type='image',
1015- endpoint_type='adminURL')
1016- return glance_client.Client(ep, token=keystone.auth_token)
1017-
1018- def authenticate_nova_user(self, keystone, user, password, tenant):
1019- """Authenticates a regular user with nova-api."""
1020- ep = keystone.service_catalog.url_for(service_type='identity',
1021- endpoint_type='publicURL')
1022- return nova_client.Client(username=user, api_key=password,
1023- project_id=tenant, auth_url=ep)
1024-
1025- def create_cirros_image(self, glance, image_name):
1026- """Download the latest cirros image and upload it to glance."""
1027- http_proxy = os.getenv('AMULET_HTTP_PROXY')
1028- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
1029- if http_proxy:
1030- proxies = {'http': http_proxy}
1031- opener = urllib.FancyURLopener(proxies)
1032- else:
1033- opener = urllib.FancyURLopener()
1034-
1035- f = opener.open("http://download.cirros-cloud.net/version/released")
1036- version = f.read().strip()
1037- cirros_img = "cirros-{}-x86_64-disk.img".format(version)
1038- local_path = os.path.join('tests', cirros_img)
1039-
1040- if not os.path.exists(local_path):
1041- cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
1042- version, cirros_img)
1043- opener.retrieve(cirros_url, local_path)
1044- f.close()
1045-
1046- with open(local_path) as f:
1047- image = glance.images.create(name=image_name, is_public=True,
1048- disk_format='qcow2',
1049- container_format='bare', data=f)
1050- count = 1
1051- status = image.status
1052- while status != 'active' and count < 10:
1053- time.sleep(3)
1054- image = glance.images.get(image.id)
1055- status = image.status
1056- self.log.debug('image status: {}'.format(status))
1057- count += 1
1058-
1059- if status != 'active':
1060- self.log.error('image creation timed out')
1061- return None
1062-
1063- return image
1064-
1065- def delete_image(self, glance, image):
1066- """Delete the specified image."""
1067- num_before = len(list(glance.images.list()))
1068- glance.images.delete(image)
1069-
1070- count = 1
1071- num_after = len(list(glance.images.list()))
1072- while num_after != (num_before - 1) and count < 10:
1073- time.sleep(3)
1074- num_after = len(list(glance.images.list()))
1075- self.log.debug('number of images: {}'.format(num_after))
1076- count += 1
1077-
1078- if num_after != (num_before - 1):
1079- self.log.error('image deletion timed out')
1080- return False
1081-
1082- return True
1083-
1084- def create_instance(self, nova, image_name, instance_name, flavor):
1085- """Create the specified instance."""
1086- image = nova.images.find(name=image_name)
1087- flavor = nova.flavors.find(name=flavor)
1088- instance = nova.servers.create(name=instance_name, image=image,
1089- flavor=flavor)
1090-
1091- count = 1
1092- status = instance.status
1093- while status != 'ACTIVE' and count < 60:
1094- time.sleep(3)
1095- instance = nova.servers.get(instance.id)
1096- status = instance.status
1097- self.log.debug('instance status: {}'.format(status))
1098- count += 1
1099-
1100- if status != 'ACTIVE':
1101- self.log.error('instance creation timed out')
1102- return None
1103-
1104- return instance
1105-
1106- def delete_instance(self, nova, instance):
1107- """Delete the specified instance."""
1108- num_before = len(list(nova.servers.list()))
1109- nova.servers.delete(instance)
1110-
1111- count = 1
1112- num_after = len(list(nova.servers.list()))
1113- while num_after != (num_before - 1) and count < 10:
1114- time.sleep(3)
1115- num_after = len(list(nova.servers.list()))
1116- self.log.debug('number of instances: {}'.format(num_after))
1117- count += 1
1118-
1119- if num_after != (num_before - 1):
1120- self.log.error('instance deletion timed out')
1121- return False
1122-
1123- return True
1124
1125=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
1126--- hooks/charmhelpers/contrib/openstack/context.py 2014-10-01 12:47:13 +0000
1127+++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-29 14:25:04 +0000
1128@@ -15,6 +15,7 @@
1129
1130 from charmhelpers.core.hookenv import (
1131 config,
1132+ is_relation_made,
1133 local_unit,
1134 log,
1135 relation_get,
1136@@ -24,7 +25,7 @@
1137 unit_get,
1138 unit_private_ip,
1139 ERROR,
1140- INFO
1141+ DEBUG
1142 )
1143
1144 from charmhelpers.core.host import (
1145@@ -52,9 +53,14 @@
1146 from charmhelpers.contrib.network.ip import (
1147 get_address_in_network,
1148 get_ipv6_addr,
1149+ get_netmask_for_address,
1150+ format_ipv6_addr,
1151 is_address_in_network
1152 )
1153
1154+from charmhelpers.contrib.openstack.utils import (
1155+ get_host_ip,
1156+)
1157 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
1158
1159
1160@@ -174,8 +180,10 @@
1161 for rid in relation_ids('shared-db'):
1162 for unit in related_units(rid):
1163 rdata = relation_get(rid=rid, unit=unit)
1164+ host = rdata.get('db_host')
1165+ host = format_ipv6_addr(host) or host
1166 ctxt = {
1167- 'database_host': rdata.get('db_host'),
1168+ 'database_host': host,
1169 'database': self.database,
1170 'database_user': self.user,
1171 'database_password': rdata.get(password_setting),
1172@@ -251,10 +259,15 @@
1173 for rid in relation_ids('identity-service'):
1174 for unit in related_units(rid):
1175 rdata = relation_get(rid=rid, unit=unit)
1176+ serv_host = rdata.get('service_host')
1177+ serv_host = format_ipv6_addr(serv_host) or serv_host
1178+ auth_host = rdata.get('auth_host')
1179+ auth_host = format_ipv6_addr(auth_host) or auth_host
1180+
1181 ctxt = {
1182 'service_port': rdata.get('service_port'),
1183- 'service_host': rdata.get('service_host'),
1184- 'auth_host': rdata.get('auth_host'),
1185+ 'service_host': serv_host,
1186+ 'auth_host': auth_host,
1187 'auth_port': rdata.get('auth_port'),
1188 'admin_tenant_name': rdata.get('service_tenant'),
1189 'admin_user': rdata.get('service_username'),
1190@@ -303,11 +316,13 @@
1191 for unit in related_units(rid):
1192 if relation_get('clustered', rid=rid, unit=unit):
1193 ctxt['clustered'] = True
1194- ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
1195- unit=unit)
1196+ vip = relation_get('vip', rid=rid, unit=unit)
1197+ vip = format_ipv6_addr(vip) or vip
1198+ ctxt['rabbitmq_host'] = vip
1199 else:
1200- ctxt['rabbitmq_host'] = relation_get('private-address',
1201- rid=rid, unit=unit)
1202+ host = relation_get('private-address', rid=rid, unit=unit)
1203+ host = format_ipv6_addr(host) or host
1204+ ctxt['rabbitmq_host'] = host
1205 ctxt.update({
1206 'rabbitmq_user': username,
1207 'rabbitmq_password': relation_get('password', rid=rid,
1208@@ -346,8 +361,9 @@
1209 and len(related_units(rid)) > 1:
1210 rabbitmq_hosts = []
1211 for unit in related_units(rid):
1212- rabbitmq_hosts.append(relation_get('private-address',
1213- rid=rid, unit=unit))
1214+ host = relation_get('private-address', rid=rid, unit=unit)
1215+ host = format_ipv6_addr(host) or host
1216+ rabbitmq_hosts.append(host)
1217 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
1218 if not context_complete(ctxt):
1219 return {}
1220@@ -376,6 +392,7 @@
1221 ceph_addr = \
1222 relation_get('ceph-public-address', rid=rid, unit=unit) or \
1223 relation_get('private-address', rid=rid, unit=unit)
1224+ ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1225 mon_hosts.append(ceph_addr)
1226
1227 ctxt = {
1228@@ -396,6 +413,9 @@
1229 return ctxt
1230
1231
1232+ADDRESS_TYPES = ['admin', 'internal', 'public']
1233+
1234+
1235 class HAProxyContext(OSContextGenerator):
1236 interfaces = ['cluster']
1237
1238@@ -408,29 +428,62 @@
1239 if not relation_ids('cluster'):
1240 return {}
1241
1242+ l_unit = local_unit().replace('/', '-')
1243+
1244+ if config('prefer-ipv6'):
1245+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1246+ else:
1247+ addr = get_host_ip(unit_get('private-address'))
1248+
1249 cluster_hosts = {}
1250- l_unit = local_unit().replace('/', '-')
1251- if config('prefer-ipv6'):
1252- addr = get_ipv6_addr()
1253- else:
1254- addr = unit_get('private-address')
1255- cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
1256- addr)
1257-
1258- for rid in relation_ids('cluster'):
1259- for unit in related_units(rid):
1260- _unit = unit.replace('/', '-')
1261- addr = relation_get('private-address', rid=rid, unit=unit)
1262- cluster_hosts[_unit] = addr
1263+
1264+ # NOTE(jamespage): build out map of configured network endpoints
1265+ # and associated backends
1266+ for addr_type in ADDRESS_TYPES:
1267+ laddr = get_address_in_network(
1268+ config('os-{}-network'.format(addr_type)))
1269+ if laddr:
1270+ cluster_hosts[laddr] = {}
1271+ cluster_hosts[laddr]['network'] = "{}/{}".format(
1272+ laddr,
1273+ get_netmask_for_address(laddr)
1274+ )
1275+ cluster_hosts[laddr]['backends'] = {}
1276+ cluster_hosts[laddr]['backends'][l_unit] = laddr
1277+ for rid in relation_ids('cluster'):
1278+ for unit in related_units(rid):
1279+ _unit = unit.replace('/', '-')
1280+ _laddr = relation_get('{}-address'.format(addr_type),
1281+ rid=rid, unit=unit)
1282+ if _laddr:
1283+ cluster_hosts[laddr]['backends'][_unit] = _laddr
1284+
1285+ # NOTE(jamespage) no split configurations found, just use
1286+ # private addresses
1287+ if not cluster_hosts:
1288+ cluster_hosts[addr] = {}
1289+ cluster_hosts[addr]['network'] = "{}/{}".format(
1290+ addr,
1291+ get_netmask_for_address(addr)
1292+ )
1293+ cluster_hosts[addr]['backends'] = {}
1294+ cluster_hosts[addr]['backends'][l_unit] = addr
1295+ for rid in relation_ids('cluster'):
1296+ for unit in related_units(rid):
1297+ _unit = unit.replace('/', '-')
1298+ _laddr = relation_get('private-address',
1299+ rid=rid, unit=unit)
1300+ if _laddr:
1301+ cluster_hosts[addr]['backends'][_unit] = _laddr
1302
1303 ctxt = {
1304- 'units': cluster_hosts,
1305+ 'frontends': cluster_hosts,
1306 }
1307
1308 if config('haproxy-server-timeout'):
1309- ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout')
1310+ ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
1311 if config('haproxy-client-timeout'):
1312- ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout')
1313+ ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
1314
1315 if config('prefer-ipv6'):
1316 ctxt['local_host'] = 'ip6-localhost'
1317@@ -441,12 +494,13 @@
1318 ctxt['haproxy_host'] = '0.0.0.0'
1319 ctxt['stat_port'] = ':8888'
1320
1321- if len(cluster_hosts.keys()) > 1:
1322- # Enable haproxy when we have enough peers.
1323- log('Ensuring haproxy enabled in /etc/default/haproxy.')
1324- with open('/etc/default/haproxy', 'w') as out:
1325- out.write('ENABLED=1\n')
1326- return ctxt
1327+ for frontend in cluster_hosts:
1328+ if len(cluster_hosts[frontend]['backends']) > 1:
1329+ # Enable haproxy when we have enough peers.
1330+ log('Ensuring haproxy enabled in /etc/default/haproxy.')
1331+ with open('/etc/default/haproxy', 'w') as out:
1332+ out.write('ENABLED=1\n')
1333+ return ctxt
1334 log('HAProxy context is incomplete, this unit has no peers.')
1335 return {}
1336
1337@@ -708,22 +762,22 @@
1338
1339 class OSConfigFlagContext(OSContextGenerator):
1340
1341- """
1342- Responsible for adding user-defined config-flags in charm config to a
1343- template context.
1344-
1345- NOTE: the value of config-flags may be a comma-separated list of
1346- key=value pairs and some Openstack config files support
1347- comma-separated lists as values.
1348- """
1349-
1350- def __call__(self):
1351- config_flags = config('config-flags')
1352- if not config_flags:
1353- return {}
1354-
1355- flags = config_flags_parser(config_flags)
1356- return {'user_config_flags': flags}
1357+ """
1358+ Responsible for adding user-defined config-flags in charm config to a
1359+ template context.
1360+
1361+ NOTE: the value of config-flags may be a comma-separated list of
1362+ key=value pairs and some Openstack config files support
1363+ comma-separated lists as values.
1364+ """
1365+
1366+ def __call__(self):
1367+ config_flags = config('config-flags')
1368+ if not config_flags:
1369+ return {}
1370+
1371+ flags = config_flags_parser(config_flags)
1372+ return {'user_config_flags': flags}
1373
1374
1375 class SubordinateConfigContext(OSContextGenerator):
1376@@ -815,7 +869,7 @@
1377 else:
1378 ctxt[k] = v
1379
1380- log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1381+ log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
1382
1383 return ctxt
1384
1385@@ -838,3 +892,66 @@
1386 'use_syslog': config('use-syslog')
1387 }
1388 return ctxt
1389+
1390+
1391+class BindHostContext(OSContextGenerator):
1392+
1393+ def __call__(self):
1394+ if config('prefer-ipv6'):
1395+ return {
1396+ 'bind_host': '::'
1397+ }
1398+ else:
1399+ return {
1400+ 'bind_host': '0.0.0.0'
1401+ }
1402+
1403+
1404+class WorkerConfigContext(OSContextGenerator):
1405+
1406+ @property
1407+ def num_cpus(self):
1408+ try:
1409+ from psutil import NUM_CPUS
1410+ except ImportError:
1411+ apt_install('python-psutil', fatal=True)
1412+ from psutil import NUM_CPUS
1413+ return NUM_CPUS
1414+
1415+ def __call__(self):
1416+ multiplier = config('worker-multiplier') or 1
1417+ ctxt = {
1418+ "workers": self.num_cpus * multiplier
1419+ }
1420+ return ctxt
1421+
1422+
1423+class ZeroMQContext(OSContextGenerator):
1424+ interfaces = ['zeromq-configuration']
1425+
1426+ def __call__(self):
1427+ ctxt = {}
1428+ if is_relation_made('zeromq-configuration', 'host'):
1429+ for rid in relation_ids('zeromq-configuration'):
1430+ for unit in related_units(rid):
1431+ ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
1432+ ctxt['zmq_host'] = relation_get('host', unit, rid)
1433+ return ctxt
1434+
1435+
1436+class NotificationDriverContext(OSContextGenerator):
1437+
1438+ def __init__(self, zmq_relation='zeromq-configuration', amqp_relation='amqp'):
1439+ """
1440+ :param zmq_relation : Name of Zeromq relation to check
1441+ """
1442+ self.zmq_relation = zmq_relation
1443+ self.amqp_relation = amqp_relation
1444+
1445+ def __call__(self):
1446+ ctxt = {
1447+ 'notifications': 'False',
1448+ }
1449+ if is_relation_made(self.amqp_relation):
1450+ ctxt['notifications'] = "True"
1451+ return ctxt
1452
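For clarity, the HAProxyContext change above replaces the flat 'units' map with per-network 'frontends', so haproxy templates now iterate 'frontends' rather than 'units'. An illustrative shape of the resulting context, with hypothetical addresses, netmask, timeouts and unit names:

# Illustrative shape of the template context produced by the reworked
# HAProxyContext; all concrete values below are hypothetical.
ctxt = {
    'frontends': {
        '10.5.0.10': {
            'network': '10.5.0.10/255.255.255.0',
            'backends': {
                'keystone-0': '10.5.0.10',
                'keystone-1': '10.5.0.11',
            },
        },
    },
    'haproxy_server_timeout': 50000,  # only present when set in charm config
    'haproxy_client_timeout': 50000,
    'local_host': '127.0.0.1',
    'haproxy_host': '0.0.0.0',
    'stat_port': ':8888',
}
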
1453=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
1454--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
1455+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-29 14:25:04 +0000
1456@@ -0,0 +1,79 @@
1457+from charmhelpers.core.hookenv import (
1458+ config,
1459+ unit_get,
1460+)
1461+
1462+from charmhelpers.contrib.network.ip import (
1463+ get_address_in_network,
1464+ is_address_in_network,
1465+ is_ipv6,
1466+ get_ipv6_addr,
1467+)
1468+
1469+from charmhelpers.contrib.hahelpers.cluster import is_clustered
1470+
1471+PUBLIC = 'public'
1472+INTERNAL = 'int'
1473+ADMIN = 'admin'
1474+
1475+_address_map = {
1476+ PUBLIC: {
1477+ 'config': 'os-public-network',
1478+ 'fallback': 'public-address'
1479+ },
1480+ INTERNAL: {
1481+ 'config': 'os-internal-network',
1482+ 'fallback': 'private-address'
1483+ },
1484+ ADMIN: {
1485+ 'config': 'os-admin-network',
1486+ 'fallback': 'private-address'
1487+ }
1488+}
1489+
1490+
1491+def canonical_url(configs, endpoint_type=PUBLIC):
1492+ '''
1493+ Returns the correct HTTP URL to this host given the state of HTTPS
1494+ configuration, hacluster and charm configuration.
1495+
1496+ :configs OSTemplateRenderer: A config tempating object to inspect for
1497+ a complete https context.
1498+ :endpoint_type str: The endpoint type to resolve.
1499+
1500+ :returns str: Base URL for services on the current service unit.
1501+ '''
1502+ scheme = 'http'
1503+ if 'https' in configs.complete_contexts():
1504+ scheme = 'https'
1505+ address = resolve_address(endpoint_type)
1506+ if is_ipv6(address):
1507+ address = "[{}]".format(address)
1508+ return '%s://%s' % (scheme, address)
1509+
1510+
1511+def resolve_address(endpoint_type=PUBLIC):
1512+ resolved_address = None
1513+ if is_clustered():
1514+ if config(_address_map[endpoint_type]['config']) is None:
1515+ # Assume vip is simple and pass back directly
1516+ resolved_address = config('vip')
1517+ else:
1518+ for vip in config('vip').split():
1519+ if is_address_in_network(
1520+ config(_address_map[endpoint_type]['config']),
1521+ vip):
1522+ resolved_address = vip
1523+ else:
1524+ if config('prefer-ipv6'):
1525+ fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1526+ else:
1527+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1528+ resolved_address = get_address_in_network(
1529+ config(_address_map[endpoint_type]['config']), fallback_addr)
1530+
1531+ if resolved_address is None:
1532+ raise ValueError('Unable to resolve a suitable IP address'
1533+ ' based on charm state and configuration')
1534+ else:
1535+ return resolved_address
1536
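For reference, a minimal sketch of how charms consume this module; the configs object is whatever OSConfigRenderer the charm already maintains, and the port is a placeholder:

# Illustrative only: resolving endpoint URLs with the helpers above.
from charmhelpers.contrib.openstack.ip import (
    ADMIN,
    INTERNAL,
    PUBLIC,
    canonical_url,
    resolve_address,
)


def endpoint_urls(configs, port=8776):
    """Return admin/internal/public base URLs for this unit."""
    return dict(
        (ep, '%s:%s' % (canonical_url(configs, ep), port))
        for ep in (ADMIN, INTERNAL, PUBLIC)
    )


def public_address():
    # Bare address honouring os-public-network, prefer-ipv6 and any
    # configured vip when the unit is clustered.
    return resolve_address(PUBLIC)
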
1537=== removed file 'hooks/charmhelpers/contrib/openstack/ip.py'
1538--- hooks/charmhelpers/contrib/openstack/ip.py 2014-09-26 08:06:25 +0000
1539+++ hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
1540@@ -1,79 +0,0 @@
1541-from charmhelpers.core.hookenv import (
1542- config,
1543- unit_get,
1544-)
1545-
1546-from charmhelpers.contrib.network.ip import (
1547- get_address_in_network,
1548- is_address_in_network,
1549- is_ipv6,
1550- get_ipv6_addr,
1551-)
1552-
1553-from charmhelpers.contrib.hahelpers.cluster import is_clustered
1554-
1555-PUBLIC = 'public'
1556-INTERNAL = 'int'
1557-ADMIN = 'admin'
1558-
1559-_address_map = {
1560- PUBLIC: {
1561- 'config': 'os-public-network',
1562- 'fallback': 'public-address'
1563- },
1564- INTERNAL: {
1565- 'config': 'os-internal-network',
1566- 'fallback': 'private-address'
1567- },
1568- ADMIN: {
1569- 'config': 'os-admin-network',
1570- 'fallback': 'private-address'
1571- }
1572-}
1573-
1574-
1575-def canonical_url(configs, endpoint_type=PUBLIC):
1576- '''
1577- Returns the correct HTTP URL to this host given the state of HTTPS
1578- configuration, hacluster and charm configuration.
1579-
1580- :configs OSTemplateRenderer: A config tempating object to inspect for
1581- a complete https context.
1582- :endpoint_type str: The endpoint type to resolve.
1583-
1584- :returns str: Base URL for services on the current service unit.
1585- '''
1586- scheme = 'http'
1587- if 'https' in configs.complete_contexts():
1588- scheme = 'https'
1589- address = resolve_address(endpoint_type)
1590- if is_ipv6(address):
1591- address = "[{}]".format(address)
1592- return '%s://%s' % (scheme, address)
1593-
1594-
1595-def resolve_address(endpoint_type=PUBLIC):
1596- resolved_address = None
1597- if is_clustered():
1598- if config(_address_map[endpoint_type]['config']) is None:
1599- # Assume vip is simple and pass back directly
1600- resolved_address = config('vip')
1601- else:
1602- for vip in config('vip').split():
1603- if is_address_in_network(
1604- config(_address_map[endpoint_type]['config']),
1605- vip):
1606- resolved_address = vip
1607- else:
1608- if config('prefer-ipv6'):
1609- fallback_addr = get_ipv6_addr()
1610- else:
1611- fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1612- resolved_address = get_address_in_network(
1613- config(_address_map[endpoint_type]['config']), fallback_addr)
1614-
1615- if resolved_address is None:
1616- raise ValueError('Unable to resolve a suitable IP address'
1617- ' based on charm state and configuration')
1618- else:
1619- return resolved_address
1620
1621=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1622--- hooks/charmhelpers/contrib/openstack/utils.py 2014-10-01 12:47:13 +0000
1623+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-29 14:25:04 +0000
1624@@ -2,8 +2,10 @@
1625
1626 # Common python helper functions used for OpenStack charms.
1627 from collections import OrderedDict
1628+from functools import wraps
1629
1630 import subprocess
1631+import json
1632 import os
1633 import socket
1634 import sys
1635@@ -13,7 +15,9 @@
1636 log as juju_log,
1637 charm_dir,
1638 ERROR,
1639- INFO
1640+ INFO,
1641+ relation_ids,
1642+ relation_set
1643 )
1644
1645 from charmhelpers.contrib.storage.linux.lvm import (
1646@@ -22,6 +26,10 @@
1647 remove_lvm_physical_volume,
1648 )
1649
1650+from charmhelpers.contrib.network.ip import (
1651+ get_ipv6_addr
1652+)
1653+
1654 from charmhelpers.core.host import lsb_release, mounts, umount
1655 from charmhelpers.fetch import apt_install, apt_cache
1656 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
1657@@ -71,6 +79,8 @@
1658 ('1.12.0', 'icehouse'),
1659 ('1.11.0', 'icehouse'),
1660 ('2.0.0', 'juno'),
1661+ ('2.1.0', 'juno'),
1662+ ('2.2.0', 'juno'),
1663 ])
1664
1665 DEFAULT_LOOPBACK_SIZE = '5G'
1666@@ -457,3 +467,44 @@
1667 return result
1668 else:
1669 return result.split('.')[0]
1670+
1671+
1672+def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
1673+ mm_map = {}
1674+ if os.path.isfile(mm_file):
1675+ with open(mm_file, 'r') as f:
1676+ mm_map = json.load(f)
1677+ return mm_map
1678+
1679+
1680+def sync_db_with_multi_ipv6_addresses(database, database_user,
1681+ relation_prefix=None):
1682+ hosts = get_ipv6_addr(dynamic_only=False)
1683+
1684+ kwargs = {'database': database,
1685+ 'username': database_user,
1686+ 'hostname': json.dumps(hosts)}
1687+
1688+ if relation_prefix:
1689+ keys = kwargs.keys()
1690+ for key in keys:
1691+ kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
1692+ del kwargs[key]
1693+
1694+ for rid in relation_ids('shared-db'):
1695+ relation_set(relation_id=rid, **kwargs)
1696+
1697+
1698+def os_requires_version(ostack_release, pkg):
1699+ """
1700+ Decorator for hook to specify minimum supported release
1701+ """
1702+ def wrap(f):
1703+ @wraps(f)
1704+ def wrapped_f(*args):
1705+ if os_release(pkg) < ostack_release:
1706+ raise Exception("This hook is not supported on releases"
1707+ " before %s" % ostack_release)
1708+ f(*args)
1709+ return wrapped_f
1710+ return wrap
1711
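Of the helpers added above, os_requires_version is the one charms call directly: it wraps a hook and refuses to run it on releases older than the stated one. A hedged sketch; the hook name and package below are illustrative, not taken from this charm:

    from charmhelpers.contrib.openstack.utils import os_requires_version

    @os_requires_version('icehouse', 'python-keystoneclient')
    def upgrade_hook():
        # Only runs when os_release('python-keystoneclient') >= 'icehouse';
        # otherwise the decorator raises before the body is reached.
        pass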
1712=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1713--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-01 12:47:13 +0000
1714+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-29 14:25:04 +0000
1715@@ -113,7 +113,7 @@
1716 return None
1717
1718
1719-def create_pool(service, name, replicas=2):
1720+def create_pool(service, name, replicas=3):
1721 ''' Create a new RADOS pool '''
1722 if pool_exists(service, name):
1723 log("Ceph pool {} already exists, skipping creation".format(name),
1724@@ -300,7 +300,8 @@
1725
1726
1727 def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
1728- blk_device, fstype, system_services=[]):
1729+ blk_device, fstype, system_services=[],
1730+ replicas=3):
1731 """
1732 NOTE: This function must only be called from a single service unit for
1733 the same rbd_img otherwise data loss will occur.
1734@@ -317,7 +318,7 @@
1735 # Ensure pool, RBD image, RBD mappings are in place.
1736 if not pool_exists(service, pool):
1737 log('ceph: Creating new pool {}.'.format(pool))
1738- create_pool(service, pool)
1739+ create_pool(service, pool, replicas=replicas)
1740
1741 if not rbd_exists(service, pool, rbd_img):
1742 log('ceph: Creating RBD image ({}).'.format(rbd_img))
1743
1744=== added file 'hooks/charmhelpers/core/fstab.py'
1745--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
1746+++ hooks/charmhelpers/core/fstab.py 2014-10-29 14:25:04 +0000
1747@@ -0,0 +1,116 @@
1748+#!/usr/bin/env python
1749+# -*- coding: utf-8 -*-
1750+
1751+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
1752+
1753+import os
1754+
1755+
1756+class Fstab(file):
1757+ """This class extends file in order to implement a file reader/writer
1758+ for file `/etc/fstab`
1759+ """
1760+
1761+ class Entry(object):
1762+ """Entry class represents a non-comment line on the `/etc/fstab` file
1763+ """
1764+ def __init__(self, device, mountpoint, filesystem,
1765+ options, d=0, p=0):
1766+ self.device = device
1767+ self.mountpoint = mountpoint
1768+ self.filesystem = filesystem
1769+
1770+ if not options:
1771+ options = "defaults"
1772+
1773+ self.options = options
1774+ self.d = d
1775+ self.p = p
1776+
1777+ def __eq__(self, o):
1778+ return str(self) == str(o)
1779+
1780+ def __str__(self):
1781+ return "{} {} {} {} {} {}".format(self.device,
1782+ self.mountpoint,
1783+ self.filesystem,
1784+ self.options,
1785+ self.d,
1786+ self.p)
1787+
1788+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
1789+
1790+ def __init__(self, path=None):
1791+ if path:
1792+ self._path = path
1793+ else:
1794+ self._path = self.DEFAULT_PATH
1795+ file.__init__(self, self._path, 'r+')
1796+
1797+ def _hydrate_entry(self, line):
1798+ # NOTE: use split with no arguments to split on any
1799+ # whitespace including tabs
1800+ return Fstab.Entry(*filter(
1801+ lambda x: x not in ('', None),
1802+ line.strip("\n").split()))
1803+
1804+ @property
1805+ def entries(self):
1806+ self.seek(0)
1807+ for line in self.readlines():
1808+ try:
1809+ if not line.startswith("#"):
1810+ yield self._hydrate_entry(line)
1811+ except ValueError:
1812+ pass
1813+
1814+ def get_entry_by_attr(self, attr, value):
1815+ for entry in self.entries:
1816+ e_attr = getattr(entry, attr)
1817+ if e_attr == value:
1818+ return entry
1819+ return None
1820+
1821+ def add_entry(self, entry):
1822+ if self.get_entry_by_attr('device', entry.device):
1823+ return False
1824+
1825+ self.write(str(entry) + '\n')
1826+ self.truncate()
1827+ return entry
1828+
1829+ def remove_entry(self, entry):
1830+ self.seek(0)
1831+
1832+ lines = self.readlines()
1833+
1834+ found = False
1835+ for index, line in enumerate(lines):
1836+ if not line.startswith("#"):
1837+ if self._hydrate_entry(line) == entry:
1838+ found = True
1839+ break
1840+
1841+ if not found:
1842+ return False
1843+
1844+ lines.remove(line)
1845+
1846+ self.seek(0)
1847+ self.write(''.join(lines))
1848+ self.truncate()
1849+ return True
1850+
1851+ @classmethod
1852+ def remove_by_mountpoint(cls, mountpoint, path=None):
1853+ fstab = cls(path=path)
1854+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
1855+ if entry:
1856+ return fstab.remove_entry(entry)
1857+ return False
1858+
1859+ @classmethod
1860+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
1861+ return cls(path=path).add_entry(Fstab.Entry(device,
1862+ mountpoint, filesystem,
1863+ options=options))
1864
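A brief usage sketch for the re-added Fstab helper. Note that it opens the target with mode 'r+', so the file must already exist; the device, mount point and scratch path below are made up:

    from charmhelpers.core.fstab import Fstab

    # Append an entry (skipped if the device is already listed), then drop it
    # again by mount point. A scratch copy is used instead of /etc/fstab.
    Fstab.add('/dev/vdb', '/mnt/data', 'ext4', options='noatime',
              path='/tmp/fstab.test')
    Fstab.remove_by_mountpoint('/mnt/data', path='/tmp/fstab.test')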
1865=== removed file 'hooks/charmhelpers/core/fstab.py'
1866--- hooks/charmhelpers/core/fstab.py 2014-09-26 08:06:25 +0000
1867+++ hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
1868@@ -1,116 +0,0 @@
1869-#!/usr/bin/env python
1870-# -*- coding: utf-8 -*-
1871-
1872-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
1873-
1874-import os
1875-
1876-
1877-class Fstab(file):
1878- """This class extends file in order to implement a file reader/writer
1879- for file `/etc/fstab`
1880- """
1881-
1882- class Entry(object):
1883- """Entry class represents a non-comment line on the `/etc/fstab` file
1884- """
1885- def __init__(self, device, mountpoint, filesystem,
1886- options, d=0, p=0):
1887- self.device = device
1888- self.mountpoint = mountpoint
1889- self.filesystem = filesystem
1890-
1891- if not options:
1892- options = "defaults"
1893-
1894- self.options = options
1895- self.d = d
1896- self.p = p
1897-
1898- def __eq__(self, o):
1899- return str(self) == str(o)
1900-
1901- def __str__(self):
1902- return "{} {} {} {} {} {}".format(self.device,
1903- self.mountpoint,
1904- self.filesystem,
1905- self.options,
1906- self.d,
1907- self.p)
1908-
1909- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
1910-
1911- def __init__(self, path=None):
1912- if path:
1913- self._path = path
1914- else:
1915- self._path = self.DEFAULT_PATH
1916- file.__init__(self, self._path, 'r+')
1917-
1918- def _hydrate_entry(self, line):
1919- # NOTE: use split with no arguments to split on any
1920- # whitespace including tabs
1921- return Fstab.Entry(*filter(
1922- lambda x: x not in ('', None),
1923- line.strip("\n").split()))
1924-
1925- @property
1926- def entries(self):
1927- self.seek(0)
1928- for line in self.readlines():
1929- try:
1930- if not line.startswith("#"):
1931- yield self._hydrate_entry(line)
1932- except ValueError:
1933- pass
1934-
1935- def get_entry_by_attr(self, attr, value):
1936- for entry in self.entries:
1937- e_attr = getattr(entry, attr)
1938- if e_attr == value:
1939- return entry
1940- return None
1941-
1942- def add_entry(self, entry):
1943- if self.get_entry_by_attr('device', entry.device):
1944- return False
1945-
1946- self.write(str(entry) + '\n')
1947- self.truncate()
1948- return entry
1949-
1950- def remove_entry(self, entry):
1951- self.seek(0)
1952-
1953- lines = self.readlines()
1954-
1955- found = False
1956- for index, line in enumerate(lines):
1957- if not line.startswith("#"):
1958- if self._hydrate_entry(line) == entry:
1959- found = True
1960- break
1961-
1962- if not found:
1963- return False
1964-
1965- lines.remove(line)
1966-
1967- self.seek(0)
1968- self.write(''.join(lines))
1969- self.truncate()
1970- return True
1971-
1972- @classmethod
1973- def remove_by_mountpoint(cls, mountpoint, path=None):
1974- fstab = cls(path=path)
1975- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
1976- if entry:
1977- return fstab.remove_entry(entry)
1978- return False
1979-
1980- @classmethod
1981- def add(cls, device, mountpoint, filesystem, options=None, path=None):
1982- return cls(path=path).add_entry(Fstab.Entry(device,
1983- mountpoint, filesystem,
1984- options=options))
1985
1986=== modified file 'hooks/charmhelpers/core/hookenv.py'
1987--- hooks/charmhelpers/core/hookenv.py 2014-10-01 12:47:13 +0000
1988+++ hooks/charmhelpers/core/hookenv.py 2014-10-29 14:25:04 +0000
1989@@ -214,6 +214,12 @@
1990 except KeyError:
1991 return (self._prev_dict or {})[key]
1992
1993+ def keys(self):
1994+ prev_keys = []
1995+ if self._prev_dict is not None:
1996+ prev_keys = self._prev_dict.keys()
1997+ return list(set(prev_keys + dict.keys(self)))
1998+
1999 def load_previous(self, path=None):
2000 """Load previous copy of config from disk.
2001
2002
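The new Config.keys() merges in keys from the previously saved config, so options that only exist in the saved copy remain reachable through item access. A small sketch of how a hook might walk them:

    from charmhelpers.core.hookenv import config, log

    cfg = config()
    for key in cfg.keys():      # includes keys present only in the saved copy
        log('%s=%s' % (key, cfg[key]))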
2003=== modified file 'hooks/charmhelpers/core/host.py'
2004--- hooks/charmhelpers/core/host.py 2014-10-01 12:47:13 +0000
2005+++ hooks/charmhelpers/core/host.py 2014-10-29 14:25:04 +0000
2006@@ -6,13 +6,13 @@
2007 # Matthew Wedgwood <matthew.wedgwood@canonical.com>
2008
2009 import os
2010+import re
2011 import pwd
2012 import grp
2013 import random
2014 import string
2015 import subprocess
2016 import hashlib
2017-import shutil
2018 from contextlib import contextmanager
2019
2020 from collections import OrderedDict
2021@@ -317,7 +317,13 @@
2022 ip_output = (line for line in ip_output if line)
2023 for line in ip_output:
2024 if line.split()[1].startswith(int_type):
2025- interfaces.append(line.split()[1].replace(":", ""))
2026+ matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
2027+ if matched:
2028+ interface = matched.groups()[0]
2029+ else:
2030+ interface = line.split()[1].replace(":", "")
2031+ interfaces.append(interface)
2032+
2033 return interfaces
2034
2035
2036
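The extra regex above strips the '@bond0' suffix that `ip` prints for VLAN sub-interfaces on bonded links, so callers see 'bond0.123' rather than 'bond0.123@bond0'. A quick illustration against a made-up line of `ip addr` output:

    import re

    line = '6: bond0.123@bond0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500'
    matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
    if matched:
        interface = matched.groups()[0]               # 'bond0.123'
    else:
        interface = line.split()[1].replace(":", "")  # previous behaviour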
2037=== added directory 'hooks/charmhelpers/core/services'
2038=== removed directory 'hooks/charmhelpers/core/services'
2039=== added file 'hooks/charmhelpers/core/services/__init__.py'
2040--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
2041+++ hooks/charmhelpers/core/services/__init__.py 2014-10-29 14:25:04 +0000
2042@@ -0,0 +1,2 @@
2043+from .base import * # NOQA
2044+from .helpers import * # NOQA
2045
2046=== removed file 'hooks/charmhelpers/core/services/__init__.py'
2047--- hooks/charmhelpers/core/services/__init__.py 2014-09-26 08:06:25 +0000
2048+++ hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
2049@@ -1,2 +0,0 @@
2050-from .base import *
2051-from .helpers import *
2052
2053=== added file 'hooks/charmhelpers/core/services/base.py'
2054--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
2055+++ hooks/charmhelpers/core/services/base.py 2014-10-29 14:25:04 +0000
2056@@ -0,0 +1,313 @@
2057+import os
2058+import re
2059+import json
2060+from collections import Iterable
2061+
2062+from charmhelpers.core import host
2063+from charmhelpers.core import hookenv
2064+
2065+
2066+__all__ = ['ServiceManager', 'ManagerCallback',
2067+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
2068+ 'service_restart', 'service_stop']
2069+
2070+
2071+class ServiceManager(object):
2072+ def __init__(self, services=None):
2073+ """
2074+ Register a list of services, given their definitions.
2075+
2076+ Service definitions are dicts in the following formats (all keys except
2077+ 'service' are optional)::
2078+
2079+ {
2080+ "service": <service name>,
2081+ "required_data": <list of required data contexts>,
2082+ "provided_data": <list of provided data contexts>,
2083+ "data_ready": <one or more callbacks>,
2084+ "data_lost": <one or more callbacks>,
2085+ "start": <one or more callbacks>,
2086+ "stop": <one or more callbacks>,
2087+ "ports": <list of ports to manage>,
2088+ }
2089+
2090+ The 'required_data' list should contain dicts of required data (or
2091+ dependency managers that act like dicts and know how to collect the data).
2092+ Only when all items in the 'required_data' list are populated are the list
2093+ of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
2094+ information.
2095+
2096+ The 'provided_data' list should contain relation data providers, most likely
2097+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
2098+ that will indicate a set of data to set on a given relation.
2099+
2100+ The 'data_ready' value should be either a single callback, or a list of
2101+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
2102+ Each callback will be called with the service name as the only parameter.
2103+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
2104+ are fired.
2105+
2106+ The 'data_lost' value should be either a single callback, or a list of
2107+ callbacks, to be called when a 'required_data' item no longer passes
2108+ `is_ready()`. Each callback will be called with the service name as the
2109+ only parameter. After all of the 'data_lost' callbacks are called,
2110+ the 'stop' callbacks are fired.
2111+
2112+ The 'start' value should be either a single callback, or a list of
2113+ callbacks, to be called when starting the service, after the 'data_ready'
2114+ callbacks are complete. Each callback will be called with the service
2115+ name as the only parameter. This defaults to
2116+ `[host.service_start, services.open_ports]`.
2117+
2118+ The 'stop' value should be either a single callback, or a list of
2119+ callbacks, to be called when stopping the service. If the service is
2120+ being stopped because it no longer has all of its 'required_data', this
2121+ will be called after all of the 'data_lost' callbacks are complete.
2122+ Each callback will be called with the service name as the only parameter.
2123+ This defaults to `[services.close_ports, host.service_stop]`.
2124+
2125+ The 'ports' value should be a list of ports to manage. The default
2126+ 'start' handler will open the ports after the service is started,
2127+ and the default 'stop' handler will close the ports prior to stopping
2128+ the service.
2129+
2130+
2131+ Examples:
2132+
2133+ The following registers an Upstart service called bingod that depends on
2134+ a mongodb relation and which runs a custom `db_migrate` function prior to
2135+ restarting the service, and a Runit service called spadesd::
2136+
2137+ manager = services.ServiceManager([
2138+ {
2139+ 'service': 'bingod',
2140+ 'ports': [80, 443],
2141+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
2142+ 'data_ready': [
2143+ services.template(source='bingod.conf'),
2144+ services.template(source='bingod.ini',
2145+ target='/etc/bingod.ini',
2146+ owner='bingo', perms=0400),
2147+ ],
2148+ },
2149+ {
2150+ 'service': 'spadesd',
2151+ 'data_ready': services.template(source='spadesd_run.j2',
2152+ target='/etc/sv/spadesd/run',
2153+ perms=0555),
2154+ 'start': runit_start,
2155+ 'stop': runit_stop,
2156+ },
2157+ ])
2158+ manager.manage()
2159+ """
2160+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
2161+ self._ready = None
2162+ self.services = {}
2163+ for service in services or []:
2164+ service_name = service['service']
2165+ self.services[service_name] = service
2166+
2167+ def manage(self):
2168+ """
2169+ Handle the current hook by doing The Right Thing with the registered services.
2170+ """
2171+ hook_name = hookenv.hook_name()
2172+ if hook_name == 'stop':
2173+ self.stop_services()
2174+ else:
2175+ self.provide_data()
2176+ self.reconfigure_services()
2177+ cfg = hookenv.config()
2178+ if cfg.implicit_save:
2179+ cfg.save()
2180+
2181+ def provide_data(self):
2182+ """
2183+ Set the relation data for each provider in the ``provided_data`` list.
2184+
2185+ A provider must have a `name` attribute, which indicates which relation
2186+ to set data on, and a `provide_data()` method, which returns a dict of
2187+ data to set.
2188+ """
2189+ hook_name = hookenv.hook_name()
2190+ for service in self.services.values():
2191+ for provider in service.get('provided_data', []):
2192+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
2193+ data = provider.provide_data()
2194+ _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
2195+ if _ready:
2196+ hookenv.relation_set(None, data)
2197+
2198+ def reconfigure_services(self, *service_names):
2199+ """
2200+ Update all files for one or more registered services, and,
2201+ if ready, optionally restart them.
2202+
2203+ If no service names are given, reconfigures all registered services.
2204+ """
2205+ for service_name in service_names or self.services.keys():
2206+ if self.is_ready(service_name):
2207+ self.fire_event('data_ready', service_name)
2208+ self.fire_event('start', service_name, default=[
2209+ service_restart,
2210+ manage_ports])
2211+ self.save_ready(service_name)
2212+ else:
2213+ if self.was_ready(service_name):
2214+ self.fire_event('data_lost', service_name)
2215+ self.fire_event('stop', service_name, default=[
2216+ manage_ports,
2217+ service_stop])
2218+ self.save_lost(service_name)
2219+
2220+ def stop_services(self, *service_names):
2221+ """
2222+ Stop one or more registered services, by name.
2223+
2224+ If no service names are given, stops all registered services.
2225+ """
2226+ for service_name in service_names or self.services.keys():
2227+ self.fire_event('stop', service_name, default=[
2228+ manage_ports,
2229+ service_stop])
2230+
2231+ def get_service(self, service_name):
2232+ """
2233+ Given the name of a registered service, return its service definition.
2234+ """
2235+ service = self.services.get(service_name)
2236+ if not service:
2237+ raise KeyError('Service not registered: %s' % service_name)
2238+ return service
2239+
2240+ def fire_event(self, event_name, service_name, default=None):
2241+ """
2242+ Fire a data_ready, data_lost, start, or stop event on a given service.
2243+ """
2244+ service = self.get_service(service_name)
2245+ callbacks = service.get(event_name, default)
2246+ if not callbacks:
2247+ return
2248+ if not isinstance(callbacks, Iterable):
2249+ callbacks = [callbacks]
2250+ for callback in callbacks:
2251+ if isinstance(callback, ManagerCallback):
2252+ callback(self, service_name, event_name)
2253+ else:
2254+ callback(service_name)
2255+
2256+ def is_ready(self, service_name):
2257+ """
2258+ Determine if a registered service is ready, by checking its 'required_data'.
2259+
2260+ A 'required_data' item can be any mapping type, and is considered ready
2261+ if `bool(item)` evaluates as True.
2262+ """
2263+ service = self.get_service(service_name)
2264+ reqs = service.get('required_data', [])
2265+ return all(bool(req) for req in reqs)
2266+
2267+ def _load_ready_file(self):
2268+ if self._ready is not None:
2269+ return
2270+ if os.path.exists(self._ready_file):
2271+ with open(self._ready_file) as fp:
2272+ self._ready = set(json.load(fp))
2273+ else:
2274+ self._ready = set()
2275+
2276+ def _save_ready_file(self):
2277+ if self._ready is None:
2278+ return
2279+ with open(self._ready_file, 'w') as fp:
2280+ json.dump(list(self._ready), fp)
2281+
2282+ def save_ready(self, service_name):
2283+ """
2284+ Save an indicator that the given service is now data_ready.
2285+ """
2286+ self._load_ready_file()
2287+ self._ready.add(service_name)
2288+ self._save_ready_file()
2289+
2290+ def save_lost(self, service_name):
2291+ """
2292+ Save an indicator that the given service is no longer data_ready.
2293+ """
2294+ self._load_ready_file()
2295+ self._ready.discard(service_name)
2296+ self._save_ready_file()
2297+
2298+ def was_ready(self, service_name):
2299+ """
2300+ Determine if the given service was previously data_ready.
2301+ """
2302+ self._load_ready_file()
2303+ return service_name in self._ready
2304+
2305+
2306+class ManagerCallback(object):
2307+ """
2308+ Special case of a callback that takes the `ServiceManager` instance
2309+ in addition to the service name.
2310+
2311+ Subclasses should implement `__call__` which should accept three parameters:
2312+
2313+ * `manager` The `ServiceManager` instance
2314+ * `service_name` The name of the service it's being triggered for
2315+ * `event_name` The name of the event that this callback is handling
2316+ """
2317+ def __call__(self, manager, service_name, event_name):
2318+ raise NotImplementedError()
2319+
2320+
2321+class PortManagerCallback(ManagerCallback):
2322+ """
2323+ Callback class that will open or close ports, for use as either
2324+ a start or stop action.
2325+ """
2326+ def __call__(self, manager, service_name, event_name):
2327+ service = manager.get_service(service_name)
2328+ new_ports = service.get('ports', [])
2329+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
2330+ if os.path.exists(port_file):
2331+ with open(port_file) as fp:
2332+ old_ports = fp.read().split(',')
2333+ for old_port in old_ports:
2334+ if bool(old_port):
2335+ old_port = int(old_port)
2336+ if old_port not in new_ports:
2337+ hookenv.close_port(old_port)
2338+ with open(port_file, 'w') as fp:
2339+ fp.write(','.join(str(port) for port in new_ports))
2340+ for port in new_ports:
2341+ if event_name == 'start':
2342+ hookenv.open_port(port)
2343+ elif event_name == 'stop':
2344+ hookenv.close_port(port)
2345+
2346+
2347+def service_stop(service_name):
2348+ """
2349+ Wrapper around host.service_stop to prevent spurious "unknown service"
2350+ messages in the logs.
2351+ """
2352+ if host.service_running(service_name):
2353+ host.service_stop(service_name)
2354+
2355+
2356+def service_restart(service_name):
2357+ """
2358+ Wrapper around host.service_restart to prevent spurious "unknown service"
2359+ messages in the logs.
2360+ """
2361+ if host.service_available(service_name):
2362+ if host.service_running(service_name):
2363+ host.service_restart(service_name)
2364+ else:
2365+ host.service_start(service_name)
2366+
2367+
2368+# Convenience aliases
2369+open_ports = close_ports = manage_ports = PortManagerCallback()
2370
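One behaviour of PortManagerCallback worth calling out: it records the ports it opened in a hidden `.<service>.ports` file, so ports dropped from a service definition get closed on the next run. A sketch with made-up service data (opening ports only works inside a hook environment):

    from charmhelpers.core.services.base import ServiceManager, manage_ports

    manager = ServiceManager([{'service': 'bingod', 'ports': [80, 443]}])
    manage_ports(manager, 'bingod', 'start')   # opens 80 and 443, records them

    manager.services['bingod']['ports'] = [80]
    manage_ports(manager, 'bingod', 'start')   # closes 443, keeps 80 open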
2371=== removed file 'hooks/charmhelpers/core/services/base.py'
2372--- hooks/charmhelpers/core/services/base.py 2014-09-26 08:06:25 +0000
2373+++ hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
2374@@ -1,313 +0,0 @@
2375-import os
2376-import re
2377-import json
2378-from collections import Iterable
2379-
2380-from charmhelpers.core import host
2381-from charmhelpers.core import hookenv
2382-
2383-
2384-__all__ = ['ServiceManager', 'ManagerCallback',
2385- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
2386- 'service_restart', 'service_stop']
2387-
2388-
2389-class ServiceManager(object):
2390- def __init__(self, services=None):
2391- """
2392- Register a list of services, given their definitions.
2393-
2394- Service definitions are dicts in the following formats (all keys except
2395- 'service' are optional)::
2396-
2397- {
2398- "service": <service name>,
2399- "required_data": <list of required data contexts>,
2400- "provided_data": <list of provided data contexts>,
2401- "data_ready": <one or more callbacks>,
2402- "data_lost": <one or more callbacks>,
2403- "start": <one or more callbacks>,
2404- "stop": <one or more callbacks>,
2405- "ports": <list of ports to manage>,
2406- }
2407-
2408- The 'required_data' list should contain dicts of required data (or
2409- dependency managers that act like dicts and know how to collect the data).
2410- Only when all items in the 'required_data' list are populated are the list
2411- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
2412- information.
2413-
2414- The 'provided_data' list should contain relation data providers, most likely
2415- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
2416- that will indicate a set of data to set on a given relation.
2417-
2418- The 'data_ready' value should be either a single callback, or a list of
2419- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
2420- Each callback will be called with the service name as the only parameter.
2421- After all of the 'data_ready' callbacks are called, the 'start' callbacks
2422- are fired.
2423-
2424- The 'data_lost' value should be either a single callback, or a list of
2425- callbacks, to be called when a 'required_data' item no longer passes
2426- `is_ready()`. Each callback will be called with the service name as the
2427- only parameter. After all of the 'data_lost' callbacks are called,
2428- the 'stop' callbacks are fired.
2429-
2430- The 'start' value should be either a single callback, or a list of
2431- callbacks, to be called when starting the service, after the 'data_ready'
2432- callbacks are complete. Each callback will be called with the service
2433- name as the only parameter. This defaults to
2434- `[host.service_start, services.open_ports]`.
2435-
2436- The 'stop' value should be either a single callback, or a list of
2437- callbacks, to be called when stopping the service. If the service is
2438- being stopped because it no longer has all of its 'required_data', this
2439- will be called after all of the 'data_lost' callbacks are complete.
2440- Each callback will be called with the service name as the only parameter.
2441- This defaults to `[services.close_ports, host.service_stop]`.
2442-
2443- The 'ports' value should be a list of ports to manage. The default
2444- 'start' handler will open the ports after the service is started,
2445- and the default 'stop' handler will close the ports prior to stopping
2446- the service.
2447-
2448-
2449- Examples:
2450-
2451- The following registers an Upstart service called bingod that depends on
2452- a mongodb relation and which runs a custom `db_migrate` function prior to
2453- restarting the service, and a Runit service called spadesd::
2454-
2455- manager = services.ServiceManager([
2456- {
2457- 'service': 'bingod',
2458- 'ports': [80, 443],
2459- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
2460- 'data_ready': [
2461- services.template(source='bingod.conf'),
2462- services.template(source='bingod.ini',
2463- target='/etc/bingod.ini',
2464- owner='bingo', perms=0400),
2465- ],
2466- },
2467- {
2468- 'service': 'spadesd',
2469- 'data_ready': services.template(source='spadesd_run.j2',
2470- target='/etc/sv/spadesd/run',
2471- perms=0555),
2472- 'start': runit_start,
2473- 'stop': runit_stop,
2474- },
2475- ])
2476- manager.manage()
2477- """
2478- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
2479- self._ready = None
2480- self.services = {}
2481- for service in services or []:
2482- service_name = service['service']
2483- self.services[service_name] = service
2484-
2485- def manage(self):
2486- """
2487- Handle the current hook by doing The Right Thing with the registered services.
2488- """
2489- hook_name = hookenv.hook_name()
2490- if hook_name == 'stop':
2491- self.stop_services()
2492- else:
2493- self.provide_data()
2494- self.reconfigure_services()
2495- cfg = hookenv.config()
2496- if cfg.implicit_save:
2497- cfg.save()
2498-
2499- def provide_data(self):
2500- """
2501- Set the relation data for each provider in the ``provided_data`` list.
2502-
2503- A provider must have a `name` attribute, which indicates which relation
2504- to set data on, and a `provide_data()` method, which returns a dict of
2505- data to set.
2506- """
2507- hook_name = hookenv.hook_name()
2508- for service in self.services.values():
2509- for provider in service.get('provided_data', []):
2510- if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
2511- data = provider.provide_data()
2512- _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
2513- if _ready:
2514- hookenv.relation_set(None, data)
2515-
2516- def reconfigure_services(self, *service_names):
2517- """
2518- Update all files for one or more registered services, and,
2519- if ready, optionally restart them.
2520-
2521- If no service names are given, reconfigures all registered services.
2522- """
2523- for service_name in service_names or self.services.keys():
2524- if self.is_ready(service_name):
2525- self.fire_event('data_ready', service_name)
2526- self.fire_event('start', service_name, default=[
2527- service_restart,
2528- manage_ports])
2529- self.save_ready(service_name)
2530- else:
2531- if self.was_ready(service_name):
2532- self.fire_event('data_lost', service_name)
2533- self.fire_event('stop', service_name, default=[
2534- manage_ports,
2535- service_stop])
2536- self.save_lost(service_name)
2537-
2538- def stop_services(self, *service_names):
2539- """
2540- Stop one or more registered services, by name.
2541-
2542- If no service names are given, stops all registered services.
2543- """
2544- for service_name in service_names or self.services.keys():
2545- self.fire_event('stop', service_name, default=[
2546- manage_ports,
2547- service_stop])
2548-
2549- def get_service(self, service_name):
2550- """
2551- Given the name of a registered service, return its service definition.
2552- """
2553- service = self.services.get(service_name)
2554- if not service:
2555- raise KeyError('Service not registered: %s' % service_name)
2556- return service
2557-
2558- def fire_event(self, event_name, service_name, default=None):
2559- """
2560- Fire a data_ready, data_lost, start, or stop event on a given service.
2561- """
2562- service = self.get_service(service_name)
2563- callbacks = service.get(event_name, default)
2564- if not callbacks:
2565- return
2566- if not isinstance(callbacks, Iterable):
2567- callbacks = [callbacks]
2568- for callback in callbacks:
2569- if isinstance(callback, ManagerCallback):
2570- callback(self, service_name, event_name)
2571- else:
2572- callback(service_name)
2573-
2574- def is_ready(self, service_name):
2575- """
2576- Determine if a registered service is ready, by checking its 'required_data'.
2577-
2578- A 'required_data' item can be any mapping type, and is considered ready
2579- if `bool(item)` evaluates as True.
2580- """
2581- service = self.get_service(service_name)
2582- reqs = service.get('required_data', [])
2583- return all(bool(req) for req in reqs)
2584-
2585- def _load_ready_file(self):
2586- if self._ready is not None:
2587- return
2588- if os.path.exists(self._ready_file):
2589- with open(self._ready_file) as fp:
2590- self._ready = set(json.load(fp))
2591- else:
2592- self._ready = set()
2593-
2594- def _save_ready_file(self):
2595- if self._ready is None:
2596- return
2597- with open(self._ready_file, 'w') as fp:
2598- json.dump(list(self._ready), fp)
2599-
2600- def save_ready(self, service_name):
2601- """
2602- Save an indicator that the given service is now data_ready.
2603- """
2604- self._load_ready_file()
2605- self._ready.add(service_name)
2606- self._save_ready_file()
2607-
2608- def save_lost(self, service_name):
2609- """
2610- Save an indicator that the given service is no longer data_ready.
2611- """
2612- self._load_ready_file()
2613- self._ready.discard(service_name)
2614- self._save_ready_file()
2615-
2616- def was_ready(self, service_name):
2617- """
2618- Determine if the given service was previously data_ready.
2619- """
2620- self._load_ready_file()
2621- return service_name in self._ready
2622-
2623-
2624-class ManagerCallback(object):
2625- """
2626- Special case of a callback that takes the `ServiceManager` instance
2627- in addition to the service name.
2628-
2629- Subclasses should implement `__call__` which should accept three parameters:
2630-
2631- * `manager` The `ServiceManager` instance
2632- * `service_name` The name of the service it's being triggered for
2633- * `event_name` The name of the event that this callback is handling
2634- """
2635- def __call__(self, manager, service_name, event_name):
2636- raise NotImplementedError()
2637-
2638-
2639-class PortManagerCallback(ManagerCallback):
2640- """
2641- Callback class that will open or close ports, for use as either
2642- a start or stop action.
2643- """
2644- def __call__(self, manager, service_name, event_name):
2645- service = manager.get_service(service_name)
2646- new_ports = service.get('ports', [])
2647- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
2648- if os.path.exists(port_file):
2649- with open(port_file) as fp:
2650- old_ports = fp.read().split(',')
2651- for old_port in old_ports:
2652- if bool(old_port):
2653- old_port = int(old_port)
2654- if old_port not in new_ports:
2655- hookenv.close_port(old_port)
2656- with open(port_file, 'w') as fp:
2657- fp.write(','.join(str(port) for port in new_ports))
2658- for port in new_ports:
2659- if event_name == 'start':
2660- hookenv.open_port(port)
2661- elif event_name == 'stop':
2662- hookenv.close_port(port)
2663-
2664-
2665-def service_stop(service_name):
2666- """
2667- Wrapper around host.service_stop to prevent spurious "unknown service"
2668- messages in the logs.
2669- """
2670- if host.service_running(service_name):
2671- host.service_stop(service_name)
2672-
2673-
2674-def service_restart(service_name):
2675- """
2676- Wrapper around host.service_restart to prevent spurious "unknown service"
2677- messages in the logs.
2678- """
2679- if host.service_available(service_name):
2680- if host.service_running(service_name):
2681- host.service_restart(service_name)
2682- else:
2683- host.service_start(service_name)
2684-
2685-
2686-# Convenience aliases
2687-open_ports = close_ports = manage_ports = PortManagerCallback()
2688
2689=== added file 'hooks/charmhelpers/core/services/helpers.py'
2690--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
2691+++ hooks/charmhelpers/core/services/helpers.py 2014-10-29 14:25:04 +0000
2692@@ -0,0 +1,239 @@
2693+import os
2694+import yaml
2695+from charmhelpers.core import hookenv
2696+from charmhelpers.core import templating
2697+
2698+from charmhelpers.core.services.base import ManagerCallback
2699+
2700+
2701+__all__ = ['RelationContext', 'TemplateCallback',
2702+ 'render_template', 'template']
2703+
2704+
2705+class RelationContext(dict):
2706+ """
2707+ Base class for a context generator that gets relation data from juju.
2708+
2709+ Subclasses must provide the attributes `name`, which is the name of the
2710+ interface of interest, `interface`, which is the type of the interface of
2711+ interest, and `required_keys`, which is the set of keys required for the
2712+ relation to be considered complete. The data for all interfaces matching
2713+ the `name` attribute that are complete will be used to populate the dictionary
2714+ values (see `get_data`, below).
2715+
2716+ The generated context will be namespaced under the relation :attr:`name`,
2717+ to prevent potential naming conflicts.
2718+
2719+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2720+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2721+ """
2722+ name = None
2723+ interface = None
2724+ required_keys = []
2725+
2726+ def __init__(self, name=None, additional_required_keys=None):
2727+ if name is not None:
2728+ self.name = name
2729+ if additional_required_keys is not None:
2730+ self.required_keys.extend(additional_required_keys)
2731+ self.get_data()
2732+
2733+ def __bool__(self):
2734+ """
2735+ Returns True if all of the required_keys are available.
2736+ """
2737+ return self.is_ready()
2738+
2739+ __nonzero__ = __bool__
2740+
2741+ def __repr__(self):
2742+ return super(RelationContext, self).__repr__()
2743+
2744+ def is_ready(self):
2745+ """
2746+ Returns True if all of the `required_keys` are available from any units.
2747+ """
2748+ ready = len(self.get(self.name, [])) > 0
2749+ if not ready:
2750+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
2751+ return ready
2752+
2753+ def _is_ready(self, unit_data):
2754+ """
2755+ Helper method that tests a set of relation data and returns True if
2756+ all of the `required_keys` are present.
2757+ """
2758+ return set(unit_data.keys()).issuperset(set(self.required_keys))
2759+
2760+ def get_data(self):
2761+ """
2762+ Retrieve the relation data for each unit involved in a relation and,
2763+ if complete, store it in a list under `self[self.name]`. This
2764+ is automatically called when the RelationContext is instantiated.
2765+
2766+ The units are sorted lexicographically first by the service ID, then by
2767+ the unit ID. Thus, if an interface has two other services, 'db:1'
2768+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
2769+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
2770+ set of data, the relation data for the units will be stored in the
2771+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
2772+
2773+ If you only care about a single unit on the relation, you can just
2774+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
2775+ support multiple units on a relation, you should iterate over the list,
2776+ like::
2777+
2778+ {% for unit in interface -%}
2779+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
2780+ {%- endfor %}
2781+
2782+ Note that since all sets of relation data from all related services and
2783+ units are in a single list, if you need to know which service or unit a
2784+ set of data came from, you'll need to extend this class to preserve
2785+ that information.
2786+ """
2787+ if not hookenv.relation_ids(self.name):
2788+ return
2789+
2790+ ns = self.setdefault(self.name, [])
2791+ for rid in sorted(hookenv.relation_ids(self.name)):
2792+ for unit in sorted(hookenv.related_units(rid)):
2793+ reldata = hookenv.relation_get(rid=rid, unit=unit)
2794+ if self._is_ready(reldata):
2795+ ns.append(reldata)
2796+
2797+ def provide_data(self):
2798+ """
2799+ Return data to be relation_set for this interface.
2800+ """
2801+ return {}
2802+
2803+
2804+class MysqlRelation(RelationContext):
2805+ """
2806+ Relation context for the `mysql` interface.
2807+
2808+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2809+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2810+ """
2811+ name = 'db'
2812+ interface = 'mysql'
2813+ required_keys = ['host', 'user', 'password', 'database']
2814+
2815+
2816+class HttpRelation(RelationContext):
2817+ """
2818+ Relation context for the `http` interface.
2819+
2820+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2821+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2822+ """
2823+ name = 'website'
2824+ interface = 'http'
2825+ required_keys = ['host', 'port']
2826+
2827+ def provide_data(self):
2828+ return {
2829+ 'host': hookenv.unit_get('private-address'),
2830+ 'port': 80,
2831+ }
2832+
2833+
2834+class RequiredConfig(dict):
2835+ """
2836+ Data context that loads config options with one or more mandatory options.
2837+
2838+ Once the required options have been changed from their default values, all
2839+ config options will be available, namespaced under `config` to prevent
2840+ potential naming conflicts (for example, between a config option and a
2841+ relation property).
2842+
2843+ :param list *args: List of options that must be changed from their default values.
2844+ """
2845+
2846+ def __init__(self, *args):
2847+ self.required_options = args
2848+ self['config'] = hookenv.config()
2849+ with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
2850+ self.config = yaml.load(fp).get('options', {})
2851+
2852+ def __bool__(self):
2853+ for option in self.required_options:
2854+ if option not in self['config']:
2855+ return False
2856+ current_value = self['config'][option]
2857+ default_value = self.config[option].get('default')
2858+ if current_value == default_value:
2859+ return False
2860+ if current_value in (None, '') and default_value in (None, ''):
2861+ return False
2862+ return True
2863+
2864+ def __nonzero__(self):
2865+ return self.__bool__()
2866+
2867+
2868+class StoredContext(dict):
2869+ """
2870+ A data context that always returns the data that it was first created with.
2871+
2872+ This is useful to do a one-time generation of things like passwords, that
2873+ will thereafter use the same value that was originally generated, instead
2874+ of generating a new value each time it is run.
2875+ """
2876+ def __init__(self, file_name, config_data):
2877+ """
2878+ If the file exists, populate `self` with the data from the file.
2879+ Otherwise, populate with the given data and persist it to the file.
2880+ """
2881+ if os.path.exists(file_name):
2882+ self.update(self.read_context(file_name))
2883+ else:
2884+ self.store_context(file_name, config_data)
2885+ self.update(config_data)
2886+
2887+ def store_context(self, file_name, config_data):
2888+ if not os.path.isabs(file_name):
2889+ file_name = os.path.join(hookenv.charm_dir(), file_name)
2890+ with open(file_name, 'w') as file_stream:
2891+ os.fchmod(file_stream.fileno(), 0600)
2892+ yaml.dump(config_data, file_stream)
2893+
2894+ def read_context(self, file_name):
2895+ if not os.path.isabs(file_name):
2896+ file_name = os.path.join(hookenv.charm_dir(), file_name)
2897+ with open(file_name, 'r') as file_stream:
2898+ data = yaml.load(file_stream)
2899+ if not data:
2900+ raise OSError("%s is empty" % file_name)
2901+ return data
2902+
2903+
2904+class TemplateCallback(ManagerCallback):
2905+ """
2906+ Callback class that will render a Jinja2 template, for use as a ready action.
2907+
2908+ :param str source: The template source file, relative to `$CHARM_DIR/templates`
2909+ :param str target: The target to write the rendered template to
2910+ :param str owner: The owner of the rendered file
2911+ :param str group: The group of the rendered file
2912+ :param int perms: The permissions of the rendered file
2913+ """
2914+ def __init__(self, source, target, owner='root', group='root', perms=0444):
2915+ self.source = source
2916+ self.target = target
2917+ self.owner = owner
2918+ self.group = group
2919+ self.perms = perms
2920+
2921+ def __call__(self, manager, service_name, event_name):
2922+ service = manager.get_service(service_name)
2923+ context = {}
2924+ for ctx in service.get('required_data', []):
2925+ context.update(ctx)
2926+ templating.render(self.source, self.target, context,
2927+ self.owner, self.group, self.perms)
2928+
2929+
2930+# Convenience aliases for templates
2931+render_template = template = TemplateCallback
2932
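RelationContext is meant to be subclassed per interface. A hypothetical example; the class, interface name and keys below are illustrative and not part of this branch:

    from charmhelpers.core.services.helpers import RelationContext

    class AmqpRelation(RelationContext):
        """Hypothetical context for an 'amqp' relation."""
        name = 'amqp'
        interface = 'rabbitmq'
        required_keys = ['hostname', 'password']

    amqp = AmqpRelation()
    if amqp:                      # truthy once some unit supplies both keys
        creds = amqp['amqp'][0]   # data is namespaced under the relation name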
2933=== removed file 'hooks/charmhelpers/core/services/helpers.py'
2934--- hooks/charmhelpers/core/services/helpers.py 2014-09-26 08:06:25 +0000
2935+++ hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
2936@@ -1,239 +0,0 @@
2937-import os
2938-import yaml
2939-from charmhelpers.core import hookenv
2940-from charmhelpers.core import templating
2941-
2942-from charmhelpers.core.services.base import ManagerCallback
2943-
2944-
2945-__all__ = ['RelationContext', 'TemplateCallback',
2946- 'render_template', 'template']
2947-
2948-
2949-class RelationContext(dict):
2950- """
2951- Base class for a context generator that gets relation data from juju.
2952-
2953- Subclasses must provide the attributes `name`, which is the name of the
2954- interface of interest, `interface`, which is the type of the interface of
2955- interest, and `required_keys`, which is the set of keys required for the
2956- relation to be considered complete. The data for all interfaces matching
2957- the `name` attribute that are complete will used to populate the dictionary
2958- values (see `get_data`, below).
2959-
2960- The generated context will be namespaced under the relation :attr:`name`,
2961- to prevent potential naming conflicts.
2962-
2963- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2964- :param list additional_required_keys: Extend the list of :attr:`required_keys`
2965- """
2966- name = None
2967- interface = None
2968- required_keys = []
2969-
2970- def __init__(self, name=None, additional_required_keys=None):
2971- if name is not None:
2972- self.name = name
2973- if additional_required_keys is not None:
2974- self.required_keys.extend(additional_required_keys)
2975- self.get_data()
2976-
2977- def __bool__(self):
2978- """
2979- Returns True if all of the required_keys are available.
2980- """
2981- return self.is_ready()
2982-
2983- __nonzero__ = __bool__
2984-
2985- def __repr__(self):
2986- return super(RelationContext, self).__repr__()
2987-
2988- def is_ready(self):
2989- """
2990- Returns True if all of the `required_keys` are available from any units.
2991- """
2992- ready = len(self.get(self.name, [])) > 0
2993- if not ready:
2994- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
2995- return ready
2996-
2997- def _is_ready(self, unit_data):
2998- """
2999- Helper method that tests a set of relation data and returns True if
3000- all of the `required_keys` are present.
3001- """
3002- return set(unit_data.keys()).issuperset(set(self.required_keys))
3003-
3004- def get_data(self):
3005- """
3006- Retrieve the relation data for each unit involved in a relation and,
3007- if complete, store it in a list under `self[self.name]`. This
3008- is automatically called when the RelationContext is instantiated.
3009-
3010- The units are sorted lexographically first by the service ID, then by
3011- the unit ID. Thus, if an interface has two other services, 'db:1'
3012- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
3013- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
3014- set of data, the relation data for the units will be stored in the
3015- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
3016-
3017- If you only care about a single unit on the relation, you can just
3018- access it as `{{ interface[0]['key'] }}`. However, if you can at all
3019- support multiple units on a relation, you should iterate over the list,
3020- like::
3021-
3022- {% for unit in interface -%}
3023- {{ unit['key'] }}{% if not loop.last %},{% endif %}
3024- {%- endfor %}
3025-
3026- Note that since all sets of relation data from all related services and
3027- units are in a single list, if you need to know which service or unit a
3028- set of data came from, you'll need to extend this class to preserve
3029- that information.
3030- """
3031- if not hookenv.relation_ids(self.name):
3032- return
3033-
3034- ns = self.setdefault(self.name, [])
3035- for rid in sorted(hookenv.relation_ids(self.name)):
3036- for unit in sorted(hookenv.related_units(rid)):
3037- reldata = hookenv.relation_get(rid=rid, unit=unit)
3038- if self._is_ready(reldata):
3039- ns.append(reldata)
3040-
3041- def provide_data(self):
3042- """
3043- Return data to be relation_set for this interface.
3044- """
3045- return {}
3046-
3047-
3048-class MysqlRelation(RelationContext):
3049- """
3050- Relation context for the `mysql` interface.
3051-
3052- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
3053- :param list additional_required_keys: Extend the list of :attr:`required_keys`
3054- """
3055- name = 'db'
3056- interface = 'mysql'
3057- required_keys = ['host', 'user', 'password', 'database']
3058-
3059-
3060-class HttpRelation(RelationContext):
3061- """
3062- Relation context for the `http` interface.
3063-
3064- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
3065- :param list additional_required_keys: Extend the list of :attr:`required_keys`
3066- """
3067- name = 'website'
3068- interface = 'http'
3069- required_keys = ['host', 'port']
3070-
3071- def provide_data(self):
3072- return {
3073- 'host': hookenv.unit_get('private-address'),
3074- 'port': 80,
3075- }
3076-
3077-
3078-class RequiredConfig(dict):
3079- """
3080- Data context that loads config options with one or more mandatory options.
3081-
3082- Once the required options have been changed from their default values, all
3083- config options will be available, namespaced under `config` to prevent
3084- potential naming conflicts (for example, between a config option and a
3085- relation property).
3086-
3087- :param list *args: List of options that must be changed from their default values.
3088- """
3089-
3090- def __init__(self, *args):
3091- self.required_options = args
3092- self['config'] = hookenv.config()
3093- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
3094- self.config = yaml.load(fp).get('options', {})
3095-
3096- def __bool__(self):
3097- for option in self.required_options:
3098- if option not in self['config']:
3099- return False
3100- current_value = self['config'][option]
3101- default_value = self.config[option].get('default')
3102- if current_value == default_value:
3103- return False
3104- if current_value in (None, '') and default_value in (None, ''):
3105- return False
3106- return True
3107-
3108- def __nonzero__(self):
3109- return self.__bool__()
3110-
3111-
3112-class StoredContext(dict):
3113- """
3114- A data context that always returns the data that it was first created with.
3115-
3116- This is useful to do a one-time generation of things like passwords, that
3117- will thereafter use the same value that was originally generated, instead
3118- of generating a new value each time it is run.
3119- """
3120- def __init__(self, file_name, config_data):
3121- """
3122- If the file exists, populate `self` with the data from the file.
3123- Otherwise, populate with the given data and persist it to the file.
3124- """
3125- if os.path.exists(file_name):
3126- self.update(self.read_context(file_name))
3127- else:
3128- self.store_context(file_name, config_data)
3129- self.update(config_data)
3130-
3131- def store_context(self, file_name, config_data):
3132- if not os.path.isabs(file_name):
3133- file_name = os.path.join(hookenv.charm_dir(), file_name)
3134- with open(file_name, 'w') as file_stream:
3135- os.fchmod(file_stream.fileno(), 0600)
3136- yaml.dump(config_data, file_stream)
3137-
3138- def read_context(self, file_name):
3139- if not os.path.isabs(file_name):
3140- file_name = os.path.join(hookenv.charm_dir(), file_name)
3141- with open(file_name, 'r') as file_stream:
3142- data = yaml.load(file_stream)
3143- if not data:
3144- raise OSError("%s is empty" % file_name)
3145- return data
3146-
3147-
3148-class TemplateCallback(ManagerCallback):
3149- """
3150- Callback class that will render a Jinja2 template, for use as a ready action.
3151-
3152- :param str source: The template source file, relative to `$CHARM_DIR/templates`
3153- :param str target: The target to write the rendered template to
3154- :param str owner: The owner of the rendered file
3155- :param str group: The group of the rendered file
3156- :param int perms: The permissions of the rendered file
3157- """
3158- def __init__(self, source, target, owner='root', group='root', perms=0444):
3159- self.source = source
3160- self.target = target
3161- self.owner = owner
3162- self.group = group
3163- self.perms = perms
3164-
3165- def __call__(self, manager, service_name, event_name):
3166- service = manager.get_service(service_name)
3167- context = {}
3168- for ctx in service.get('required_data', []):
3169- context.update(ctx)
3170- templating.render(self.source, self.target, context,
3171- self.owner, self.group, self.perms)
3172-
3173-
3174-# Convenience aliases for templates
3175-render_template = template = TemplateCallback
3176
3177=== added file 'hooks/charmhelpers/core/sysctl.py'
3178--- hooks/charmhelpers/core/sysctl.py 1970-01-01 00:00:00 +0000
3179+++ hooks/charmhelpers/core/sysctl.py 2014-10-29 14:25:04 +0000
3180@@ -0,0 +1,34 @@
3181+#!/usr/bin/env python
3182+# -*- coding: utf-8 -*-
3183+
3184+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
3185+
3186+import yaml
3187+
3188+from subprocess import check_call
3189+
3190+from charmhelpers.core.hookenv import (
3191+ log,
3192+ DEBUG,
3193+)
3194+
3195+
3196+def create(sysctl_dict, sysctl_file):
3197+ """Creates a sysctl.conf file from a YAML associative array
3198+
3199+ :param sysctl_dict: a dict of sysctl options eg { 'kernel.max_pid': 1337 }
3200+ :type sysctl_dict: dict
3201+ :param sysctl_file: path to the sysctl file to be saved
3202+ :type sysctl_file: str or unicode
3203+ :returns: None
3204+ """
3205+ sysctl_dict = yaml.load(sysctl_dict)
3206+
3207+ with open(sysctl_file, "w") as fd:
3208+ for key, value in sysctl_dict.items():
3209+ fd.write("{}={}\n".format(key, value))
3210+
3211+ log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict),
3212+ level=DEBUG)
3213+
3214+ check_call(["sysctl", "-p", sysctl_file])
3215
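Despite the docstring, create() passes its first argument through yaml.load(), so in practice it is handed a YAML string rather than a pre-built dict. A hedged usage sketch; the key and target path are illustrative:

    from charmhelpers.core.sysctl import create

    # Writes the file and immediately applies it with `sysctl -p <file>`.
    create("{ net.ipv4.tcp_keepalive_time: 300 }",
           "/etc/sysctl.d/50-charm.conf")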
3216=== added file 'hooks/charmhelpers/core/templating.py'
3217--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
3218+++ hooks/charmhelpers/core/templating.py 2014-10-29 14:25:04 +0000
3219@@ -0,0 +1,51 @@
3220+import os
3221+
3222+from charmhelpers.core import host
3223+from charmhelpers.core import hookenv
3224+
3225+
3226+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
3227+ """
3228+ Render a template.
3229+
3230+ The `source` path, if not absolute, is relative to the `templates_dir`.
3231+
3232+ The `target` path should be absolute.
3233+
3234+ The context should be a dict containing the values to be replaced in the
3235+ template.
3236+
3237+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
3238+
3239+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
3240+
3241+ Note: Using this requires python-jinja2; if it is not installed, calling
3242+ this will attempt to use charmhelpers.fetch.apt_install to install it.
3243+ """
3244+ try:
3245+ from jinja2 import FileSystemLoader, Environment, exceptions
3246+ except ImportError:
3247+ try:
3248+ from charmhelpers.fetch import apt_install
3249+ except ImportError:
3250+ hookenv.log('Could not import jinja2, and could not import '
3251+ 'charmhelpers.fetch to install it',
3252+ level=hookenv.ERROR)
3253+ raise
3254+ apt_install('python-jinja2', fatal=True)
3255+ from jinja2 import FileSystemLoader, Environment, exceptions
3256+
3257+ if templates_dir is None:
3258+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
3259+ loader = Environment(loader=FileSystemLoader(templates_dir))
3260+ try:
3261+ source = source
3262+ template = loader.get_template(source)
3263+ except exceptions.TemplateNotFound as e:
3264+ hookenv.log('Could not load template %s from %s.' %
3265+ (source, templates_dir),
3266+ level=hookenv.ERROR)
3267+ raise e
3268+ content = template.render(context)
3269+ host.mkdir(os.path.dirname(target))
3270+ host.write_file(target, content, owner, group, perms)
3271
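A short sketch of the re-added render() helper; the template name and context values are illustrative:

    from charmhelpers.core.templating import render

    # Renders $CHARM_DIR/templates/myservice.conf.j2 to the target, creating
    # the parent directory and installing python-jinja2 on demand if missing.
    render('myservice.conf.j2', '/etc/myservice/myservice.conf',
           context={'bind_port': 5672},
           owner='root', group='root', perms=0444)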
3272=== removed file 'hooks/charmhelpers/core/templating.py'
3273--- hooks/charmhelpers/core/templating.py 2014-09-26 08:06:25 +0000
3274+++ hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
3275@@ -1,51 +0,0 @@
3276-import os
3277-
3278-from charmhelpers.core import host
3279-from charmhelpers.core import hookenv
3280-
3281-
3282-def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
3283- """
3284- Render a template.
3285-
3286- The `source` path, if not absolute, is relative to the `templates_dir`.
3287-
3288- The `target` path should be absolute.
3289-
3290- The context should be a dict containing the values to be replaced in the
3291- template.
3292-
3293- The `owner`, `group`, and `perms` options will be passed to `write_file`.
3294-
3295- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
3296-
3297- Note: Using this requires python-jinja2; if it is not installed, calling
3298- this will attempt to use charmhelpers.fetch.apt_install to install it.
3299- """
3300- try:
3301- from jinja2 import FileSystemLoader, Environment, exceptions
3302- except ImportError:
3303- try:
3304- from charmhelpers.fetch import apt_install
3305- except ImportError:
3306- hookenv.log('Could not import jinja2, and could not import '
3307- 'charmhelpers.fetch to install it',
3308- level=hookenv.ERROR)
3309- raise
3310- apt_install('python-jinja2', fatal=True)
3311- from jinja2 import FileSystemLoader, Environment, exceptions
3312-
3313- if templates_dir is None:
3314- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
3315- loader = Environment(loader=FileSystemLoader(templates_dir))
3316- try:
3317- source = source
3318- template = loader.get_template(source)
3319- except exceptions.TemplateNotFound as e:
3320- hookenv.log('Could not load template %s from %s.' %
3321- (source, templates_dir),
3322- level=hookenv.ERROR)
3323- raise e
3324- content = template.render(context)
3325- host.mkdir(os.path.dirname(target))
3326- host.write_file(target, content, owner, group, perms)
3327
3328=== modified file 'hooks/charmhelpers/fetch/__init__.py'
3329--- hooks/charmhelpers/fetch/__init__.py 2014-10-01 12:47:13 +0000
3330+++ hooks/charmhelpers/fetch/__init__.py 2014-10-29 14:25:04 +0000
3331@@ -72,6 +72,7 @@
3332 FETCH_HANDLERS = (
3333 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
3334 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
3335+ 'charmhelpers.fetch.giturl.GitUrlFetchHandler',
3336 )
3337
3338 APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
3339@@ -218,6 +219,7 @@
3340 pocket for the release.
3341 'cloud:' may be used to activate official cloud archive pockets,
3342 such as 'cloud:icehouse'
3343+ 'distro' may be used as a noop
3344
3345 @param key: A key to be added to the system's APT keyring and used
3346 to verify the signatures on packages. Ideally, this should be an
3347@@ -251,8 +253,10 @@
3348 release = lsb_release()['DISTRIB_CODENAME']
3349 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
3350 apt.write(PROPOSED_POCKET.format(release))
3351+ elif source == 'distro':
3352+ pass
3353 else:
3354- raise SourceConfigError("Unknown source: {!r}".format(source))
3355+ log("Unknown source: {!r}".format(source))
3356
3357 if key:
3358 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
3359
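
The add_source() hunk above does two things: 'distro' becomes an accepted no-op, and an unrecognised source is now logged instead of raising SourceConfigError. A small behaviour sketch; the source strings are illustrative.

    from charmhelpers.fetch import add_source

    add_source('distro')           # recognised no-op after this change
    add_source('cloud:icehouse')   # still activates the cloud archive pocket
    add_source('bogus:source')     # previously raised SourceConfigError,
                                   # now only logged as "Unknown source: ..."
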
3360=== added file 'hooks/charmhelpers/fetch/giturl.py'
3361--- hooks/charmhelpers/fetch/giturl.py 1970-01-01 00:00:00 +0000
3362+++ hooks/charmhelpers/fetch/giturl.py 2014-10-29 14:25:04 +0000
3363@@ -0,0 +1,44 @@
3364+import os
3365+from charmhelpers.fetch import (
3366+ BaseFetchHandler,
3367+ UnhandledSource
3368+)
3369+from charmhelpers.core.host import mkdir
3370+
3371+try:
3372+ from git import Repo
3373+except ImportError:
3374+ from charmhelpers.fetch import apt_install
3375+ apt_install("python-git")
3376+ from git import Repo
3377+
3378+
3379+class GitUrlFetchHandler(BaseFetchHandler):
3380+ """Handler for git branches via generic and github URLs"""
3381+ def can_handle(self, source):
3382+ url_parts = self.parse_url(source)
3383+ #TODO (mattyw) no support for ssh git@ yet
3384+ if url_parts.scheme not in ('http', 'https', 'git'):
3385+ return False
3386+ else:
3387+ return True
3388+
3389+ def clone(self, source, dest, branch):
3390+ if not self.can_handle(source):
3391+ raise UnhandledSource("Cannot handle {}".format(source))
3392+
3393+ repo = Repo.clone_from(source, dest)
3394+ repo.git.checkout(branch)
3395+
3396+ def install(self, source, branch="master"):
3397+ url_parts = self.parse_url(source)
3398+ branch_name = url_parts.path.strip("/").split("/")[-1]
3399+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
3400+ branch_name)
3401+ if not os.path.exists(dest_dir):
3402+ mkdir(dest_dir, perms=0755)
3403+ try:
3404+ self.clone(source, dest_dir, branch)
3405+ except OSError as e:
3406+ raise UnhandledSource(e.strerror)
3407+ return dest_dir
3408
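
A usage sketch for the new GitUrlFetchHandler; the repository URL and branch are placeholders. install() clones into $CHARM_DIR/fetched/<last path segment> and checks out the requested branch, so it assumes a hook environment where CHARM_DIR is set and python-git can be installed.

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    if handler.can_handle('https://github.com/example/some-repo'):
        # Clones to $CHARM_DIR/fetched/some-repo and checks out 'stable'.
        dest = handler.install('https://github.com/example/some-repo',
                               branch='stable')
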
3409=== modified file 'hooks/rabbit_utils.py'
3410--- hooks/rabbit_utils.py 2014-09-26 08:10:36 +0000
3411+++ hooks/rabbit_utils.py 2014-10-29 14:25:04 +0000
3412@@ -90,10 +90,11 @@
3413
3414 if admin:
3415 cmd = [RABBITMQ_CTL, 'set_user_tags', user, 'administrator']
3416- log('Granting user (%s) admin access.')
3417+ log('Granting user (%s) admin access.' % user)
3418 else:
3419 cmd = [RABBITMQ_CTL, 'set_user_tags', user]
3420- log('Revoking user (%s) admin access.')
3421+ log('Revoking user (%s) admin access.' % user)
3422+ subprocess.check_call(cmd)
3423
3424
3425 def grant_permissions(user, vhost):
3426
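
The rabbit_utils.py hunk above fixes two problems in one place: the log calls never interpolated the user into their format strings, and the set_user_tags command was built but never executed. A minimal sketch of the corrected flow, using a hypothetical user name and assuming RABBITMQ_CTL points at the rabbitmqctl binary.

    import subprocess
    from charmhelpers.core.hookenv import log

    RABBITMQ_CTL = 'rabbitmqctl'   # assumption for this sketch
    user = 'example-user'          # hypothetical
    cmd = [RABBITMQ_CTL, 'set_user_tags', user, 'administrator']
    log('Granting user (%s) admin access.' % user)  # user is now interpolated
    subprocess.check_call(cmd)     # previously missing, so the tags were never applied
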
3427=== modified file 'hooks/test_rabbitmq_server_relations.py'
3428--- hooks/test_rabbitmq_server_relations.py 2014-09-29 04:26:41 +0000
3429+++ hooks/test_rabbitmq_server_relations.py 2014-10-29 14:25:04 +0000
3430@@ -31,7 +31,7 @@
3431 cache.__getitem__.side_effect = cache_get
3432 return cache
3433
3434- @patch('rabbitmq_server_relations.relation_set')
3435+ @patch('rabbitmq_server_relations.peer_store_and_set')
3436 @patch('apt_pkg.Cache')
3437 @patch('rabbitmq_server_relations.is_clustered')
3438 @patch('rabbitmq_server_relations.configure_client_ssl')
3439@@ -41,7 +41,7 @@
3440 def test_amqp_changed_compare_versions_ha_queues(
3441 self,
3442 eligible_leader, relation_get, unit_get, configure_client_ssl,
3443- is_clustered, apt_cache, relation_set):
3444+ is_clustered, apt_cache, peer_store_and_set):
3445 """
3446 Compare version above and below 3.0.1.
3447 Make sure ha_queues is set correctly on each side.
3448@@ -54,10 +54,10 @@
3449
3450 self.fake_repo = {'rabbitmq-server': {'pkg_vers': '3.0'}}
3451 rabbitmq_server_relations.amqp_changed(None, None)
3452- relation_set.assert_called_with(
3453- relation_settings={'hostname': 'UNIT_TEST', 'ha_queues': True})
3454+ peer_store_and_set.assert_called_with(
3455+ relation_settings={'ha_queues': True, 'hostname': 'UNIT_TEST'})
3456
3457 self.fake_repo = {'rabbitmq-server': {'pkg_vers': '3.0.2'}}
3458 rabbitmq_server_relations.amqp_changed(None, None)
3459- relation_set.assert_called_with(
3460+ peer_store_and_set.assert_called_with(
3461 relation_settings={'hostname': 'UNIT_TEST'})
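
The test changes above patch peer_store_and_set instead of relation_set and reorder the keys in one expected dict. Because the settings are passed as a single dict keyword argument, assert_called_with compares by dict equality, so the key order is irrelevant; a minimal illustration using the mock library:

    from mock import MagicMock

    peer_store_and_set = MagicMock()
    peer_store_and_set(relation_settings={'hostname': 'UNIT_TEST',
                                          'ha_queues': True})
    # Passes: dict equality ignores key order.
    peer_store_and_set.assert_called_with(
        relation_settings={'ha_queues': True, 'hostname': 'UNIT_TEST'})
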
