Merge lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup into lp:charms/trusty/openstack-zeromq

Proposed by Liam Young
Status: Rejected
Rejected by: James Page
Proposed branch: lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup
Merge into: lp:charms/trusty/openstack-zeromq
Diff against target: 2562 lines (+1786/-195)
24 files modified
charm-helpers-hooks.yaml (+2/-0)
hooks/charmhelpers/contrib/hahelpers/apache.py (+66/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+225/-0)
hooks/charmhelpers/contrib/network/ip.py (+194/-19)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+38/-8)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+5/-4)
hooks/charmhelpers/contrib/openstack/context.py (+204/-69)
hooks/charmhelpers/contrib/openstack/ip.py (+1/-1)
hooks/charmhelpers/contrib/openstack/utils.py (+28/-1)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+388/-0)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+88/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+53/-0)
hooks/charmhelpers/core/hookenv.py (+17/-4)
hooks/charmhelpers/core/host.py (+38/-6)
hooks/charmhelpers/core/services/helpers.py (+119/-5)
hooks/charmhelpers/core/sysctl.py (+34/-0)
hooks/charmhelpers/fetch/__init__.py (+19/-5)
hooks/charmhelpers/fetch/archiveurl.py (+49/-4)
hooks/zeromq_context.py (+41/-0)
hooks/zeromq_hooks.py (+17/-56)
hooks/zeromq_utils.py (+78/-0)
templates/matchmaker_ring.json (+1/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+19/-13)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup
Reviewer: James Page
Review status: Needs Fixing
Review via email: mp+238712@code.launchpad.net
James Page (james-page): review: Needs Fixing

Unmerged revisions

22. By Liam Young

Tidy up charm and bring format inline with other os charms

Preview Diff

1=== modified file 'charm-helpers-hooks.yaml'
2--- charm-helpers-hooks.yaml 2014-09-08 13:55:11 +0000
3+++ charm-helpers-hooks.yaml 2014-10-17 13:06:36 +0000
4@@ -5,3 +5,5 @@
5 - fetch
6 - contrib.openstack
7 - contrib.network
8+ - contrib.hahelpers
9+ - contrib.storage
10
11=== added directory 'hooks/charmhelpers/contrib/hahelpers'
12=== added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
13=== added file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
14--- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
15+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2014-10-17 13:06:36 +0000
16@@ -0,0 +1,66 @@
17+#
18+# Copyright 2012 Canonical Ltd.
19+#
20+# This file is sourced from lp:openstack-charm-helpers
21+#
22+# Authors:
23+# James Page <james.page@ubuntu.com>
24+# Adam Gandelman <adamg@ubuntu.com>
25+#
26+
27+import subprocess
28+
29+from charmhelpers.core.hookenv import (
30+ config as config_get,
31+ relation_get,
32+ relation_ids,
33+ related_units as relation_list,
34+ log,
35+ INFO,
36+)
37+
38+
39+def get_cert(cn=None):
40+ # TODO: deal with multiple https endpoints via charm config
41+ cert = config_get('ssl_cert')
42+ key = config_get('ssl_key')
43+ if not (cert and key):
44+ log("Inspecting identity-service relations for SSL certificate.",
45+ level=INFO)
46+ cert = key = None
47+ if cn:
48+ ssl_cert_attr = 'ssl_cert_{}'.format(cn)
49+ ssl_key_attr = 'ssl_key_{}'.format(cn)
50+ else:
51+ ssl_cert_attr = 'ssl_cert'
52+ ssl_key_attr = 'ssl_key'
53+ for r_id in relation_ids('identity-service'):
54+ for unit in relation_list(r_id):
55+ if not cert:
56+ cert = relation_get(ssl_cert_attr,
57+ rid=r_id, unit=unit)
58+ if not key:
59+ key = relation_get(ssl_key_attr,
60+ rid=r_id, unit=unit)
61+ return (cert, key)
62+
63+
64+def get_ca_cert():
65+ ca_cert = config_get('ssl_ca')
66+ if ca_cert is None:
67+ log("Inspecting identity-service relations for CA SSL certificate.",
68+ level=INFO)
69+ for r_id in relation_ids('identity-service'):
70+ for unit in relation_list(r_id):
71+ if ca_cert is None:
72+ ca_cert = relation_get('ca_cert',
73+ rid=r_id, unit=unit)
74+ return ca_cert
75+
76+
77+def install_ca_cert(ca_cert):
78+ if ca_cert:
79+ with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
80+ 'w') as crt:
81+ crt.write(ca_cert)
82+ subprocess.check_call(['update-ca-certificates', '--fresh'])
83
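
For reviewers, a minimal usage sketch of the new apache.py helpers. This assumes the cert/key arrive base64-encoded, as the identity-service relation provides them; configure_ssl is an illustrative name, not part of this diff:

    from base64 import b64decode
    from charmhelpers.contrib.hahelpers.apache import (
        get_cert, get_ca_cert, install_ca_cert)

    def configure_ssl():
        # get_cert() prefers ssl_cert/ssl_key from charm config and falls
        # back to the identity-service relation; both may still be None.
        cert, key = get_cert()
        if cert and key:
            pass  # write the decoded pair out, e.g. b64decode(cert)
        ca_cert = get_ca_cert()
        if ca_cert:
            # Writes keystone_juju_ca_cert.crt and runs
            # 'update-ca-certificates --fresh'.
            install_ca_cert(b64decode(ca_cert))
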
84=== added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
85--- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
86+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-17 13:06:36 +0000
87@@ -0,0 +1,225 @@
88+#
89+# Copyright 2012 Canonical Ltd.
90+#
91+# Authors:
92+# James Page <james.page@ubuntu.com>
93+# Adam Gandelman <adamg@ubuntu.com>
94+#
95+
96+"""
97+Helpers for clustering and determining "cluster leadership" and other
98+clustering-related helpers.
99+"""
100+
101+import subprocess
102+import os
103+
104+from socket import gethostname as get_unit_hostname
105+
106+from charmhelpers.core.hookenv import (
107+ log,
108+ relation_ids,
109+ related_units as relation_list,
110+ relation_get,
111+ config as config_get,
112+ INFO,
113+ ERROR,
114+ WARNING,
115+ unit_get,
116+)
117+
118+
119+class HAIncompleteConfig(Exception):
120+ pass
121+
122+
123+def is_elected_leader(resource):
124+ """
125+ Returns True if the charm executing this is the elected cluster leader.
126+
127+ It relies on two mechanisms to determine leadership:
128+ 1. If the charm is part of a corosync cluster, call corosync to
129+ determine leadership.
130+ 2. If the charm is not part of a corosync cluster, the leader is
131+ determined as being "the alive unit with the lowest unit number". In
132+ other words, the oldest surviving unit.
133+ """
134+ if is_clustered():
135+ if not is_crm_leader(resource):
136+ log('Deferring action to CRM leader.', level=INFO)
137+ return False
138+ else:
139+ peers = peer_units()
140+ if peers and not oldest_peer(peers):
141+ log('Deferring action to oldest service unit.', level=INFO)
142+ return False
143+ return True
144+
145+
146+def is_clustered():
147+ for r_id in (relation_ids('ha') or []):
148+ for unit in (relation_list(r_id) or []):
149+ clustered = relation_get('clustered',
150+ rid=r_id,
151+ unit=unit)
152+ if clustered:
153+ return True
154+ return False
155+
156+
157+def is_crm_leader(resource):
158+ """
159+ Returns True if the charm calling this is the elected corosync leader,
160+ as returned by calling the external "crm" command.
161+ """
162+ cmd = [
163+ "crm", "resource",
164+ "show", resource
165+ ]
166+ try:
167+ status = subprocess.check_output(cmd)
168+ except subprocess.CalledProcessError:
169+ return False
170+ else:
171+ if get_unit_hostname() in status:
172+ return True
173+ else:
174+ return False
175+
176+
177+def is_leader(resource):
178+ log("is_leader is deprecated. Please consider using is_crm_leader "
179+ "instead.", level=WARNING)
180+ return is_crm_leader(resource)
181+
182+
183+def peer_units(peer_relation="cluster"):
184+ peers = []
185+ for r_id in (relation_ids(peer_relation) or []):
186+ for unit in (relation_list(r_id) or []):
187+ peers.append(unit)
188+ return peers
189+
190+
191+def peer_ips(peer_relation='cluster', addr_key='private-address'):
192+ '''Return a dict of peers and their private-address'''
193+ peers = {}
194+ for r_id in relation_ids(peer_relation):
195+ for unit in relation_list(r_id):
196+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
197+ return peers
198+
199+
200+def oldest_peer(peers):
201+ """Determines who the oldest peer is by comparing unit numbers."""
202+ local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
203+ for peer in peers:
204+ remote_unit_no = int(peer.split('/')[1])
205+ if remote_unit_no < local_unit_no:
206+ return False
207+ return True
208+
209+
210+def eligible_leader(resource):
211+ log("eligible_leader is deprecated. Please consider using "
212+ "is_elected_leader instead.", level=WARNING)
213+ return is_elected_leader(resource)
214+
215+
216+def https():
217+ '''
218+ Determines whether enough data has been provided in configuration
219+ or relation data to configure HTTPS
220+ .
221+ returns: boolean
222+ '''
223+ if config_get('use-https') == "yes":
224+ return True
225+ if config_get('ssl_cert') and config_get('ssl_key'):
226+ return True
227+ for r_id in relation_ids('identity-service'):
228+ for unit in relation_list(r_id):
229+ # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
230+ rel_state = [
231+ relation_get('https_keystone', rid=r_id, unit=unit),
232+ relation_get('ca_cert', rid=r_id, unit=unit),
233+ ]
234+ # NOTE: works around (LP: #1203241)
235+ if (None not in rel_state) and ('' not in rel_state):
236+ return True
237+ return False
238+
239+
240+def determine_api_port(public_port):
241+ '''
242+ Determine correct API server listening port based on
243+ existence of HTTPS reverse proxy and/or haproxy.
244+
245+ public_port: int: standard public port for given service
246+
247+ returns: int: the correct listening port for the API service
248+ '''
249+ i = 0
250+ if len(peer_units()) > 0 or is_clustered():
251+ i += 1
252+ if https():
253+ i += 1
254+ return public_port - (i * 10)
255+
256+
257+def determine_apache_port(public_port):
258+ '''
259+ Description: Determine correct apache listening port based on public IP +
260+ state of the cluster.
261+
262+ public_port: int: standard public port for given service
263+
264+ returns: int: the correct listening port for the HAProxy service
265+ '''
266+ i = 0
267+ if len(peer_units()) > 0 or is_clustered():
268+ i += 1
269+ return public_port - (i * 10)
270+
271+
272+def get_hacluster_config():
273+ '''
274+ Obtains all relevant configuration from charm configuration required
275+ for initiating a relation to hacluster:
276+
277+ ha-bindiface, ha-mcastport, vip
278+
279+ returns: dict: A dict containing settings keyed by setting name.
280+ raises: HAIncompleteConfig if settings are missing.
281+ '''
282+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
283+ conf = {}
284+ for setting in settings:
285+ conf[setting] = config_get(setting)
286+ missing = []
287+ [missing.append(s) for s, v in conf.iteritems() if v is None]
288+ if missing:
289+ log('Insufficient config data to configure hacluster.', level=ERROR)
290+ raise HAIncompleteConfig
291+ return conf
292+
293+
294+def canonical_url(configs, vip_setting='vip'):
295+ '''
296+ Returns the correct HTTP URL to this host given the state of HTTPS
297+ configuration and hacluster.
298+
299+ :configs : OSTemplateRenderer: A config templating object to inspect for
300+ a complete https context.
301+
302+ :vip_setting: str: Setting in charm config that specifies
303+ VIP address.
304+ '''
305+ scheme = 'http'
306+ if 'https' in configs.complete_contexts():
307+ scheme = 'https'
308+ if is_clustered():
309+ addr = config_get(vip_setting)
310+ else:
311+ addr = unit_get('private-address')
312+ return '%s://%s' % (scheme, addr)
313
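
A sketch of how charms typically consume these cluster helpers. The CRM resource name and migrate_database() are illustrative, not part of this diff:

    from charmhelpers.contrib.hahelpers.cluster import (
        is_elected_leader, determine_api_port, determine_apache_port)

    if is_elected_leader('res_zmq_vip'):  # illustrative CRM resource name
        migrate_database()                # illustrative one-shot action

    # Each active condition (peered/clustered, https) subtracts 10 from
    # the public port, so 5000 becomes 4980 with both in play.
    listen_port = determine_api_port(5000)
    haproxy_port = determine_apache_port(5000)
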
314=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
315--- hooks/charmhelpers/contrib/network/ip.py 2014-09-08 14:18:52 +0000
316+++ hooks/charmhelpers/contrib/network/ip.py 2014-10-17 13:06:36 +0000
317@@ -1,10 +1,16 @@
318+import glob
319+import re
320+import subprocess
321 import sys
322
323 from functools import partial
324
325+from charmhelpers.core.hookenv import unit_get
326 from charmhelpers.fetch import apt_install
327 from charmhelpers.core.hookenv import (
328- ERROR, log, config,
329+ WARNING,
330+ ERROR,
331+ log
332 )
333
334 try:
335@@ -51,6 +57,8 @@
336 else:
337 if fatal:
338 not_found_error_out()
339+ else:
340+ return None
341
342 _validate_cidr(network)
343 network = netaddr.IPNetwork(network)
344@@ -132,7 +140,8 @@
345 if address.version == 4 and netifaces.AF_INET in addresses:
346 addr = addresses[netifaces.AF_INET][0]['addr']
347 netmask = addresses[netifaces.AF_INET][0]['netmask']
348- cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
349+ network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
350+ cidr = network.cidr
351 if address in cidr:
352 if key == 'iface':
353 return iface
354@@ -141,11 +150,14 @@
355 if address.version == 6 and netifaces.AF_INET6 in addresses:
356 for addr in addresses[netifaces.AF_INET6]:
357 if not addr['addr'].startswith('fe80'):
358- cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
359- addr['netmask']))
360+ network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
361+ addr['netmask']))
362+ cidr = network.cidr
363 if address in cidr:
364 if key == 'iface':
365 return iface
366+ elif key == 'netmask' and cidr:
367+ return str(cidr).split('/')[1]
368 else:
369 return addr[key]
370 return None
371@@ -156,19 +168,182 @@
372 get_netmask_for_address = partial(_get_for_address, key='netmask')
373
374
375-def get_ipv6_addr(iface="eth0"):
376+def format_ipv6_addr(address):
377+ """
378+ IPv6 needs to be wrapped with [] in url link to parse correctly.
379+ """
380+ if is_ipv6(address):
381+ address = "[%s]" % address
382+ else:
383+ log("Not a valid ipv6 address: %s" % address, level=WARNING)
384+ address = None
385+
386+ return address
387+
388+
389+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
390+ fatal=True, exc_list=None):
391+ """
392+ Return the assigned IP address for a given interface, if any, or [].
393+ """
394+ # Extract nic if passed /dev/ethX
395+ if '/' in iface:
396+ iface = iface.split('/')[-1]
397+ if not exc_list:
398+ exc_list = []
399 try:
400- iface_addrs = netifaces.ifaddresses(iface)
401- if netifaces.AF_INET6 not in iface_addrs:
402- raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
403-
404- addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
405- ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
406- and config('vip') != a['addr']]
407- if not ipv6_addr:
408- raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
409-
410- return ipv6_addr[0]
411-
412- except ValueError:
413- raise ValueError("Invalid interface '%s'" % iface)
414+ inet_num = getattr(netifaces, inet_type)
415+ except AttributeError:
416+ raise Exception('Unknown inet type ' + str(inet_type))
417+
418+ interfaces = netifaces.interfaces()
419+ if inc_aliases:
420+ ifaces = []
421+ for _iface in interfaces:
422+ if iface == _iface or _iface.split(':')[0] == iface:
423+ ifaces.append(_iface)
424+ if fatal and not ifaces:
425+ raise Exception("Invalid interface '%s'" % iface)
426+ ifaces.sort()
427+ else:
428+ if iface not in interfaces:
429+ if fatal:
430+ raise Exception("%s not found " % (iface))
431+ else:
432+ return []
433+ else:
434+ ifaces = [iface]
435+
436+ addresses = []
437+ for netiface in ifaces:
438+ net_info = netifaces.ifaddresses(netiface)
439+ if inet_num in net_info:
440+ for entry in net_info[inet_num]:
441+ if 'addr' in entry and entry['addr'] not in exc_list:
442+ addresses.append(entry['addr'])
443+ if fatal and not addresses:
444+ raise Exception("Interface '%s' doesn't have any %s addresses." %
445+ (iface, inet_type))
446+ return addresses
447+
448+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
449+
450+
451+def get_iface_from_addr(addr):
452+ """Work out on which interface the provided address is configured."""
453+ for iface in netifaces.interfaces():
454+ addresses = netifaces.ifaddresses(iface)
455+ for inet_type in addresses:
456+ for _addr in addresses[inet_type]:
457+ _addr = _addr['addr']
458+ # link local
459+ ll_key = re.compile("(.+)%.*")
460+ raw = re.match(ll_key, _addr)
461+ if raw:
462+ _addr = raw.group(1)
463+ if _addr == addr:
464+ log("Address '%s' is configured on iface '%s'" %
465+ (addr, iface))
466+ return iface
467+
468+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
469+ raise Exception(msg)
470+
471+
472+def sniff_iface(f):
473+ """If no iface provided, inject net iface inferred from unit private
474+ address.
475+ """
476+ def iface_sniffer(*args, **kwargs):
477+ if not kwargs.get('iface', None):
478+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
479+
480+ return f(*args, **kwargs)
481+
482+ return iface_sniffer
483+
484+
485+@sniff_iface
486+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
487+ dynamic_only=True):
488+ """Get assigned IPv6 address for a given interface.
489+
490+ Returns list of addresses found. If no address found, returns empty list.
491+
492+ If iface is None, we infer the current primary interface by doing a reverse
493+ lookup on the unit private-address.
494+
495+ We currently only support scope global IPv6 addresses i.e. non-temporary
496+ addresses. If no global IPv6 address is found, return the first one found
497+ in the ipv6 address list.
498+ """
499+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
500+ inc_aliases=inc_aliases, fatal=fatal,
501+ exc_list=exc_list)
502+
503+ if addresses:
504+ global_addrs = []
505+ for addr in addresses:
506+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
507+ m = re.match(key_scope_link_local, addr)
508+ if m:
509+ eui_64_mac = m.group(1)
510+ iface = m.group(2)
511+ else:
512+ global_addrs.append(addr)
513+
514+ if global_addrs:
515+ # Make sure any found global addresses are not temporary
516+ cmd = ['ip', 'addr', 'show', iface]
517+ out = subprocess.check_output(cmd)
518+ if dynamic_only:
519+ key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
520+ else:
521+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
522+
523+ addrs = []
524+ for line in out.split('\n'):
525+ line = line.strip()
526+ m = re.match(key, line)
527+ if m and 'temporary' not in line:
528+ # Return the first valid address we find
529+ for addr in global_addrs:
530+ if m.group(1) == addr:
531+ if not dynamic_only or \
532+ m.group(1).endswith(eui_64_mac):
533+ addrs.append(addr)
534+
535+ if addrs:
536+ return addrs
537+
538+ if fatal:
539+ raise Exception("Interface '%s' doesn't have a scope global "
540+ "non-temporary ipv6 address." % iface)
541+
542+ return []
543+
544+
545+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
546+ """
547+ Return a list of bridges on the system or []
548+ """
549+ b_rgex = vnic_dir + '/*/bridge'
550+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
551+
552+
553+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
554+ """
555+ Return a list of nics comprising a given bridge on the system or []
556+ """
557+ brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
558+ return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
559+
560+
561+def is_bridge_member(nic):
562+ """
563+ Check if a given nic is a member of a bridge
564+ """
565+ for bridge in get_bridges():
566+ if nic in get_bridge_nics(bridge):
567+ return True
568+ return False
569
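
A sketch of the reworked IPv6/interface API added above; the addresses shown are illustrative:

    from charmhelpers.contrib.network.ip import (
        get_ipv6_addr, get_iface_addr, format_ipv6_addr)

    # First scope-global, non-temporary IPv6 address; with iface=None the
    # interface is sniffed from the unit's private-address.
    addr = get_ipv6_addr(exc_list=['2001:db8::10'])[0]
    host = format_ipv6_addr(addr)  # wrapped as '[addr]' for use in URLs

    # All IPv4 addresses on eth0 including aliases (eth0:0, eth0:1, ...).
    v4_addrs = get_iface_addr(iface='eth0', inet_type='AF_INET',
                              inc_aliases=True)
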
570=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
571--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-02 11:19:19 +0000
572+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-17 13:06:36 +0000
573@@ -10,32 +10,62 @@
574 that is specifically for use by OpenStack charms.
575 """
576
577- def __init__(self, series=None, openstack=None, source=None):
578+ def __init__(self, series=None, openstack=None, source=None, stable=True):
579 """Initialize the deployment environment."""
580 super(OpenStackAmuletDeployment, self).__init__(series)
581 self.openstack = openstack
582 self.source = source
583+ self.stable = stable
584+ # Note(coreycb): this needs to be changed when new next branches come
585+ # out.
586+ self.current_next = "trusty"
587+
588+ def _determine_branch_locations(self, other_services):
589+ """Determine the branch locations for the other services.
590+
591+ Determine if the local branch being tested is derived from its
592+ stable or next (dev) branch, and based on this, use the corresponding
593+ stable or next branches for the other_services."""
594+ base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
595+
596+ if self.stable:
597+ for svc in other_services:
598+ temp = 'lp:charms/{}'
599+ svc['location'] = temp.format(svc['name'])
600+ else:
601+ for svc in other_services:
602+ if svc['name'] in base_charms:
603+ temp = 'lp:charms/{}'
604+ svc['location'] = temp.format(svc['name'])
605+ else:
606+ temp = 'lp:~openstack-charmers/charms/{}/{}/next'
607+ svc['location'] = temp.format(self.current_next,
608+ svc['name'])
609+ return other_services
610
611 def _add_services(self, this_service, other_services):
612- """Add services to the deployment and set openstack-origin."""
613+ """Add services to the deployment and set openstack-origin/source."""
614+ other_services = self._determine_branch_locations(other_services)
615+
616 super(OpenStackAmuletDeployment, self)._add_services(this_service,
617 other_services)
618- name = 0
619+
620 services = other_services
621 services.append(this_service)
622- use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
623+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
624+ 'ceph-osd', 'ceph-radosgw']
625
626 if self.openstack:
627 for svc in services:
628- if svc[name] not in use_source:
629+ if svc['name'] not in use_source:
630 config = {'openstack-origin': self.openstack}
631- self.d.configure(svc[name], config)
632+ self.d.configure(svc['name'], config)
633
634 if self.source:
635 for svc in services:
636- if svc[name] in use_source:
637+ if svc['name'] in use_source:
638 config = {'source': self.source}
639- self.d.configure(svc[name], config)
640+ self.d.configure(svc['name'], config)
641
642 def _configure_services(self, configs):
643 """Configure all of the services."""
644
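
A sketch of how a test would opt into the next branches. Service names are illustrative, and tests would normally drive this through an OpenStackAmuletDeployment subclass rather than calling _add_services directly:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment)

    # stable=False resolves OpenStack charms to
    # lp:~openstack-charmers/charms/trusty/<name>/next, while the base
    # charms (mysql, mongodb, rabbitmq-server) stay on lp:charms/<name>.
    d = OpenStackAmuletDeployment(series='trusty', stable=False)
    d._add_services({'name': 'openstack-zeromq'},
                    [{'name': 'rabbitmq-server'}])
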
645=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
646--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-02 11:19:19 +0000
647+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-17 13:06:36 +0000
648@@ -187,15 +187,16 @@
649
650 f = opener.open("http://download.cirros-cloud.net/version/released")
651 version = f.read().strip()
652- cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
653+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
654+ local_path = os.path.join('tests', cirros_img)
655
656- if not os.path.exists(cirros_img):
657+ if not os.path.exists(local_path):
658 cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
659 version, cirros_img)
660- opener.retrieve(cirros_url, cirros_img)
661+ opener.retrieve(cirros_url, local_path)
662 f.close()
663
664- with open(cirros_img) as f:
665+ with open(local_path) as f:
666 image = glance.images.create(name=image_name, is_public=True,
667 disk_format='qcow2',
668 container_format='bare', data=f)
669
670=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
671--- hooks/charmhelpers/contrib/openstack/context.py 2014-09-02 11:19:19 +0000
672+++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-17 13:06:36 +0000
673@@ -8,7 +8,6 @@
674 check_call
675 )
676
677-
678 from charmhelpers.fetch import (
679 apt_install,
680 filter_installed_packages,
681@@ -28,6 +27,11 @@
682 INFO
683 )
684
685+from charmhelpers.core.host import (
686+ mkdir,
687+ write_file
688+)
689+
690 from charmhelpers.contrib.hahelpers.cluster import (
691 determine_apache_port,
692 determine_api_port,
693@@ -38,6 +42,7 @@
694 from charmhelpers.contrib.hahelpers.apache import (
695 get_cert,
696 get_ca_cert,
697+ install_ca_cert,
698 )
699
700 from charmhelpers.contrib.openstack.neutron import (
701@@ -47,8 +52,13 @@
702 from charmhelpers.contrib.network.ip import (
703 get_address_in_network,
704 get_ipv6_addr,
705+ get_netmask_for_address,
706+ format_ipv6_addr,
707+ is_address_in_network
708 )
709
710+from charmhelpers.contrib.openstack.utils import get_host_ip
711+
712 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
713
714
715@@ -168,8 +178,10 @@
716 for rid in relation_ids('shared-db'):
717 for unit in related_units(rid):
718 rdata = relation_get(rid=rid, unit=unit)
719+ host = rdata.get('db_host')
720+ host = format_ipv6_addr(host) or host
721 ctxt = {
722- 'database_host': rdata.get('db_host'),
723+ 'database_host': host,
724 'database': self.database,
725 'database_user': self.user,
726 'database_password': rdata.get(password_setting),
727@@ -245,10 +257,15 @@
728 for rid in relation_ids('identity-service'):
729 for unit in related_units(rid):
730 rdata = relation_get(rid=rid, unit=unit)
731+ serv_host = rdata.get('service_host')
732+ serv_host = format_ipv6_addr(serv_host) or serv_host
733+ auth_host = rdata.get('auth_host')
734+ auth_host = format_ipv6_addr(auth_host) or auth_host
735+
736 ctxt = {
737 'service_port': rdata.get('service_port'),
738- 'service_host': rdata.get('service_host'),
739- 'auth_host': rdata.get('auth_host'),
740+ 'service_host': serv_host,
741+ 'auth_host': auth_host,
742 'auth_port': rdata.get('auth_port'),
743 'admin_tenant_name': rdata.get('service_tenant'),
744 'admin_user': rdata.get('service_username'),
745@@ -297,11 +314,13 @@
746 for unit in related_units(rid):
747 if relation_get('clustered', rid=rid, unit=unit):
748 ctxt['clustered'] = True
749- ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
750- unit=unit)
751+ vip = relation_get('vip', rid=rid, unit=unit)
752+ vip = format_ipv6_addr(vip) or vip
753+ ctxt['rabbitmq_host'] = vip
754 else:
755- ctxt['rabbitmq_host'] = relation_get('private-address',
756- rid=rid, unit=unit)
757+ host = relation_get('private-address', rid=rid, unit=unit)
758+ host = format_ipv6_addr(host) or host
759+ ctxt['rabbitmq_host'] = host
760 ctxt.update({
761 'rabbitmq_user': username,
762 'rabbitmq_password': relation_get('password', rid=rid,
763@@ -340,8 +359,9 @@
764 and len(related_units(rid)) > 1:
765 rabbitmq_hosts = []
766 for unit in related_units(rid):
767- rabbitmq_hosts.append(relation_get('private-address',
768- rid=rid, unit=unit))
769+ host = relation_get('private-address', rid=rid, unit=unit)
770+ host = format_ipv6_addr(host) or host
771+ rabbitmq_hosts.append(host)
772 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
773 if not context_complete(ctxt):
774 return {}
775@@ -370,6 +390,7 @@
776 ceph_addr = \
777 relation_get('ceph-public-address', rid=rid, unit=unit) or \
778 relation_get('private-address', rid=rid, unit=unit)
779+ ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
780 mon_hosts.append(ceph_addr)
781
782 ctxt = {
783@@ -390,6 +411,9 @@
784 return ctxt
785
786
787+ADDRESS_TYPES = ['admin', 'internal', 'public']
788+
789+
790 class HAProxyContext(OSContextGenerator):
791 interfaces = ['cluster']
792
793@@ -402,25 +426,63 @@
794 if not relation_ids('cluster'):
795 return {}
796
797+ l_unit = local_unit().replace('/', '-')
798+
799+ if config('prefer-ipv6'):
800+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
801+ else:
802+ addr = get_host_ip(unit_get('private-address'))
803+
804 cluster_hosts = {}
805- l_unit = local_unit().replace('/', '-')
806- if config('prefer-ipv6'):
807- addr = get_ipv6_addr()
808- else:
809- addr = unit_get('private-address')
810- cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
811- addr)
812-
813- for rid in relation_ids('cluster'):
814- for unit in related_units(rid):
815- _unit = unit.replace('/', '-')
816- addr = relation_get('private-address', rid=rid, unit=unit)
817- cluster_hosts[_unit] = addr
818+
819+ # NOTE(jamespage): build out map of configured network endpoints
820+ # and associated backends
821+ for addr_type in ADDRESS_TYPES:
822+ laddr = get_address_in_network(
823+ config('os-{}-network'.format(addr_type)))
824+ if laddr:
825+ cluster_hosts[laddr] = {}
826+ cluster_hosts[laddr]['network'] = "{}/{}".format(
827+ laddr,
828+ get_netmask_for_address(laddr)
829+ )
830+ cluster_hosts[laddr]['backends'] = {}
831+ cluster_hosts[laddr]['backends'][l_unit] = laddr
832+ for rid in relation_ids('cluster'):
833+ for unit in related_units(rid):
834+ _unit = unit.replace('/', '-')
835+ _laddr = relation_get('{}-address'.format(addr_type),
836+ rid=rid, unit=unit)
837+ if _laddr:
838+ cluster_hosts[laddr]['backends'][_unit] = _laddr
839+
840+ # NOTE(jamespage) no split configurations found, just use
841+ # private addresses
842+ if not cluster_hosts:
843+ cluster_hosts[addr] = {}
844+ cluster_hosts[addr]['network'] = "{}/{}".format(
845+ addr,
846+ get_netmask_for_address(addr)
847+ )
848+ cluster_hosts[addr]['backends'] = {}
849+ cluster_hosts[addr]['backends'][l_unit] = addr
850+ for rid in relation_ids('cluster'):
851+ for unit in related_units(rid):
852+ _unit = unit.replace('/', '-')
853+ _laddr = relation_get('private-address',
854+ rid=rid, unit=unit)
855+ if _laddr:
856+ cluster_hosts[addr]['backends'][_unit] = _laddr
857
858 ctxt = {
859- 'units': cluster_hosts,
860+ 'frontends': cluster_hosts,
861 }
862
863+ if config('haproxy-server-timeout'):
864+ ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
865+ if config('haproxy-client-timeout'):
866+ ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
867+
868 if config('prefer-ipv6'):
869 ctxt['local_host'] = 'ip6-localhost'
870 ctxt['haproxy_host'] = '::'
871@@ -430,12 +492,13 @@
872 ctxt['haproxy_host'] = '0.0.0.0'
873 ctxt['stat_port'] = ':8888'
874
875- if len(cluster_hosts.keys()) > 1:
876- # Enable haproxy when we have enough peers.
877- log('Ensuring haproxy enabled in /etc/default/haproxy.')
878- with open('/etc/default/haproxy', 'w') as out:
879- out.write('ENABLED=1\n')
880- return ctxt
881+ for frontend in cluster_hosts:
882+ if len(cluster_hosts[frontend]['backends']) > 1:
883+ # Enable haproxy when we have enough peers.
884+ log('Ensuring haproxy enabled in /etc/default/haproxy.')
885+ with open('/etc/default/haproxy', 'w') as out:
886+ out.write('ENABLED=1\n')
887+ return ctxt
888 log('HAProxy context is incomplete, this unit has no peers.')
889 return {}
890
891@@ -490,22 +553,36 @@
892 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
893 check_call(cmd)
894
895- def configure_cert(self):
896- if not os.path.isdir('/etc/apache2/ssl'):
897- os.mkdir('/etc/apache2/ssl')
898+ def configure_cert(self, cn=None):
899 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
900- if not os.path.isdir(ssl_dir):
901- os.mkdir(ssl_dir)
902- cert, key = get_cert()
903- with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
904- cert_out.write(b64decode(cert))
905- with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
906- key_out.write(b64decode(key))
907+ mkdir(path=ssl_dir)
908+ cert, key = get_cert(cn)
909+ if cn:
910+ cert_filename = 'cert_{}'.format(cn)
911+ key_filename = 'key_{}'.format(cn)
912+ else:
913+ cert_filename = 'cert'
914+ key_filename = 'key'
915+ write_file(path=os.path.join(ssl_dir, cert_filename),
916+ content=b64decode(cert))
917+ write_file(path=os.path.join(ssl_dir, key_filename),
918+ content=b64decode(key))
919+
920+ def configure_ca(self):
921 ca_cert = get_ca_cert()
922 if ca_cert:
923- with open(CA_CERT_PATH, 'w') as ca_out:
924- ca_out.write(b64decode(ca_cert))
925- check_call(['update-ca-certificates'])
926+ install_ca_cert(b64decode(ca_cert))
927+
928+ def canonical_names(self):
929+ '''Figure out which canonical names clients will access this service'''
930+ cns = []
931+ for r_id in relation_ids('identity-service'):
932+ for unit in related_units(r_id):
933+ rdata = relation_get(rid=r_id, unit=unit)
934+ for k in rdata:
935+ if k.startswith('ssl_key_'):
936+ cns.append(k.lstrip('ssl_key_'))
937+ return list(set(cns))
938
939 def __call__(self):
940 if isinstance(self.external_ports, basestring):
941@@ -513,21 +590,47 @@
942 if (not self.external_ports or not https()):
943 return {}
944
945- self.configure_cert()
946+ self.configure_ca()
947 self.enable_modules()
948
949 ctxt = {
950 'namespace': self.service_namespace,
951- 'private_address': unit_get('private-address'),
952- 'endpoints': []
953+ 'endpoints': [],
954+ 'ext_ports': []
955 }
956- if is_clustered():
957- ctxt['private_address'] = config('vip')
958- for api_port in self.external_ports:
959- ext_port = determine_apache_port(api_port)
960- int_port = determine_api_port(api_port)
961- portmap = (int(ext_port), int(int_port))
962- ctxt['endpoints'].append(portmap)
963+
964+ for cn in self.canonical_names():
965+ self.configure_cert(cn)
966+
967+ addresses = []
968+ vips = []
969+ if config('vip'):
970+ vips = config('vip').split()
971+
972+ for network_type in ['os-internal-network',
973+ 'os-admin-network',
974+ 'os-public-network']:
975+ address = get_address_in_network(config(network_type),
976+ unit_get('private-address'))
977+ if len(vips) > 0 and is_clustered():
978+ for vip in vips:
979+ if is_address_in_network(config(network_type),
980+ vip):
981+ addresses.append((address, vip))
982+ break
983+ elif is_clustered():
984+ addresses.append((address, config('vip')))
985+ else:
986+ addresses.append((address, address))
987+
988+ for address, endpoint in set(addresses):
989+ for api_port in self.external_ports:
990+ ext_port = determine_apache_port(api_port)
991+ int_port = determine_api_port(api_port)
992+ portmap = (address, endpoint, int(ext_port), int(int_port))
993+ ctxt['endpoints'].append(portmap)
994+ ctxt['ext_ports'].append(int(ext_port))
995+ ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
996 return ctxt
997
998
999@@ -657,22 +760,22 @@
1000
1001 class OSConfigFlagContext(OSContextGenerator):
1002
1003- """
1004- Responsible for adding user-defined config-flags in charm config to a
1005- template context.
1006-
1007- NOTE: the value of config-flags may be a comma-separated list of
1008- key=value pairs and some Openstack config files support
1009- comma-separated lists as values.
1010- """
1011-
1012- def __call__(self):
1013- config_flags = config('config-flags')
1014- if not config_flags:
1015- return {}
1016-
1017- flags = config_flags_parser(config_flags)
1018- return {'user_config_flags': flags}
1019+ """
1020+ Responsible for adding user-defined config-flags in charm config to a
1021+ template context.
1022+
1023+ NOTE: the value of config-flags may be a comma-separated list of
1024+ key=value pairs and some Openstack config files support
1025+ comma-separated lists as values.
1026+ """
1027+
1028+ def __call__(self):
1029+ config_flags = config('config-flags')
1030+ if not config_flags:
1031+ return {}
1032+
1033+ flags = config_flags_parser(config_flags)
1034+ return {'user_config_flags': flags}
1035
1036
1037 class SubordinateConfigContext(OSContextGenerator):
1038@@ -787,3 +890,35 @@
1039 'use_syslog': config('use-syslog')
1040 }
1041 return ctxt
1042+
1043+
1044+class BindHostContext(OSContextGenerator):
1045+
1046+ def __call__(self):
1047+ if config('prefer-ipv6'):
1048+ return {
1049+ 'bind_host': '::'
1050+ }
1051+ else:
1052+ return {
1053+ 'bind_host': '0.0.0.0'
1054+ }
1055+
1056+
1057+class WorkerConfigContext(OSContextGenerator):
1058+
1059+ @property
1060+ def num_cpus(self):
1061+ try:
1062+ from psutil import NUM_CPUS
1063+ except ImportError:
1064+ apt_install('python-psutil', fatal=True)
1065+ from psutil import NUM_CPUS
1066+ return NUM_CPUS
1067+
1068+ def __call__(self):
1069+ multiplier = config('worker-multiplier') or 1
1070+ ctxt = {
1071+ "workers": self.num_cpus * multiplier
1072+ }
1073+ return ctxt
1074
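
A sketch of wiring the new context generators into a charm's config rendering. The file path is illustrative; OSConfigRenderer is the existing charm-helpers templating entry point:

    from charmhelpers.contrib.openstack import context, templating

    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='icehouse')
    # BindHostContext picks '::' vs '0.0.0.0' from prefer-ipv6;
    # WorkerConfigContext scales workers by CPU count * worker-multiplier.
    configs.register('/etc/zeromq/zeromq.conf',  # illustrative path
                     [context.BindHostContext(),
                      context.WorkerConfigContext()])
    configs.write_all()
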
1075=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
1076--- hooks/charmhelpers/contrib/openstack/ip.py 2014-09-02 11:19:19 +0000
1077+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-17 13:06:36 +0000
1078@@ -66,7 +66,7 @@
1079 resolved_address = vip
1080 else:
1081 if config('prefer-ipv6'):
1082- fallback_addr = get_ipv6_addr()
1083+ fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1084 else:
1085 fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1086 resolved_address = get_address_in_network(
1087
1088=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1089--- hooks/charmhelpers/contrib/openstack/utils.py 2014-09-08 13:55:11 +0000
1090+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-17 13:06:36 +0000
1091@@ -4,6 +4,7 @@
1092 from collections import OrderedDict
1093
1094 import subprocess
1095+import json
1096 import os
1097 import socket
1098 import sys
1099@@ -13,7 +14,9 @@
1100 log as juju_log,
1101 charm_dir,
1102 ERROR,
1103- INFO
1104+ INFO,
1105+ relation_ids,
1106+ relation_set
1107 )
1108
1109 from charmhelpers.contrib.storage.linux.lvm import (
1110@@ -22,6 +25,10 @@
1111 remove_lvm_physical_volume,
1112 )
1113
1114+from charmhelpers.contrib.network.ip import (
1115+ get_ipv6_addr
1116+)
1117+
1118 from charmhelpers.core.host import lsb_release, mounts, umount
1119 from charmhelpers.fetch import apt_install, apt_cache
1120 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
1121@@ -71,6 +78,8 @@
1122 ('1.12.0', 'icehouse'),
1123 ('1.11.0', 'icehouse'),
1124 ('2.0.0', 'juno'),
1125+ ('2.1.0', 'juno'),
1126+ ('2.2.0', 'juno'),
1127 ])
1128
1129 DEFAULT_LOOPBACK_SIZE = '5G'
1130@@ -457,3 +466,21 @@
1131 return result
1132 else:
1133 return result.split('.')[0]
1134+
1135+
1136+def sync_db_with_multi_ipv6_addresses(database, database_user,
1137+ relation_prefix=None):
1138+ hosts = get_ipv6_addr(dynamic_only=False)
1139+
1140+ kwargs = {'database': database,
1141+ 'username': database_user,
1142+ 'hostname': json.dumps(hosts)}
1143+
1144+ if relation_prefix:
1145+ keys = kwargs.keys()
1146+ for key in keys:
1147+ kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
1148+ del kwargs[key]
1149+
1150+ for rid in relation_ids('shared-db'):
1151+ relation_set(relation_id=rid, **kwargs)
1152
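
Typical call site for the new helper; the database names are illustrative:

    from charmhelpers.contrib.openstack.utils import (
        sync_db_with_multi_ipv6_addresses)

    # Publishes all global IPv6 addresses (JSON-encoded) on every
    # shared-db relation so access can be granted per address.
    sync_db_with_multi_ipv6_addresses('zeromq', 'zeromq',
                                      relation_prefix='zeromq')
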
1153=== added directory 'hooks/charmhelpers/contrib/storage'
1154=== added file 'hooks/charmhelpers/contrib/storage/__init__.py'
1155=== added directory 'hooks/charmhelpers/contrib/storage/linux'
1156=== added file 'hooks/charmhelpers/contrib/storage/linux/__init__.py'
1157=== added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
1158--- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000
1159+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-17 13:06:36 +0000
1160@@ -0,0 +1,388 @@
1161+#
1162+# Copyright 2012 Canonical Ltd.
1163+#
1164+# This file is sourced from lp:openstack-charm-helpers
1165+#
1166+# Authors:
1167+# James Page <james.page@ubuntu.com>
1168+# Adam Gandelman <adamg@ubuntu.com>
1169+#
1170+
1171+import os
1172+import shutil
1173+import json
1174+import time
1175+
1176+from subprocess import (
1177+ check_call,
1178+ check_output,
1179+ CalledProcessError
1180+)
1181+
1182+from charmhelpers.core.hookenv import (
1183+ relation_get,
1184+ relation_ids,
1185+ related_units,
1186+ log,
1187+ INFO,
1188+ WARNING,
1189+ ERROR
1190+)
1191+
1192+from charmhelpers.core.host import (
1193+ mount,
1194+ mounts,
1195+ service_start,
1196+ service_stop,
1197+ service_running,
1198+ umount,
1199+)
1200+
1201+from charmhelpers.fetch import (
1202+ apt_install,
1203+)
1204+
1205+KEYRING = '/etc/ceph/ceph.client.{}.keyring'
1206+KEYFILE = '/etc/ceph/ceph.client.{}.key'
1207+
1208+CEPH_CONF = """[global]
1209+ auth supported = {auth}
1210+ keyring = {keyring}
1211+ mon host = {mon_hosts}
1212+ log to syslog = {use_syslog}
1213+ err to syslog = {use_syslog}
1214+ clog to syslog = {use_syslog}
1215+"""
1216+
1217+
1218+def install():
1219+ ''' Basic Ceph client installation '''
1220+ ceph_dir = "/etc/ceph"
1221+ if not os.path.exists(ceph_dir):
1222+ os.mkdir(ceph_dir)
1223+ apt_install('ceph-common', fatal=True)
1224+
1225+
1226+def rbd_exists(service, pool, rbd_img):
1227+ ''' Check to see if a RADOS block device exists '''
1228+ try:
1229+ out = check_output(['rbd', 'list', '--id', service,
1230+ '--pool', pool])
1231+ except CalledProcessError:
1232+ return False
1233+ else:
1234+ return rbd_img in out
1235+
1236+
1237+def create_rbd_image(service, pool, image, sizemb):
1238+ ''' Create a new RADOS block device '''
1239+ cmd = [
1240+ 'rbd',
1241+ 'create',
1242+ image,
1243+ '--size',
1244+ str(sizemb),
1245+ '--id',
1246+ service,
1247+ '--pool',
1248+ pool
1249+ ]
1250+ check_call(cmd)
1251+
1252+
1253+def pool_exists(service, name):
1254+ ''' Check to see if a RADOS pool already exists '''
1255+ try:
1256+ out = check_output(['rados', '--id', service, 'lspools'])
1257+ except CalledProcessError:
1258+ return False
1259+ else:
1260+ return name in out
1261+
1262+
1263+def get_osds(service):
1264+ '''
1265+ Return a list of all Ceph Object Storage Daemons
1266+ currently in the cluster
1267+ '''
1268+ version = ceph_version()
1269+ if version and version >= '0.56':
1270+ return json.loads(check_output(['ceph', '--id', service,
1271+ 'osd', 'ls', '--format=json']))
1272+ else:
1273+ return None
1274+
1275+
1276+def create_pool(service, name, replicas=3):
1277+ ''' Create a new RADOS pool '''
1278+ if pool_exists(service, name):
1279+ log("Ceph pool {} already exists, skipping creation".format(name),
1280+ level=WARNING)
1281+ return
1282+ # Calculate the number of placement groups based
1283+ # on upstream recommended best practices.
1284+ osds = get_osds(service)
1285+ if osds:
1286+ pgnum = (len(osds) * 100 / replicas)
1287+ else:
1288+ # NOTE(james-page): Default to 200 for older ceph versions
1289+ # which don't support OSD query from cli
1290+ pgnum = 200
1291+ cmd = [
1292+ 'ceph', '--id', service,
1293+ 'osd', 'pool', 'create',
1294+ name, str(pgnum)
1295+ ]
1296+ check_call(cmd)
1297+ cmd = [
1298+ 'ceph', '--id', service,
1299+ 'osd', 'pool', 'set', name,
1300+ 'size', str(replicas)
1301+ ]
1302+ check_call(cmd)
1303+
1304+
1305+def delete_pool(service, name):
1306+ ''' Delete a RADOS pool from ceph '''
1307+ cmd = [
1308+ 'ceph', '--id', service,
1309+ 'osd', 'pool', 'delete',
1310+ name, '--yes-i-really-really-mean-it'
1311+ ]
1312+ check_call(cmd)
1313+
1314+
1315+def _keyfile_path(service):
1316+ return KEYFILE.format(service)
1317+
1318+
1319+def _keyring_path(service):
1320+ return KEYRING.format(service)
1321+
1322+
1323+def create_keyring(service, key):
1324+ ''' Create a new Ceph keyring containing key'''
1325+ keyring = _keyring_path(service)
1326+ if os.path.exists(keyring):
1327+ log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
1328+ return
1329+ cmd = [
1330+ 'ceph-authtool',
1331+ keyring,
1332+ '--create-keyring',
1333+ '--name=client.{}'.format(service),
1334+ '--add-key={}'.format(key)
1335+ ]
1336+ check_call(cmd)
1337+ log('ceph: Created new ring at %s.' % keyring, level=INFO)
1338+
1339+
1340+def create_key_file(service, key):
1341+ ''' Create a file containing key '''
1342+ keyfile = _keyfile_path(service)
1343+ if os.path.exists(keyfile):
1344+ log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
1345+ return
1346+ with open(keyfile, 'w') as fd:
1347+ fd.write(key)
1348+ log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
1349+
1350+
1351+def get_ceph_nodes():
1352+ ''' Query named relation 'ceph' to determine current nodes '''
1353+ hosts = []
1354+ for r_id in relation_ids('ceph'):
1355+ for unit in related_units(r_id):
1356+ hosts.append(relation_get('private-address', unit=unit, rid=r_id))
1357+ return hosts
1358+
1359+
1360+def configure(service, key, auth, use_syslog):
1361+ ''' Perform basic configuration of Ceph '''
1362+ create_keyring(service, key)
1363+ create_key_file(service, key)
1364+ hosts = get_ceph_nodes()
1365+ with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
1366+ ceph_conf.write(CEPH_CONF.format(auth=auth,
1367+ keyring=_keyring_path(service),
1368+ mon_hosts=",".join(map(str, hosts)),
1369+ use_syslog=use_syslog))
1370+ modprobe('rbd')
1371+
1372+
1373+def image_mapped(name):
1374+ ''' Determine whether a RADOS block device is mapped locally '''
1375+ try:
1376+ out = check_output(['rbd', 'showmapped'])
1377+ except CalledProcessError:
1378+ return False
1379+ else:
1380+ return name in out
1381+
1382+
1383+def map_block_storage(service, pool, image):
1384+ ''' Map a RADOS block device for local use '''
1385+ cmd = [
1386+ 'rbd',
1387+ 'map',
1388+ '{}/{}'.format(pool, image),
1389+ '--user',
1390+ service,
1391+ '--secret',
1392+ _keyfile_path(service),
1393+ ]
1394+ check_call(cmd)
1395+
1396+
1397+def filesystem_mounted(fs):
1398+ ''' Determine whether a filesystem is already mounted '''
1399+ return fs in [f for f, m in mounts()]
1400+
1401+
1402+def make_filesystem(blk_device, fstype='ext4', timeout=10):
1403+ ''' Make a new filesystem on the specified block device '''
1404+ count = 0
1405+ e_noent = os.errno.ENOENT
1406+ while not os.path.exists(blk_device):
1407+ if count >= timeout:
1408+ log('ceph: gave up waiting on block device %s' % blk_device,
1409+ level=ERROR)
1410+ raise IOError(e_noent, os.strerror(e_noent), blk_device)
1411+ log('ceph: waiting for block device %s to appear' % blk_device,
1412+ level=INFO)
1413+ count += 1
1414+ time.sleep(1)
1415+ else:
1416+ log('ceph: Formatting block device %s as filesystem %s.' %
1417+ (blk_device, fstype), level=INFO)
1418+ check_call(['mkfs', '-t', fstype, blk_device])
1419+
1420+
1421+def place_data_on_block_device(blk_device, data_src_dst):
1422+ ''' Migrate data in data_src_dst to blk_device and then remount '''
1423+ # mount block device into /mnt
1424+ mount(blk_device, '/mnt')
1425+ # copy data to /mnt
1426+ copy_files(data_src_dst, '/mnt')
1427+ # umount block device
1428+ umount('/mnt')
1429+ # Grab user/group ID's from original source
1430+ _dir = os.stat(data_src_dst)
1431+ uid = _dir.st_uid
1432+ gid = _dir.st_gid
1433+ # re-mount where the data should originally be
1434+ # TODO: persist is currently a NO-OP in core.host
1435+ mount(blk_device, data_src_dst, persist=True)
1436+ # ensure original ownership of new mount.
1437+ os.chown(data_src_dst, uid, gid)
1438+
1439+
1440+# TODO: re-use
1441+def modprobe(module):
1442+ ''' Load a kernel module and configure for auto-load on reboot '''
1443+ log('ceph: Loading kernel module', level=INFO)
1444+ cmd = ['modprobe', module]
1445+ check_call(cmd)
1446+ with open('/etc/modules', 'r+') as modules:
1447+ if module not in modules.read():
1448+ modules.write(module)
1449+
1450+
1451+def copy_files(src, dst, symlinks=False, ignore=None):
1452+ ''' Copy files from src to dst '''
1453+ for item in os.listdir(src):
1454+ s = os.path.join(src, item)
1455+ d = os.path.join(dst, item)
1456+ if os.path.isdir(s):
1457+ shutil.copytree(s, d, symlinks, ignore)
1458+ else:
1459+ shutil.copy2(s, d)
1460+
1461+
1462+def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
1463+ blk_device, fstype, system_services=[],
1464+ replicas=3):
1465+ """
1466+ NOTE: This function must only be called from a single service unit for
1467+ the same rbd_img otherwise data loss will occur.
1468+
1469+ Ensures given pool and RBD image exists, is mapped to a block device,
1470+ and the device is formatted and mounted at the given mount_point.
1471+
1472+ If formatting a device for the first time, data existing at mount_point
1473+ will be migrated to the RBD device before being re-mounted.
1474+
1475+ All services listed in system_services will be stopped prior to data
1476+ migration and restarted when complete.
1477+ """
1478+ # Ensure pool, RBD image, RBD mappings are in place.
1479+ if not pool_exists(service, pool):
1480+ log('ceph: Creating new pool {}.'.format(pool))
1481+ create_pool(service, pool, replicas=replicas)
1482+
1483+ if not rbd_exists(service, pool, rbd_img):
1484+ log('ceph: Creating RBD image ({}).'.format(rbd_img))
1485+ create_rbd_image(service, pool, rbd_img, sizemb)
1486+
1487+ if not image_mapped(rbd_img):
1488+ log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
1489+ map_block_storage(service, pool, rbd_img)
1490+
1491+ # make file system
1492+ # TODO: What happens if for whatever reason this is run again and
1493+ # the data is already in the rbd device and/or is mounted??
1494+ # When it is mounted already, it will fail to make the fs
1495+ # XXX: This is really sketchy! Need to at least add an fstab entry
1496+ # otherwise this hook will blow away existing data if its executed
1497+ # after a reboot.
1498+ if not filesystem_mounted(mount_point):
1499+ make_filesystem(blk_device, fstype)
1500+
1501+ for svc in system_services:
1502+ if service_running(svc):
1503+ log('ceph: Stopping services {} prior to migrating data.'
1504+ .format(svc))
1505+ service_stop(svc)
1506+
1507+ place_data_on_block_device(blk_device, mount_point)
1508+
1509+ for svc in system_services:
1510+ log('ceph: Starting service {} after migrating data.'
1511+ .format(svc))
1512+ service_start(svc)
1513+
1514+
1515+def ensure_ceph_keyring(service, user=None, group=None):
1516+ '''
1517+ Ensures a ceph keyring is created for a named service
1518+ and optionally ensures user and group ownership.
1519+
1520+ Returns False if no ceph key is available in relation state.
1521+ '''
1522+ key = None
1523+ for rid in relation_ids('ceph'):
1524+ for unit in related_units(rid):
1525+ key = relation_get('key', rid=rid, unit=unit)
1526+ if key:
1527+ break
1528+ if not key:
1529+ return False
1530+ create_keyring(service=service, key=key)
1531+ keyring = _keyring_path(service)
1532+ if user and group:
1533+ check_call(['chown', '%s.%s' % (user, group), keyring])
1534+ return True
1535+
1536+
1537+def ceph_version():
1538+ ''' Retrieve the local version of ceph '''
1539+ if os.path.exists('/usr/bin/ceph'):
1540+ cmd = ['ceph', '-v']
1541+ output = check_output(cmd)
1542+ output = output.split()
1543+ if len(output) > 3:
1544+ return output[2]
1545+ else:
1546+ return None
1547+ else:
1548+ return None
1549
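
A sketch of the intended consumer flow, per the module's own docstrings. All values are illustrative, and as the docstring warns, ensure_ceph_storage must only ever run from a single unit for a given rbd_img:

    from charmhelpers.contrib.storage.linux import ceph

    ceph.install()
    if ceph.ensure_ceph_keyring(service='mysql', user='mysql',
                                group='mysql'):
        ceph.ensure_ceph_storage(service='mysql', pool='mysql',
                                 rbd_img='mysql', sizemb=10240,
                                 mount_point='/var/lib/mysql',
                                 blk_device='/dev/rbd0', fstype='ext4',
                                 system_services=['mysql'])
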
1550=== added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
1551--- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000
1552+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2014-10-17 13:06:36 +0000
1553@@ -0,0 +1,62 @@
1554+
1555+import os
1556+import re
1557+
1558+from subprocess import (
1559+ check_call,
1560+ check_output,
1561+)
1562+
1563+
1564+##################################################
1565+# loopback device helpers.
1566+##################################################
1567+def loopback_devices():
1568+ '''
1569+ Parse through 'losetup -a' output to determine currently mapped
1570+ loopback devices. Output is expected to look like:
1571+
1572+ /dev/loop0: [0807]:961814 (/tmp/my.img)
1573+
1574+ :returns: dict: a dict mapping {loopback_dev: backing_file}
1575+ '''
1576+ loopbacks = {}
1577+ cmd = ['losetup', '-a']
1578+ devs = [d.strip().split(' ') for d in
1579+ check_output(cmd).splitlines() if d != '']
1580+ for dev, _, f in devs:
1581+ loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
1582+ return loopbacks
1583+
1584+
1585+def create_loopback(file_path):
1586+ '''
1587+ Create a loopback device for a given backing file.
1588+
1589+ :returns: str: Full path to new loopback device (eg, /dev/loop0)
1590+ '''
1591+ file_path = os.path.abspath(file_path)
1592+ check_call(['losetup', '--find', file_path])
1593+ for d, f in loopback_devices().iteritems():
1594+ if f == file_path:
1595+ return d
1596+
1597+
1598+def ensure_loopback_device(path, size):
1599+ '''
1600+ Ensure a loopback device exists for a given backing file path and size.
1601+ If a loopback device is not mapped to the file, a new one will be created.
1602+
1603+ TODO: Confirm size of found loopback device.
1604+
1605+ :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
1606+ '''
1607+ for d, f in loopback_devices().iteritems():
1608+ if f == path:
1609+ return d
1610+
1611+ if not os.path.exists(path):
1612+ cmd = ['truncate', '--size', size, path]
1613+ check_call(cmd)
1614+
1615+ return create_loopback(path)
1616
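
Usage sketch; the backing-file path and size are illustrative:

    from charmhelpers.contrib.storage.linux.loopback import (
        ensure_loopback_device)

    # Creates /srv/loop0.img via 'truncate --size 5G' if missing, then
    # maps it and returns the device path, e.g. '/dev/loop0'.
    dev = ensure_loopback_device('/srv/loop0.img', '5G')
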
1617=== added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
1618--- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000
1619+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-10-17 13:06:36 +0000
1620@@ -0,0 +1,88 @@
1621+from subprocess import (
1622+ CalledProcessError,
1623+ check_call,
1624+ check_output,
1625+ Popen,
1626+ PIPE,
1627+)
1628+
1629+
1630+##################################################
1631+# LVM helpers.
1632+##################################################
1633+def deactivate_lvm_volume_group(block_device):
1634+ '''
1635+ Deactivate any volume group associated with an LVM physical volume.
1636+
1637+ :param block_device: str: Full path to LVM physical volume
1638+ '''
1639+ vg = list_lvm_volume_group(block_device)
1640+ if vg:
1641+ cmd = ['vgchange', '-an', vg]
1642+ check_call(cmd)
1643+
1644+
1645+def is_lvm_physical_volume(block_device):
1646+ '''
1647+ Determine whether a block device is initialized as an LVM PV.
1648+
1649+ :param block_device: str: Full path of block device to inspect.
1650+
1651+ :returns: boolean: True if block device is a PV, False if not.
1652+ '''
1653+ try:
1654+ check_output(['pvdisplay', block_device])
1655+ return True
1656+ except CalledProcessError:
1657+ return False
1658+
1659+
1660+def remove_lvm_physical_volume(block_device):
1661+ '''
1662+ Remove LVM PV signatures from a given block device.
1663+
1664+ :param block_device: str: Full path of block device to scrub.
1665+ '''
1666+ p = Popen(['pvremove', '-ff', block_device],
1667+ stdin=PIPE)
1668+ p.communicate(input='y\n')
1669+
1670+
1671+def list_lvm_volume_group(block_device):
1672+ '''
1673+ List LVM volume group associated with a given block device.
1674+
1675+ Assumes block device is a valid LVM PV.
1676+
1677+ :param block_device: str: Full path of block device to inspect.
1678+
1679+ :returns: str: Name of volume group associated with block device or None
1680+ '''
1681+ vg = None
1682+ pvd = check_output(['pvdisplay', block_device]).splitlines()
1683+ for l in pvd:
1684+ if l.strip().startswith('VG Name'):
1685+ vg = ' '.join(l.strip().split()[2:])
1686+ return vg
1687+
1688+
1689+def create_lvm_physical_volume(block_device):
1690+ '''
1691+ Initialize a block device as an LVM physical volume.
1692+
1693+ :param block_device: str: Full path of block device to initialize.
1694+
1695+ '''
1696+ check_call(['pvcreate', block_device])
1697+
1698+
1699+def create_lvm_volume_group(volume_group, block_device):
1700+ '''
1701+ Create an LVM volume group backed by a given block device.
1702+
1703+ Assumes block device has already been initialized as an LVM PV.
1704+
1705+ :param volume_group: str: Name of volume group to create.
1706+ :block_device: str: Full path of PV-initialized block device.
1707+ '''
1708+ check_call(['vgcreate', volume_group, block_device])
1709
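
Usage sketch of the LVM helpers. The device and volume group names are illustrative, and remove_lvm_physical_volume is destructive:

    from charmhelpers.contrib.storage.linux import lvm

    dev = '/dev/vdb'
    if lvm.is_lvm_physical_volume(dev):
        lvm.deactivate_lvm_volume_group(dev)
        lvm.remove_lvm_physical_volume(dev)
    lvm.create_lvm_physical_volume(dev)
    lvm.create_lvm_volume_group('zeromq-vg', dev)
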
1710=== added file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
1711--- hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000
1712+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-10-17 13:06:36 +0000
1713@@ -0,0 +1,53 @@
1714+import os
1715+import re
1716+from stat import S_ISBLK
1717+
1718+from subprocess import (
1719+ check_call,
1720+ check_output,
1721+ call
1722+)
1723+
1724+
1725+def is_block_device(path):
1726+ '''
1727+ Confirm device at path is a valid block device node.
1728+
1729+ :returns: boolean: True if path is a block device, False if not.
1730+ '''
1731+ if not os.path.exists(path):
1732+ return False
1733+ return S_ISBLK(os.stat(path).st_mode)
1734+
1735+
1736+def zap_disk(block_device):
1737+ '''
1738+ Clear a block device of partition table. Relies on sgdisk, which is
1739+ installed as part of the 'gdisk' package in Ubuntu.
1740+
1741+ :param block_device: str: Full path of block device to clean.
1742+ '''
1743+ # sometimes sgdisk exits non-zero; this is OK, dd will clean up
1744+ call(['sgdisk', '--zap-all', '--mbrtogpt',
1745+ '--clear', block_device])
1746+ dev_end = check_output(['blockdev', '--getsz', block_device])
1747+ gpt_end = int(dev_end.split()[0]) - 100
1748+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
1749+ 'bs=1M', 'count=1'])
1750+ check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
1751+ 'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
1752+
1753+
1754+def is_device_mounted(device):
1755+ '''Given a device path, return True if that device is mounted, and False
1756+ if it isn't.
1757+
1758+ :param device: str: Full path of the device to check.
1759+ :returns: boolean: True if the path represents a mounted device, False if
1760+ it doesn't.
1761+ '''
1762+ is_partition = bool(re.search(r".*[0-9]+\b", device))
1763+ out = check_output(['mount'])
1764+ if is_partition:
1765+ return bool(re.search(device + r"\b", out))
1766+ return bool(re.search(device + r"[0-9]+\b", out))
1767
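
Usage sketch; the device path is illustrative, and zap_disk irreversibly destroys the partition table:

    from charmhelpers.contrib.storage.linux.utils import (
        is_block_device, is_device_mounted, zap_disk)

    dev = '/dev/vdb'
    if is_block_device(dev) and not is_device_mounted(dev):
        zap_disk(dev)
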
1768=== modified file 'hooks/charmhelpers/core/hookenv.py'
1769--- hooks/charmhelpers/core/hookenv.py 2014-09-02 11:17:14 +0000
1770+++ hooks/charmhelpers/core/hookenv.py 2014-10-17 13:06:36 +0000
1771@@ -203,6 +203,17 @@
1772 if os.path.exists(self.path):
1773 self.load_previous()
1774
1775+ def __getitem__(self, key):
1776+ """For regular dict lookups, check the current juju config first,
1777+ then the previous (saved) copy. This ensures that user-saved values
1778+ will be returned by a dict lookup.
1779+
1780+ """
1781+ try:
1782+ return dict.__getitem__(self, key)
1783+ except KeyError:
1784+ return (self._prev_dict or {})[key]
1785+
1786 def load_previous(self, path=None):
1787 """Load previous copy of config from disk.
1788
1789@@ -475,9 +486,10 @@
1790 hooks.execute(sys.argv)
1791 """
1792
1793- def __init__(self):
1794+ def __init__(self, config_save=True):
1795 super(Hooks, self).__init__()
1796 self._hooks = {}
1797+ self._config_save = config_save
1798
1799 def register(self, name, function):
1800 """Register a hook"""
1801@@ -488,9 +500,10 @@
1802 hook_name = os.path.basename(args[0])
1803 if hook_name in self._hooks:
1804 self._hooks[hook_name]()
1805- cfg = config()
1806- if cfg.implicit_save:
1807- cfg.save()
1808+ if self._config_save:
1809+ cfg = config()
1810+ if cfg.implicit_save:
1811+ cfg.save()
1812 else:
1813 raise UnregisteredHookError(hook_name)
1814
1815
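The new config_save flag lets a charm opt out of the implicit config().save() that execute() otherwise performs after each hook; a minimal sketch:

    import sys

    from charmhelpers.core.hookenv import Hooks

    hooks = Hooks(config_save=False)  # skip implicit config().save()

    @hooks.hook('config-changed')
    def config_changed():
        pass  # charm-specific logic

    if __name__ == '__main__':
        hooks.execute(sys.argv)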
1816=== modified file 'hooks/charmhelpers/core/host.py'
1817--- hooks/charmhelpers/core/host.py 2014-09-02 11:17:14 +0000
1818+++ hooks/charmhelpers/core/host.py 2014-10-17 13:06:36 +0000
1819@@ -6,6 +6,7 @@
1820 # Matthew Wedgwood <matthew.wedgwood@canonical.com>
1821
1822 import os
1823+import re
1824 import pwd
1825 import grp
1826 import random
1827@@ -68,8 +69,8 @@
1828 """Determine whether a system service is available"""
1829 try:
1830 subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
1831- except subprocess.CalledProcessError:
1832- return False
1833+ except subprocess.CalledProcessError as e:
1834+ return 'unrecognized service' not in e.output
1835 else:
1836 return True
1837
1838@@ -209,10 +210,15 @@
1839 return system_mounts
1840
1841
1842-def file_hash(path):
1843- """Generate a md5 hash of the contents of 'path' or None if not found """
1844+def file_hash(path, hash_type='md5'):
1845+ """
1846+ Generate a hash checksum of the contents of 'path' or None if not found.
1847+
1848+ :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
1849+ such as md5, sha1, sha256, sha512, etc.
1850+ """
1851 if os.path.exists(path):
1852- h = hashlib.md5()
1853+ h = getattr(hashlib, hash_type)()
1854 with open(path, 'r') as source:
1855 h.update(source.read()) # IGNORE:E1101 - it does have update
1856 return h.hexdigest()
1857@@ -220,6 +226,26 @@
1858 return None
1859
1860
1861+def check_hash(path, checksum, hash_type='md5'):
1862+ """
1863+ Validate a file using a cryptographic checksum.
1864+
1865+ :param str checksum: Value of the checksum used to validate the file.
1866+ :param str hash_type: Hash algorithm used to generate `checksum`.
1867+ Can be any hash algorithm supported by :mod:`hashlib`,
1868+ such as md5, sha1, sha256, sha512, etc.
1869+ :raises ChecksumError: If the file fails the checksum
1870+
1871+ """
1872+ actual_checksum = file_hash(path, hash_type)
1873+ if checksum != actual_checksum:
1874+ raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
1875+
1876+
1877+class ChecksumError(ValueError):
1878+ pass
1879+
1880+
1881 def restart_on_change(restart_map, stopstart=False):
1882 """Restart services based on configuration files changing
1883
1884@@ -292,7 +318,13 @@
1885 ip_output = (line for line in ip_output if line)
1886 for line in ip_output:
1887 if line.split()[1].startswith(int_type):
1888- interfaces.append(line.split()[1].replace(":", ""))
1889+ matched = re.search(r'.*: (bond[0-9]+\.[0-9]+)@.*', line)
1890+ if matched:
1891+ interface = matched.groups()[0]
1892+ else:
1893+ interface = line.split()[1].replace(":", "")
1894+ interfaces.append(interface)
1895+
1896 return interfaces
1897
1898
1899
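check_hash builds on the extended file_hash; a sketch with a placeholder path and digest:

    from charmhelpers.core.host import check_hash, ChecksumError

    def verify(path, expected):
        # 'expected' would normally come from charm config or a relation
        try:
            check_hash(path, expected, hash_type='sha256')
        except ChecksumError as e:
            print('validation failed: %s' % e)

    verify('/tmp/archive.tgz', 'deadbeef')  # hypothetical values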
1900=== modified file 'hooks/charmhelpers/core/services/helpers.py'
1901--- hooks/charmhelpers/core/services/helpers.py 2014-09-02 11:17:14 +0000
1902+++ hooks/charmhelpers/core/services/helpers.py 2014-10-17 13:06:36 +0000
1903@@ -1,3 +1,5 @@
1904+import os
1905+import yaml
1906 from charmhelpers.core import hookenv
1907 from charmhelpers.core import templating
1908
1909@@ -19,15 +21,21 @@
1910 the `name` attribute that are complete will be used to populate the dictionary
1911 values (see `get_data`, below).
1912
1913- The generated context will be namespaced under the interface type, to prevent
1914- potential naming conflicts.
1915+ The generated context will be namespaced under the relation :attr:`name`,
1916+ to prevent potential naming conflicts.
1917+
1918+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
1919+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
1920 """
1921 name = None
1922 interface = None
1923 required_keys = []
1924
1925- def __init__(self, *args, **kwargs):
1926- super(RelationContext, self).__init__(*args, **kwargs)
1927+ def __init__(self, name=None, additional_required_keys=None):
1928+ if name is not None:
1929+ self.name = name
1930+ if additional_required_keys is not None:
1931+ self.required_keys = self.required_keys + additional_required_keys
1932 self.get_data()
1933
1934 def __bool__(self):
1935@@ -101,9 +109,115 @@
1936 return {}
1937
1938
1939+class MysqlRelation(RelationContext):
1940+ """
1941+ Relation context for the `mysql` interface.
1942+
1943+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
1944+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
1945+ """
1946+ name = 'db'
1947+ interface = 'mysql'
1948+ required_keys = ['host', 'user', 'password', 'database']
1949+
1950+
1951+class HttpRelation(RelationContext):
1952+ """
1953+ Relation context for the `http` interface.
1954+
1955+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
1956+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
1957+ """
1958+ name = 'website'
1959+ interface = 'http'
1960+ required_keys = ['host', 'port']
1961+
1962+ def provide_data(self):
1963+ return {
1964+ 'host': hookenv.unit_get('private-address'),
1965+ 'port': 80,
1966+ }
1967+
1968+
1969+class RequiredConfig(dict):
1970+ """
1971+ Data context that loads config options with one or more mandatory options.
1972+
1973+ Once the required options have been changed from their default values, all
1974+ config options will be available, namespaced under `config` to prevent
1975+ potential naming conflicts (for example, between a config option and a
1976+ relation property).
1977+
1978+ :param list *args: List of options that must be changed from their default values.
1979+ """
1980+
1981+ def __init__(self, *args):
1982+ self.required_options = args
1983+ self['config'] = hookenv.config()
1984+ with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
1985+ self.config = yaml.load(fp).get('options', {})
1986+
1987+ def __bool__(self):
1988+ for option in self.required_options:
1989+ if option not in self['config']:
1990+ return False
1991+ current_value = self['config'][option]
1992+ default_value = self.config[option].get('default')
1993+ if current_value == default_value:
1994+ return False
1995+ if current_value in (None, '') and default_value in (None, ''):
1996+ return False
1997+ return True
1998+
1999+ def __nonzero__(self):
2000+ return self.__bool__()
2001+
2002+
2003+class StoredContext(dict):
2004+ """
2005+ A data context that always returns the data that it was first created with.
2006+
2007+ This is useful to do a one-time generation of things like passwords, that
2008+ will thereafter use the same value that was originally generated, instead
2009+ of generating a new value each time it is run.
2010+ """
2011+ def __init__(self, file_name, config_data):
2012+ """
2013+ If the file exists, populate `self` with the data from the file.
2014+ Otherwise, populate with the given data and persist it to the file.
2015+ """
2016+ if os.path.exists(file_name):
2017+ self.update(self.read_context(file_name))
2018+ else:
2019+ self.store_context(file_name, config_data)
2020+ self.update(config_data)
2021+
2022+ def store_context(self, file_name, config_data):
2023+ if not os.path.isabs(file_name):
2024+ file_name = os.path.join(hookenv.charm_dir(), file_name)
2025+ with open(file_name, 'w') as file_stream:
2026+ os.fchmod(file_stream.fileno(), 0600)
2027+ yaml.dump(config_data, file_stream)
2028+
2029+ def read_context(self, file_name):
2030+ if not os.path.isabs(file_name):
2031+ file_name = os.path.join(hookenv.charm_dir(), file_name)
2032+ with open(file_name, 'r') as file_stream:
2033+ data = yaml.load(file_stream)
2034+ if not data:
2035+ raise OSError("%s is empty" % file_name)
2036+ return data
2037+
2038+
2039 class TemplateCallback(ManagerCallback):
2040 """
2041- Callback class that will render a template, for use as a ready action.
2042+ Callback class that will render a Jinja2 template, for use as a ready action.
2043+
2044+ :param str source: The template source file, relative to `$CHARM_DIR/templates`
2045+ :param str target: The target to write the rendered template to
2046+ :param str owner: The owner of the rendered file
2047+ :param str group: The group of the rendered file
2048+ :param int perms: The permissions of the rendered file
2049 """
2050 def __init__(self, source, target, owner='root', group='root', perms=0444):
2051 self.source = source
2052
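Of the new contexts, StoredContext is the least obvious; it implements a one-time-secret pattern. A sketch with a hypothetical file name:

    import uuid

    from charmhelpers.core.services.helpers import StoredContext

    # First run: persists the generated password (file mode 0600).
    # Later runs: returns the stored value; the fresh default is ignored.
    ctx = StoredContext('unit-secrets.yaml',
                        {'admin_password': uuid.uuid4().hex})
    password = ctx['admin_password']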
2053=== added file 'hooks/charmhelpers/core/sysctl.py'
2054--- hooks/charmhelpers/core/sysctl.py 1970-01-01 00:00:00 +0000
2055+++ hooks/charmhelpers/core/sysctl.py 2014-10-17 13:06:36 +0000
2056@@ -0,0 +1,34 @@
2057+#!/usr/bin/env python
2058+# -*- coding: utf-8 -*-
2059+
2060+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
2061+
2062+import yaml
2063+
2064+from subprocess import check_call
2065+
2066+from charmhelpers.core.hookenv import (
2067+ log,
2068+ DEBUG,
2069+)
2070+
2071+
2072+def create(sysctl_dict, sysctl_file):
2073+ """Creates a sysctl.conf file from a YAML associative array
2074+
2075+ :param sysctl_dict: YAML-formatted string of sysctl options, eg "{ 'kernel.max_pid': 1337 }"
2076+ :type sysctl_dict: str
2077+ :param sysctl_file: path to the sysctl file to be saved
2078+ :type sysctl_file: str or unicode
2079+ :returns: None
2080+ """
2081+ sysctl_dict = yaml.load(sysctl_dict)
2082+
2083+ with open(sysctl_file, "w") as fd:
2084+ for key, value in sysctl_dict.items():
2085+ fd.write("{}={}\n".format(key, value))
2086+
2087+ log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict),
2088+ level=DEBUG)
2089+
2090+ check_call(["sysctl", "-p", sysctl_file])
2091
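Note that create() parses its first argument with yaml.load(), so callers pass a YAML string rather than a dict; a sketch with a hypothetical target path:

    from charmhelpers.core.sysctl import create

    create("{ net.ipv4.ip_forward: 1, vm.swappiness: 10 }",
           '/etc/sysctl.d/50-charm.conf')
    # writes key=value lines, then runs: sysctl -p /etc/sysctl.d/50-charm.conf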
2092=== modified file 'hooks/charmhelpers/fetch/__init__.py'
2093--- hooks/charmhelpers/fetch/__init__.py 2014-09-02 11:17:14 +0000
2094+++ hooks/charmhelpers/fetch/__init__.py 2014-10-17 13:06:36 +0000
2095@@ -208,7 +208,8 @@
2096 """Add a package source to this system.
2097
2098 @param source: a URL or sources.list entry, as supported by
2099- add-apt-repository(1). Examples:
2100+ add-apt-repository(1). Examples::
2101+
2102 ppa:charmers/example
2103 deb https://stub:key@private.example.com/ubuntu trusty main
2104
2105@@ -311,22 +312,35 @@
2106 apt_update(fatal=True)
2107
2108
2109-def install_remote(source):
2110+def install_remote(source, *args, **kwargs):
2111 """
2112 Install a file tree from a remote source
2113
2114 The specified source should be a url of the form:
2115 scheme://[host]/path[#[option=value][&...]]
2116
2117- Schemes supported are based on this modules submodules
2118- Options supported are submodule-specific"""
2119+ Schemes supported are based on this module's submodules.
2120+ Options supported are submodule-specific.
2121+ Additional arguments are passed through to the submodule.
2122+
2123+ For example::
2124+
2125+ dest = install_remote('http://example.com/archive.tgz',
2126+ checksum='deadbeef',
2127+ hash_type='sha1')
2128+
2129+ This will download `archive.tgz`, validate it using SHA1 and, if
2130+ the file is ok, extract it and return the directory in which it
2131+ was extracted. If the checksum fails, it will raise
2132+ :class:`charmhelpers.core.host.ChecksumError`.
2133+ """
2134 # We ONLY check for True here because can_handle may return a string
2135 # explaining why it can't handle a given source.
2136 handlers = [h for h in plugins() if h.can_handle(source) is True]
2137 installed_to = None
2138 for handler in handlers:
2139 try:
2140- installed_to = handler.install(source)
2141+ installed_to = handler.install(source, *args, **kwargs)
2142 except UnhandledSource:
2143 pass
2144 if not installed_to:
2145
2146=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
2147--- hooks/charmhelpers/fetch/archiveurl.py 2014-09-02 11:17:14 +0000
2148+++ hooks/charmhelpers/fetch/archiveurl.py 2014-10-17 13:06:36 +0000
2149@@ -1,6 +1,8 @@
2150 import os
2151 import urllib2
2152+from urllib import urlretrieve
2153 import urlparse
2154+import hashlib
2155
2156 from charmhelpers.fetch import (
2157 BaseFetchHandler,
2158@@ -10,11 +12,19 @@
2159 get_archive_handler,
2160 extract,
2161 )
2162-from charmhelpers.core.host import mkdir
2163+from charmhelpers.core.host import mkdir, check_hash
2164
2165
2166 class ArchiveUrlFetchHandler(BaseFetchHandler):
2167- """Handler for archives via generic URLs"""
2168+ """
2169+ Handler to download archive files from arbitrary URLs.
2170+
2171+ Can fetch from http, https, ftp, and file URLs.
2172+
2173+ Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
2174+
2175+ Installs the contents of the archive in $CHARM_DIR/fetched/.
2176+ """
2177 def can_handle(self, source):
2178 url_parts = self.parse_url(source)
2179 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
2180@@ -24,6 +34,12 @@
2181 return False
2182
2183 def download(self, source, dest):
2184+ """
2185+ Download an archive file.
2186+
2187+ :param str source: URL pointing to an archive file.
2188+ :param str dest: Local path location to download archive file to.
2189+ """
2190 # propagate all exceptions
2191 # URLError, OSError, etc
2192 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
2193@@ -48,7 +64,30 @@
2194 os.unlink(dest)
2195 raise e
2196
2197- def install(self, source):
2197+ # Mandatory file validation via SHA1 or MD5 hashing.
2199+ def download_and_validate(self, url, hashsum, validate="sha1"):
2200+ tempfile, headers = urlretrieve(url)
2201+ check_hash(tempfile, hashsum, validate)
2202+ return tempfile
2203+
2204+ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
2205+ """
2206+ Download and install an archive file, with optional checksum validation.
2207+
2208+ The checksum can also be given on the `source` URL's fragment.
2209+ For example::
2210+
2211+ handler.install('http://example.com/file.tgz#sha1=deadbeef')
2212+
2213+ :param str source: URL pointing to an archive file.
2214+ :param str dest: Local destination path to install to. If not given,
2215+ installs to `$CHARM_DIR/archives/archive_file_name`.
2216+ :param str checksum: If given, validate the archive file after download.
2217+ :param str hash_type: Algorithm used to generate `checksum`.
2218+ Can be any hash algorithm supported by :mod:`hashlib`,
2219+ such as md5, sha1, sha256, sha512, etc.
2220+
2221+ """
2222 url_parts = self.parse_url(source)
2223 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
2224 if not os.path.exists(dest_dir):
2225@@ -60,4 +99,10 @@
2226 raise UnhandledSource(e.reason)
2227 except OSError as e:
2228 raise UnhandledSource(e.strerror)
2229- return extract(dld_file)
2230+ options = urlparse.parse_qs(url_parts.fragment)
2231+ for key, value in options.items():
2232+ if key in hashlib.algorithms:
2233+ check_hash(dld_file, value[0], key)
2234+ if checksum:
2235+ check_hash(dld_file, checksum, hash_type)
2236+ return extract(dld_file, dest)
2237
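A sketch of the two validation paths install() now supports; URL and digest are placeholders:

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()

    # 1. Checksum embedded in the URL fragment:
    handler.install('http://example.com/pkg.tgz#sha1=deadbeef')

    # 2. Explicit checksum arguments:
    handler.install('http://example.com/pkg.tgz',
                    checksum='deadbeef', hash_type='sha1')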
2238=== added file 'hooks/zeromq_context.py'
2239--- hooks/zeromq_context.py 1970-01-01 00:00:00 +0000
2240+++ hooks/zeromq_context.py 2014-10-17 13:06:36 +0000
2241@@ -0,0 +1,41 @@
2242+import json
2243+from charmhelpers.core.hookenv import (
2244+ relation_ids,
2245+ related_units,
2246+ relation_get,
2247+ unit_private_ip,
2248+)
2249+from charmhelpers.contrib.openstack import context
2250+import socket
2251+
2252+class MatchmakerContext(context.OSContextGenerator):
2253+
2254+ def __call__(self):
2255+ topics = {}
2256+ for rid in relation_ids('topology'):
2257+ for unit in related_units(rid):
2258+ topic_info = relation_get(unit=unit, rid=rid)
2259+ if 'topics' in topic_info and 'host' in topic_info:
2260+ for topic in topic_info['topics'].split():
2261+ if topic in topics:
2262+ topics[topic].append(topic_info['host'])
2263+ else:
2264+ topics[topic] = [topic_info['host']]
2265+ for rid in relation_ids('zeromq-configuration'):
2266+ for unit in related_units(rid):
2267+ topic_info = relation_get(unit=unit, rid=rid)
2268+ topic_info['host'] = socket.gethostname()
2269+ if 'topics' in topic_info:
2270+ for topic in topic_info['topics'].split():
2271+ if topic in topics:
2272+ topics[topic].append(topic_info['host'])
2273+ else:
2274+ topics[topic] = [topic_info['host']]
2275+ return {'topology': json.dumps(topics, indent=4)}
2276+
2277+
2278+class OsloZMQContext(context.OSContextGenerator):
2279+
2280+ def __call__(self):
2281+
2282+ return {'zmq_host': socket.gethostname()}
2283
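For reference, MatchmakerContext produces the dict that feeds the matchmaker_ring.json template added below; a unit advertising topics 'cert notifications' would yield roughly (hostname hypothetical):

    from zeromq_context import MatchmakerContext

    ctxt = MatchmakerContext()()
    # ctxt['topology'] is a JSON string mapping topic -> hosts, e.g.:
    # {
    #     "cert": ["juju-machine-1"],
    #     "notifications": ["juju-machine-1"]
    # }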
2284=== modified file 'hooks/zeromq_hooks.py'
2285--- hooks/zeromq_hooks.py 2014-10-15 11:29:28 +0000
2286+++ hooks/zeromq_hooks.py 2014-10-17 13:06:36 +0000
2287@@ -1,13 +1,10 @@
2288 #!/usr/bin/python
2289
2290-import json
2291-import shutil
2292+import socket
2293 import sys
2294 import uuid
2295-import socket
2296
2297 from charmhelpers.fetch import add_source
2298-from charmhelpers.core.templating import render
2299 from charmhelpers.fetch import apt_install, apt_update
2300 from charmhelpers.core.host import (
2301 adduser,
2302@@ -19,66 +16,37 @@
2303 from charmhelpers.core.hookenv import (
2304 Hooks,
2305 UnregisteredHookError,
2306- charm_dir,
2307 log,
2308 relation_get,
2309 relation_ids,
2310 relation_set,
2311- related_units,
2312+)
2313+from zeromq_utils import (
2314+ determine_packages,
2315+ get_principle_topics,
2316+ register_configs,
2317+ restart_map,
2318+ write_oslo_upstart,
2319 )
2320
2321 hooks = Hooks()
2322+CONFIGS = register_configs()
2323
2324
2325 @hooks.hook('install')
2326 def install():
2327 add_source('ppa:james-page/0mq')
2328 apt_update()
2329- apt_install(['python-zmq', 'python-oslo.messaging'], fatal=True)
2330+ apt_install(determine_packages(), fatal=True)
2331 adduser('oslo', password='oslo', system_user=True)
2332 mkdir('/etc/oslo/', owner='oslo', group='oslo', perms=0755)
2333
2334
2335-def write_mapping():
2336- topics = {}
2337- topology_file = '/etc/oslo/matchmaker_ring.json'
2338- for rid in relation_ids('topology'):
2339- for unit in related_units(rid):
2340- topic_info = relation_get(unit=unit, rid=rid)
2341- if 'topics' in topic_info and 'host' in topic_info:
2342- for topic in topic_info['topics'].split():
2343- if topic in topics:
2344- topics[topic].append(topic_info['host'])
2345- else:
2346- topics[topic] = [topic_info['host']]
2347- for rid in relation_ids('zeromq-configuration'):
2348- for unit in related_units(rid):
2349- topic_info = relation_get(unit=unit, rid=rid)
2350- topic_info['host'] = socket.gethostname()
2351- if 'topics' in topic_info:
2352- for topic in topic_info['topics'].split():
2353- if topic in topics:
2354- topics[topic].append(topic_info['host'])
2355- else:
2356- topics[topic] = [topic_info['host']]
2357- with open(topology_file, 'w') as outfile:
2358- json.dump(topics, outfile, indent=4)
2359- oslo_msg_file = 'oslo-messaging.conf'
2360-
2361- ctxt = {
2362- 'zmq_host': socket.gethostname(),
2363- }
2364- render(oslo_msg_file, '/etc/oslo/oslo-messaging.conf', ctxt)
2365-
2366 @hooks.hook('config-changed')
2367-@restart_on_change({
2368- '/etc/oslo/oslo-messaging.conf': ['oslo-messaging-zmq-receiver'],
2369- '/etc/init/oslo-messaging-zmq-receiver.conf': ['oslo-messaging-zmq-receiver']
2370-})
2371+@restart_on_change(restart_map(), stopstart=True)
2372 def config_changed():
2373- upstart_file = charm_dir() + '/files/' + 'oslo-messaging-zmq-receiver.conf'
2374- shutil.copyfile(upstart_file, '/etc/init/oslo-messaging-zmq-receiver.conf')
2375- write_mapping()
2376+ write_oslo_upstart()
2377+ CONFIGS.write_all()
2378 for rid in relation_ids('zeromq-configuration'):
2379 relation_set(relation_id=rid, host=socket.gethostname())
2380 configuration_relation_joined(rid=rid, remote_restart=True)
2381@@ -91,17 +59,9 @@
2382 if remote_restart:
2383 relation_set(relation_id=rid, nonce=str(uuid.uuid4()))
2384
2385-def get_principle_topics():
2386- princile_topics = []
2387- for rid in relation_ids('zeromq-configuration'):
2388- for unit in related_units(rid):
2389- topics = relation_get(attribute='topics', unit=unit, rid=rid)
2390- if topics:
2391- princile_topics += topics.split()
2392- return princile_topics
2393-
2394
2395 @hooks.hook('zeromq-configuration-relation-changed')
2396+@restart_on_change(restart_map(), stopstart=True)
2397 def configuration_relation_changed():
2398 rel_info = relation_get()
2399 if 'users' in rel_info:
2400@@ -112,13 +72,14 @@
2401 topics = " ".join(get_principle_topics())
2402 relation_set(relation_id=rid, topics=topics,
2403 host=socket.gethostname())
2404- write_mapping()
2405+ CONFIGS.write_all()
2406
2407
2408 @hooks.hook('topology-relation-changed',
2409 'topology-relation-departed')
2410+@restart_on_change(restart_map(), stopstart=True)
2411 def topology_relation_changed():
2412- write_mapping()
2413+ CONFIGS.write_all()
2414 # NOTE: drop when auto-reload of config file is implemented
2415 for rid in relation_ids('zeromq-configuration'):
2416 configuration_relation_joined(rid=rid, remote_restart=True)
2417
2418=== added file 'hooks/zeromq_utils.py'
2419--- hooks/zeromq_utils.py 1970-01-01 00:00:00 +0000
2420+++ hooks/zeromq_utils.py 2014-10-17 13:06:36 +0000
2421@@ -0,0 +1,78 @@
2422+from collections import OrderedDict
2423+from copy import deepcopy
2424+import zeromq_context
2425+from charmhelpers.contrib.openstack import templating
2426+import shutil
2427+from charmhelpers.core.host import (
2428+ service_running,
2429+ service_start,
2430+)
2431+from charmhelpers.core.hookenv import charm_dir
2432+from charmhelpers.core.hookenv import (
2433+ relation_get,
2434+ relation_ids,
2435+ related_units,
2436+)
2437+MATCHMAKER_CONF = "/etc/oslo/matchmaker_ring.json"
2438+OSLO_MSG_CONF = "/etc/oslo/oslo-messaging.conf"
2439+OSLO_UPSTART_CONF = "/etc/init/oslo-messaging-zmq-receiver.conf"
2440+TEMPLATES = 'templates/'
2441+
2442+BASE_RESOURCE_MAP = OrderedDict([
2443+ (MATCHMAKER_CONF, {
2444+ 'services': ['oslo-messaging-zmq-receiver'],
2445+ 'contexts': [zeromq_context.MatchmakerContext()],
2446+ }),
2447+ (OSLO_MSG_CONF, {
2448+ 'services': ['oslo-messaging-zmq-receiver'],
2449+ 'contexts': [zeromq_context.OsloZMQContext()],
2450+ }),
2451+])
2452+BASE_PACKAGES = [
2453+ 'python-zmq',
2454+ 'python-oslo.messaging',
2455+]
2456+
2457+
2458+def determine_packages():
2459+ return BASE_PACKAGES
2460+
2461+
2462+def resource_map():
2463+ '''
2464+ Dynamically generate a map of resources that will be managed for a single
2465+ hook execution.
2466+ '''
2467+ resource_map = deepcopy(BASE_RESOURCE_MAP)
2468+ return resource_map
2469+
2470+
2471+def register_configs(release=None):
2472+ configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
2473+ openstack_release='juno')
2474+ for cfg, rscs in resource_map().iteritems():
2475+ configs.register(cfg, rscs['contexts'])
2476+ return configs
2477+
2478+
2479+def restart_map():
2480+ return OrderedDict([(cfg, v['services'])
2481+ for cfg, v in resource_map().iteritems()
2482+ if v['services']])
2483+
2484+
2485+def write_oslo_upstart():
2486+ upstart_file = charm_dir() + '/files/oslo-messaging-zmq-receiver.conf'
2487+ shutil.copyfile(upstart_file, OSLO_UPSTART_CONF)
2488+ if not service_running('oslo-messaging-zmq-receiver'):
2489+ service_start('oslo-messaging-zmq-receiver')
2490+
2491+
2492+def get_principle_topics():
2493+ principle_topics = []
2494+ for rid in relation_ids('zeromq-configuration'):
2495+ for unit in related_units(rid):
2496+ topics = relation_get(attribute='topics', unit=unit, rid=rid)
2497+ if topics:
2498+ principle_topics += topics.split()
2499+ return principle_topics
2500
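Given BASE_RESOURCE_MAP above, restart_map() is what the hooks feed to restart_on_change; its expected value is:

    from zeromq_utils import restart_map

    restart_map()
    # OrderedDict([
    #     ('/etc/oslo/matchmaker_ring.json',
    #      ['oslo-messaging-zmq-receiver']),
    #     ('/etc/oslo/oslo-messaging.conf',
    #      ['oslo-messaging-zmq-receiver']),
    # ])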
2501=== added file 'templates/matchmaker_ring.json'
2502--- templates/matchmaker_ring.json 1970-01-01 00:00:00 +0000
2503+++ templates/matchmaker_ring.json 2014-10-17 13:06:36 +0000
2504@@ -0,0 +1,1 @@
2505+{{ topology }}
2506
2507=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
2508--- tests/charmhelpers/contrib/amulet/deployment.py 2014-09-02 11:17:14 +0000
2509+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-10-17 13:06:36 +0000
2510@@ -24,25 +24,31 @@
2511 """Add services.
2512
2513 Add services to the deployment where this_service is the local charm
2514- that we're focused on testing and other_services are the other
2515- charms that come from the charm store.
2516+ that we're testing and other_services are the other services that
2517+ are being used in the local amulet tests.
2518 """
2519- name, units = range(2)
2520-
2521- if this_service[name] != os.path.basename(os.getcwd()):
2522- s = this_service[name]
2523+ if this_service['name'] != os.path.basename(os.getcwd()):
2524+ s = this_service['name']
2525 msg = "The charm's root directory name needs to be {}".format(s)
2526 amulet.raise_status(amulet.FAIL, msg=msg)
2527
2528- self.d.add(this_service[name], units=this_service[units])
2529+ if 'units' not in this_service:
2530+ this_service['units'] = 1
2531+
2532+ self.d.add(this_service['name'], units=this_service['units'])
2533
2534 for svc in other_services:
2535- if self.series:
2536- self.d.add(svc[name],
2537- charm='cs:{}/{}'.format(self.series, svc[name]),
2538- units=svc[units])
2539+ if 'location' in svc:
2540+ branch_location = svc['location']
2541+ elif self.series:
2542+ branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
2543 else:
2544- self.d.add(svc[name], units=svc[units])
2545+ branch_location = None
2546+
2547+ if 'units' not in svc:
2548+ svc['units'] = 1
2549+
2550+ self.d.add(svc['name'], charm=branch_location, units=svc['units'])
2551
2552 def _add_relations(self, relations):
2553 """Add all of the relations for the services."""
2554@@ -57,7 +63,7 @@
2555 def _deploy(self):
2556 """Deploy environment and wait for all hooks to finish executing."""
2557 try:
2558- self.d.setup()
2559+ self.d.setup(timeout=900)
2560 self.d.sentry.wait(timeout=900)
2561 except amulet.helpers.TimeoutError:
2562 amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
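A sketch of how a test drives the updated dict-based API; service names and branch location are hypothetical:

    from charmhelpers.contrib.amulet.deployment import AmuletDeployment

    d = AmuletDeployment(series='trusty')
    d._add_services(
        {'name': 'openstack-zeromq'},          # 'units' defaults to 1
        [{'name': 'rabbitmq-server', 'units': 1},
         {'name': 'my-charm',
          'location': 'lp:~me/charms/trusty/my-charm/trunk'}])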
