Merge lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup into lp:charms/trusty/openstack-zeromq

Proposed by Liam Young
Status: Rejected
Rejected by: James Page
Proposed branch: lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup
Merge into: lp:charms/trusty/openstack-zeromq
Diff against target: 2562 lines (+1786/-195)
24 files modified
charm-helpers-hooks.yaml (+2/-0)
hooks/charmhelpers/contrib/hahelpers/apache.py (+66/-0)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+225/-0)
hooks/charmhelpers/contrib/network/ip.py (+194/-19)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+38/-8)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+5/-4)
hooks/charmhelpers/contrib/openstack/context.py (+204/-69)
hooks/charmhelpers/contrib/openstack/ip.py (+1/-1)
hooks/charmhelpers/contrib/openstack/utils.py (+28/-1)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+388/-0)
hooks/charmhelpers/contrib/storage/linux/loopback.py (+62/-0)
hooks/charmhelpers/contrib/storage/linux/lvm.py (+88/-0)
hooks/charmhelpers/contrib/storage/linux/utils.py (+53/-0)
hooks/charmhelpers/core/hookenv.py (+17/-4)
hooks/charmhelpers/core/host.py (+38/-6)
hooks/charmhelpers/core/services/helpers.py (+119/-5)
hooks/charmhelpers/core/sysctl.py (+34/-0)
hooks/charmhelpers/fetch/__init__.py (+19/-5)
hooks/charmhelpers/fetch/archiveurl.py (+49/-4)
hooks/zeromq_context.py (+41/-0)
hooks/zeromq_hooks.py (+17/-56)
hooks/zeromq_utils.py (+78/-0)
templates/matchmaker_ring.json (+1/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+19/-13)
To merge this branch: bzr merge lp:~gnuoy/charms/trusty/openstack-zeromq/tidyup
Reviewer Review Type Date Requested Status
James Page Needs Fixing
Review via email: mp+238712@code.launchpad.net
To post a comment you must log in.
Revision history for this message
James Page (james-page) :
review: Needs Fixing

Unmerged revisions

22. By Liam Young

Tidy up charm and bring format inline with other os charms

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== modified file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 2014-09-08 13:55:11 +0000
+++ charm-helpers-hooks.yaml 2014-10-17 13:06:36 +0000
@@ -5,3 +5,5 @@
     - fetch
     - contrib.openstack
     - contrib.network
+    - contrib.hahelpers
+    - contrib.storage
 
=== added directory 'hooks/charmhelpers/contrib/hahelpers'
=== added file 'hooks/charmhelpers/contrib/hahelpers/__init__.py'
=== added file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
--- hooks/charmhelpers/contrib/hahelpers/apache.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,66 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import subprocess
12
13from charmhelpers.core.hookenv import (
14 config as config_get,
15 relation_get,
16 relation_ids,
17 related_units as relation_list,
18 log,
19 INFO,
20)
21
22
23def get_cert(cn=None):
24 # TODO: deal with multiple https endpoints via charm config
25 cert = config_get('ssl_cert')
26 key = config_get('ssl_key')
27 if not (cert and key):
28 log("Inspecting identity-service relations for SSL certificate.",
29 level=INFO)
30 cert = key = None
31 if cn:
32 ssl_cert_attr = 'ssl_cert_{}'.format(cn)
33 ssl_key_attr = 'ssl_key_{}'.format(cn)
34 else:
35 ssl_cert_attr = 'ssl_cert'
36 ssl_key_attr = 'ssl_key'
37 for r_id in relation_ids('identity-service'):
38 for unit in relation_list(r_id):
39 if not cert:
40 cert = relation_get(ssl_cert_attr,
41 rid=r_id, unit=unit)
42 if not key:
43 key = relation_get(ssl_key_attr,
44 rid=r_id, unit=unit)
45 return (cert, key)
46
47
48def get_ca_cert():
49 ca_cert = config_get('ssl_ca')
50 if ca_cert is None:
51 log("Inspecting identity-service relations for CA SSL certificate.",
52 level=INFO)
53 for r_id in relation_ids('identity-service'):
54 for unit in relation_list(r_id):
55 if ca_cert is None:
56 ca_cert = relation_get('ca_cert',
57 rid=r_id, unit=unit)
58 return ca_cert
59
60
61def install_ca_cert(ca_cert):
62 if ca_cert:
63 with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
64 'w') as crt:
65 crt.write(ca_cert)
66 subprocess.check_call(['update-ca-certificates', '--fresh'])
067
=== added file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,225 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# Authors:
5# James Page <james.page@ubuntu.com>
6# Adam Gandelman <adamg@ubuntu.com>
7#
8
9"""
10Helpers for clustering and determining "cluster leadership" and other
11clustering-related helpers.
12"""
13
14import subprocess
15import os
16
17from socket import gethostname as get_unit_hostname
18
19from charmhelpers.core.hookenv import (
20 log,
21 relation_ids,
22 related_units as relation_list,
23 relation_get,
24 config as config_get,
25 INFO,
26 ERROR,
27 WARNING,
28 unit_get,
29)
30
31
32class HAIncompleteConfig(Exception):
33 pass
34
35
36def is_elected_leader(resource):
37 """
38 Returns True if the charm executing this is the elected cluster leader.
39
40 It relies on two mechanisms to determine leadership:
41 1. If the charm is part of a corosync cluster, call corosync to
42 determine leadership.
43 2. If the charm is not part of a corosync cluster, the leader is
44 determined as being "the alive unit with the lowest unit numer". In
45 other words, the oldest surviving unit.
46 """
47 if is_clustered():
48 if not is_crm_leader(resource):
49 log('Deferring action to CRM leader.', level=INFO)
50 return False
51 else:
52 peers = peer_units()
53 if peers and not oldest_peer(peers):
54 log('Deferring action to oldest service unit.', level=INFO)
55 return False
56 return True
57
58
59def is_clustered():
60 for r_id in (relation_ids('ha') or []):
61 for unit in (relation_list(r_id) or []):
62 clustered = relation_get('clustered',
63 rid=r_id,
64 unit=unit)
65 if clustered:
66 return True
67 return False
68
69
70def is_crm_leader(resource):
71 """
72 Returns True if the charm calling this is the elected corosync leader,
73 as returned by calling the external "crm" command.
74 """
75 cmd = [
76 "crm", "resource",
77 "show", resource
78 ]
79 try:
80 status = subprocess.check_output(cmd)
81 except subprocess.CalledProcessError:
82 return False
83 else:
84 if get_unit_hostname() in status:
85 return True
86 else:
87 return False
88
89
90def is_leader(resource):
91 log("is_leader is deprecated. Please consider using is_crm_leader "
92 "instead.", level=WARNING)
93 return is_crm_leader(resource)
94
95
96def peer_units(peer_relation="cluster"):
97 peers = []
98 for r_id in (relation_ids(peer_relation) or []):
99 for unit in (relation_list(r_id) or []):
100 peers.append(unit)
101 return peers
102
103
104def peer_ips(peer_relation='cluster', addr_key='private-address'):
105 '''Return a dict of peers and their private-address'''
106 peers = {}
107 for r_id in relation_ids(peer_relation):
108 for unit in relation_list(r_id):
109 peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
110 return peers
111
112
113def oldest_peer(peers):
114 """Determines who the oldest peer is by comparing unit numbers."""
115 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
116 for peer in peers:
117 remote_unit_no = int(peer.split('/')[1])
118 if remote_unit_no < local_unit_no:
119 return False
120 return True
121
122
123def eligible_leader(resource):
124 log("eligible_leader is deprecated. Please consider using "
125 "is_elected_leader instead.", level=WARNING)
126 return is_elected_leader(resource)
127
128
129def https():
130 '''
131 Determines whether enough data has been provided in configuration
132 or relation data to configure HTTPS
133 .
134 returns: boolean
135 '''
136 if config_get('use-https') == "yes":
137 return True
138 if config_get('ssl_cert') and config_get('ssl_key'):
139 return True
140 for r_id in relation_ids('identity-service'):
141 for unit in relation_list(r_id):
142 # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
143 rel_state = [
144 relation_get('https_keystone', rid=r_id, unit=unit),
145 relation_get('ca_cert', rid=r_id, unit=unit),
146 ]
147 # NOTE: works around (LP: #1203241)
148 if (None not in rel_state) and ('' not in rel_state):
149 return True
150 return False
151
152
153def determine_api_port(public_port):
154 '''
155 Determine correct API server listening port based on
156 existence of HTTPS reverse proxy and/or haproxy.
157
158 public_port: int: standard public port for given service
159
160 returns: int: the correct listening port for the API service
161 '''
162 i = 0
163 if len(peer_units()) > 0 or is_clustered():
164 i += 1
165 if https():
166 i += 1
167 return public_port - (i * 10)
168
169
170def determine_apache_port(public_port):
171 '''
172 Description: Determine correct apache listening port based on public IP +
173 state of the cluster.
174
175 public_port: int: standard public port for given service
176
177 returns: int: the correct listening port for the HAProxy service
178 '''
179 i = 0
180 if len(peer_units()) > 0 or is_clustered():
181 i += 1
182 return public_port - (i * 10)
183
184
185def get_hacluster_config():
186 '''
187 Obtains all relevant configuration from charm configuration required
188 for initiating a relation to hacluster:
189
190 ha-bindiface, ha-mcastport, vip
191
192 returns: dict: A dict containing settings keyed by setting name.
193 raises: HAIncompleteConfig if settings are missing.
194 '''
195 settings = ['ha-bindiface', 'ha-mcastport', 'vip']
196 conf = {}
197 for setting in settings:
198 conf[setting] = config_get(setting)
199 missing = []
200 [missing.append(s) for s, v in conf.iteritems() if v is None]
201 if missing:
202 log('Insufficient config data to configure hacluster.', level=ERROR)
203 raise HAIncompleteConfig
204 return conf
205
206
207def canonical_url(configs, vip_setting='vip'):
208 '''
209 Returns the correct HTTP URL to this host given the state of HTTPS
210 configuration and hacluster.
211
212 :configs : OSTemplateRenderer: A config tempating object to inspect for
213 a complete https context.
214
215 :vip_setting: str: Setting in charm config that specifies
216 VIP address.
217 '''
218 scheme = 'http'
219 if 'https' in configs.complete_contexts():
220 scheme = 'https'
221 if is_clustered():
222 addr = config_get(vip_setting)
223 else:
224 addr = unit_get('private-address')
225 return '%s://%s' % (scheme, addr)
0226
=== modified file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 2014-09-08 14:18:52 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2014-10-17 13:06:36 +0000
@@ -1,10 +1,16 @@
+import glob
+import re
+import subprocess
 import sys
 
 from functools import partial
 
+from charmhelpers.core.hookenv import unit_get
 from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
-    ERROR, log, config,
+    WARNING,
+    ERROR,
+    log
 )
 
 try:
@@ -51,6 +57,8 @@
     else:
         if fatal:
             not_found_error_out()
+        else:
+            return None
 
     _validate_cidr(network)
     network = netaddr.IPNetwork(network)
@@ -132,7 +140,8 @@
         if address.version == 4 and netifaces.AF_INET in addresses:
             addr = addresses[netifaces.AF_INET][0]['addr']
             netmask = addresses[netifaces.AF_INET][0]['netmask']
-            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+            cidr = network.cidr
             if address in cidr:
                 if key == 'iface':
                     return iface
@@ -141,11 +150,14 @@
         if address.version == 6 and netifaces.AF_INET6 in addresses:
             for addr in addresses[netifaces.AF_INET6]:
                 if not addr['addr'].startswith('fe80'):
-                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
-                                                        addr['netmask']))
+                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                           addr['netmask']))
+                    cidr = network.cidr
                     if address in cidr:
                         if key == 'iface':
                             return iface
+                        elif key == 'netmask' and cidr:
+                            return str(cidr).split('/')[1]
                         else:
                             return addr[key]
     return None
@@ -156,19 +168,182 @@
156get_netmask_for_address = partial(_get_for_address, key='netmask')168get_netmask_for_address = partial(_get_for_address, key='netmask')
157169
158170
159def get_ipv6_addr(iface="eth0"):171def format_ipv6_addr(address):
172 """
173 IPv6 needs to be wrapped with [] in url link to parse correctly.
174 """
175 if is_ipv6(address):
176 address = "[%s]" % address
177 else:
178 log("Not a valid ipv6 address: %s" % address, level=WARNING)
179 address = None
180
181 return address
182
183
184def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
185 fatal=True, exc_list=None):
186 """
187 Return the assigned IP address for a given interface, if any, or [].
188 """
189 # Extract nic if passed /dev/ethX
190 if '/' in iface:
191 iface = iface.split('/')[-1]
192 if not exc_list:
193 exc_list = []
160 try:194 try:
161 iface_addrs = netifaces.ifaddresses(iface)195 inet_num = getattr(netifaces, inet_type)
162 if netifaces.AF_INET6 not in iface_addrs:196 except AttributeError:
163 raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)197 raise Exception('Unknown inet type ' + str(inet_type))
164198
165 addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]199 interfaces = netifaces.interfaces()
166 ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')200 if inc_aliases:
167 and config('vip') != a['addr']]201 ifaces = []
168 if not ipv6_addr:202 for _iface in interfaces:
169 raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)203 if iface == _iface or _iface.split(':')[0] == iface:
170204 ifaces.append(_iface)
171 return ipv6_addr[0]205 if fatal and not ifaces:
172206 raise Exception("Invalid interface '%s'" % iface)
173 except ValueError:207 ifaces.sort()
174 raise ValueError("Invalid interface '%s'" % iface)208 else:
209 if iface not in interfaces:
210 if fatal:
211 raise Exception("%s not found " % (iface))
212 else:
213 return []
214 else:
215 ifaces = [iface]
216
217 addresses = []
218 for netiface in ifaces:
219 net_info = netifaces.ifaddresses(netiface)
220 if inet_num in net_info:
221 for entry in net_info[inet_num]:
222 if 'addr' in entry and entry['addr'] not in exc_list:
223 addresses.append(entry['addr'])
224 if fatal and not addresses:
225 raise Exception("Interface '%s' doesn't have any %s addresses." %
226 (iface, inet_type))
227 return addresses
228
229get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
230
231
232def get_iface_from_addr(addr):
233 """Work out on which interface the provided address is configured."""
234 for iface in netifaces.interfaces():
235 addresses = netifaces.ifaddresses(iface)
236 for inet_type in addresses:
237 for _addr in addresses[inet_type]:
238 _addr = _addr['addr']
239 # link local
240 ll_key = re.compile("(.+)%.*")
241 raw = re.match(ll_key, _addr)
242 if raw:
243 _addr = raw.group(1)
244 if _addr == addr:
245 log("Address '%s' is configured on iface '%s'" %
246 (addr, iface))
247 return iface
248
249 msg = "Unable to infer net iface on which '%s' is configured" % (addr)
250 raise Exception(msg)
251
252
253def sniff_iface(f):
254 """If no iface provided, inject net iface inferred from unit private
255 address.
256 """
257 def iface_sniffer(*args, **kwargs):
258 if not kwargs.get('iface', None):
259 kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
260
261 return f(*args, **kwargs)
262
263 return iface_sniffer
264
265
266@sniff_iface
267def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
268 dynamic_only=True):
269 """Get assigned IPv6 address for a given interface.
270
271 Returns list of addresses found. If no address found, returns empty list.
272
273 If iface is None, we infer the current primary interface by doing a reverse
274 lookup on the unit private-address.
275
276 We currently only support scope global IPv6 addresses i.e. non-temporary
277 addresses. If no global IPv6 address is found, return the first one found
278 in the ipv6 address list.
279 """
280 addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
281 inc_aliases=inc_aliases, fatal=fatal,
282 exc_list=exc_list)
283
284 if addresses:
285 global_addrs = []
286 for addr in addresses:
287 key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
288 m = re.match(key_scope_link_local, addr)
289 if m:
290 eui_64_mac = m.group(1)
291 iface = m.group(2)
292 else:
293 global_addrs.append(addr)
294
295 if global_addrs:
296 # Make sure any found global addresses are not temporary
297 cmd = ['ip', 'addr', 'show', iface]
298 out = subprocess.check_output(cmd)
299 if dynamic_only:
300 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
301 else:
302 key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
303
304 addrs = []
305 for line in out.split('\n'):
306 line = line.strip()
307 m = re.match(key, line)
308 if m and 'temporary' not in line:
309 # Return the first valid address we find
310 for addr in global_addrs:
311 if m.group(1) == addr:
312 if not dynamic_only or \
313 m.group(1).endswith(eui_64_mac):
314 addrs.append(addr)
315
316 if addrs:
317 return addrs
318
319 if fatal:
320 raise Exception("Interface '%s' doesn't have a scope global "
321 "non-temporary ipv6 address." % iface)
322
323 return []
324
325
326def get_bridges(vnic_dir='/sys/devices/virtual/net'):
327 """
328 Return a list of bridges on the system or []
329 """
330 b_rgex = vnic_dir + '/*/bridge'
331 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
332
333
334def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
335 """
336 Return a list of nics comprising a given bridge on the system or []
337 """
338 brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
339 return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
340
341
342def is_bridge_member(nic):
343 """
344 Check if a given nic is a member of a bridge
345 """
346 for bridge in get_bridges():
347 if nic in get_bridge_nics(bridge):
348 return True
349 return False
175350
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-02 11:19:19 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-17 13:06:36 +0000
@@ -10,32 +10,62 @@
10 that is specifically for use by OpenStack charms.10 that is specifically for use by OpenStack charms.
11 """11 """
1212
13 def __init__(self, series=None, openstack=None, source=None):13 def __init__(self, series=None, openstack=None, source=None, stable=True):
14 """Initialize the deployment environment."""14 """Initialize the deployment environment."""
15 super(OpenStackAmuletDeployment, self).__init__(series)15 super(OpenStackAmuletDeployment, self).__init__(series)
16 self.openstack = openstack16 self.openstack = openstack
17 self.source = source17 self.source = source
18 self.stable = stable
19 # Note(coreycb): this needs to be changed when new next branches come
20 # out.
21 self.current_next = "trusty"
22
23 def _determine_branch_locations(self, other_services):
24 """Determine the branch locations for the other services.
25
26 Determine if the local branch being tested is derived from its
27 stable or next (dev) branch, and based on this, use the corresonding
28 stable or next branches for the other_services."""
29 base_charms = ['mysql', 'mongodb', 'rabbitmq-server']
30
31 if self.stable:
32 for svc in other_services:
33 temp = 'lp:charms/{}'
34 svc['location'] = temp.format(svc['name'])
35 else:
36 for svc in other_services:
37 if svc['name'] in base_charms:
38 temp = 'lp:charms/{}'
39 svc['location'] = temp.format(svc['name'])
40 else:
41 temp = 'lp:~openstack-charmers/charms/{}/{}/next'
42 svc['location'] = temp.format(self.current_next,
43 svc['name'])
44 return other_services
1845
19 def _add_services(self, this_service, other_services):46 def _add_services(self, this_service, other_services):
20 """Add services to the deployment and set openstack-origin."""47 """Add services to the deployment and set openstack-origin/source."""
48 other_services = self._determine_branch_locations(other_services)
49
21 super(OpenStackAmuletDeployment, self)._add_services(this_service,50 super(OpenStackAmuletDeployment, self)._add_services(this_service,
22 other_services)51 other_services)
23 name = 052
24 services = other_services53 services = other_services
25 services.append(this_service)54 services.append(this_service)
26 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']55 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
56 'ceph-osd', 'ceph-radosgw']
2757
28 if self.openstack:58 if self.openstack:
29 for svc in services:59 for svc in services:
30 if svc[name] not in use_source:60 if svc['name'] not in use_source:
31 config = {'openstack-origin': self.openstack}61 config = {'openstack-origin': self.openstack}
32 self.d.configure(svc[name], config)62 self.d.configure(svc['name'], config)
3363
34 if self.source:64 if self.source:
35 for svc in services:65 for svc in services:
36 if svc[name] in use_source:66 if svc['name'] in use_source:
37 config = {'source': self.source}67 config = {'source': self.source}
38 self.d.configure(svc[name], config)68 self.d.configure(svc['name'], config)
3969
40 def _configure_services(self, configs):70 def _configure_services(self, configs):
41 """Configure all of the services."""71 """Configure all of the services."""
4272
=== modified file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-09-02 11:19:19 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-17 13:06:36 +0000
@@ -187,15 +187,16 @@
 
         f = opener.open("http://download.cirros-cloud.net/version/released")
         version = f.read().strip()
-        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
+        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
+        local_path = os.path.join('tests', cirros_img)
 
-        if not os.path.exists(cirros_img):
+        if not os.path.exists(local_path):
             cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                   version, cirros_img)
-            opener.retrieve(cirros_url, cirros_img)
+            opener.retrieve(cirros_url, local_path)
         f.close()
 
-        with open(cirros_img) as f:
+        with open(local_path) as f:
             image = glance.images.create(name=image_name, is_public=True,
                                          disk_format='qcow2',
                                          container_format='bare', data=f)
 
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-09-02 11:19:19 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-17 13:06:36 +0000
@@ -8,7 +8,6 @@
     check_call
 )
 
-
 from charmhelpers.fetch import (
     apt_install,
     filter_installed_packages,
@@ -28,6 +27,11 @@
28 INFO27 INFO
29)28)
3029
30from charmhelpers.core.host import (
31 mkdir,
32 write_file
33)
34
31from charmhelpers.contrib.hahelpers.cluster import (35from charmhelpers.contrib.hahelpers.cluster import (
32 determine_apache_port,36 determine_apache_port,
33 determine_api_port,37 determine_api_port,
@@ -38,6 +42,7 @@
38from charmhelpers.contrib.hahelpers.apache import (42from charmhelpers.contrib.hahelpers.apache import (
39 get_cert,43 get_cert,
40 get_ca_cert,44 get_ca_cert,
45 install_ca_cert,
41)46)
4247
43from charmhelpers.contrib.openstack.neutron import (48from charmhelpers.contrib.openstack.neutron import (
@@ -47,8 +52,13 @@
47from charmhelpers.contrib.network.ip import (52from charmhelpers.contrib.network.ip import (
48 get_address_in_network,53 get_address_in_network,
49 get_ipv6_addr,54 get_ipv6_addr,
55 get_netmask_for_address,
56 format_ipv6_addr,
57 is_address_in_network
50)58)
5159
60from charmhelpers.contrib.openstack.utils import get_host_ip
61
52CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'62CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
5363
5464
@@ -168,8 +178,10 @@
168 for rid in relation_ids('shared-db'):178 for rid in relation_ids('shared-db'):
169 for unit in related_units(rid):179 for unit in related_units(rid):
170 rdata = relation_get(rid=rid, unit=unit)180 rdata = relation_get(rid=rid, unit=unit)
181 host = rdata.get('db_host')
182 host = format_ipv6_addr(host) or host
171 ctxt = {183 ctxt = {
172 'database_host': rdata.get('db_host'),184 'database_host': host,
173 'database': self.database,185 'database': self.database,
174 'database_user': self.user,186 'database_user': self.user,
175 'database_password': rdata.get(password_setting),187 'database_password': rdata.get(password_setting),
@@ -245,10 +257,15 @@
245 for rid in relation_ids('identity-service'):257 for rid in relation_ids('identity-service'):
246 for unit in related_units(rid):258 for unit in related_units(rid):
247 rdata = relation_get(rid=rid, unit=unit)259 rdata = relation_get(rid=rid, unit=unit)
260 serv_host = rdata.get('service_host')
261 serv_host = format_ipv6_addr(serv_host) or serv_host
262 auth_host = rdata.get('auth_host')
263 auth_host = format_ipv6_addr(auth_host) or auth_host
264
248 ctxt = {265 ctxt = {
249 'service_port': rdata.get('service_port'),266 'service_port': rdata.get('service_port'),
250 'service_host': rdata.get('service_host'),267 'service_host': serv_host,
251 'auth_host': rdata.get('auth_host'),268 'auth_host': auth_host,
252 'auth_port': rdata.get('auth_port'),269 'auth_port': rdata.get('auth_port'),
253 'admin_tenant_name': rdata.get('service_tenant'),270 'admin_tenant_name': rdata.get('service_tenant'),
254 'admin_user': rdata.get('service_username'),271 'admin_user': rdata.get('service_username'),
@@ -297,11 +314,13 @@
297 for unit in related_units(rid):314 for unit in related_units(rid):
298 if relation_get('clustered', rid=rid, unit=unit):315 if relation_get('clustered', rid=rid, unit=unit):
299 ctxt['clustered'] = True316 ctxt['clustered'] = True
300 ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,317 vip = relation_get('vip', rid=rid, unit=unit)
301 unit=unit)318 vip = format_ipv6_addr(vip) or vip
319 ctxt['rabbitmq_host'] = vip
302 else:320 else:
303 ctxt['rabbitmq_host'] = relation_get('private-address',321 host = relation_get('private-address', rid=rid, unit=unit)
304 rid=rid, unit=unit)322 host = format_ipv6_addr(host) or host
323 ctxt['rabbitmq_host'] = host
305 ctxt.update({324 ctxt.update({
306 'rabbitmq_user': username,325 'rabbitmq_user': username,
307 'rabbitmq_password': relation_get('password', rid=rid,326 'rabbitmq_password': relation_get('password', rid=rid,
@@ -340,8 +359,9 @@
340 and len(related_units(rid)) > 1:359 and len(related_units(rid)) > 1:
341 rabbitmq_hosts = []360 rabbitmq_hosts = []
342 for unit in related_units(rid):361 for unit in related_units(rid):
343 rabbitmq_hosts.append(relation_get('private-address',362 host = relation_get('private-address', rid=rid, unit=unit)
344 rid=rid, unit=unit))363 host = format_ipv6_addr(host) or host
364 rabbitmq_hosts.append(host)
345 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)365 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
346 if not context_complete(ctxt):366 if not context_complete(ctxt):
347 return {}367 return {}
@@ -370,6 +390,7 @@
370 ceph_addr = \390 ceph_addr = \
371 relation_get('ceph-public-address', rid=rid, unit=unit) or \391 relation_get('ceph-public-address', rid=rid, unit=unit) or \
372 relation_get('private-address', rid=rid, unit=unit)392 relation_get('private-address', rid=rid, unit=unit)
393 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
373 mon_hosts.append(ceph_addr)394 mon_hosts.append(ceph_addr)
374395
375 ctxt = {396 ctxt = {
@@ -390,6 +411,9 @@
390 return ctxt411 return ctxt
391412
392413
414ADDRESS_TYPES = ['admin', 'internal', 'public']
415
416
393class HAProxyContext(OSContextGenerator):417class HAProxyContext(OSContextGenerator):
394 interfaces = ['cluster']418 interfaces = ['cluster']
395419
@@ -402,25 +426,63 @@
402 if not relation_ids('cluster'):426 if not relation_ids('cluster'):
403 return {}427 return {}
404428
429 l_unit = local_unit().replace('/', '-')
430
431 if config('prefer-ipv6'):
432 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
433 else:
434 addr = get_host_ip(unit_get('private-address'))
435
405 cluster_hosts = {}436 cluster_hosts = {}
406 l_unit = local_unit().replace('/', '-')437
407 if config('prefer-ipv6'):438 # NOTE(jamespage): build out map of configured network endpoints
408 addr = get_ipv6_addr()439 # and associated backends
409 else:440 for addr_type in ADDRESS_TYPES:
410 addr = unit_get('private-address')441 laddr = get_address_in_network(
411 cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),442 config('os-{}-network'.format(addr_type)))
412 addr)443 if laddr:
413444 cluster_hosts[laddr] = {}
414 for rid in relation_ids('cluster'):445 cluster_hosts[laddr]['network'] = "{}/{}".format(
415 for unit in related_units(rid):446 laddr,
416 _unit = unit.replace('/', '-')447 get_netmask_for_address(laddr)
417 addr = relation_get('private-address', rid=rid, unit=unit)448 )
418 cluster_hosts[_unit] = addr449 cluster_hosts[laddr]['backends'] = {}
450 cluster_hosts[laddr]['backends'][l_unit] = laddr
451 for rid in relation_ids('cluster'):
452 for unit in related_units(rid):
453 _unit = unit.replace('/', '-')
454 _laddr = relation_get('{}-address'.format(addr_type),
455 rid=rid, unit=unit)
456 if _laddr:
457 cluster_hosts[laddr]['backends'][_unit] = _laddr
458
459 # NOTE(jamespage) no split configurations found, just use
460 # private addresses
461 if not cluster_hosts:
462 cluster_hosts[addr] = {}
463 cluster_hosts[addr]['network'] = "{}/{}".format(
464 addr,
465 get_netmask_for_address(addr)
466 )
467 cluster_hosts[addr]['backends'] = {}
468 cluster_hosts[addr]['backends'][l_unit] = addr
469 for rid in relation_ids('cluster'):
470 for unit in related_units(rid):
471 _unit = unit.replace('/', '-')
472 _laddr = relation_get('private-address',
473 rid=rid, unit=unit)
474 if _laddr:
475 cluster_hosts[addr]['backends'][_unit] = _laddr
419476
420 ctxt = {477 ctxt = {
421 'units': cluster_hosts,478 'frontends': cluster_hosts,
422 }479 }
423480
481 if config('haproxy-server-timeout'):
482 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
483 if config('haproxy-client-timeout'):
484 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
485
424 if config('prefer-ipv6'):486 if config('prefer-ipv6'):
425 ctxt['local_host'] = 'ip6-localhost'487 ctxt['local_host'] = 'ip6-localhost'
426 ctxt['haproxy_host'] = '::'488 ctxt['haproxy_host'] = '::'
@@ -430,12 +492,13 @@
430 ctxt['haproxy_host'] = '0.0.0.0'492 ctxt['haproxy_host'] = '0.0.0.0'
431 ctxt['stat_port'] = ':8888'493 ctxt['stat_port'] = ':8888'
432494
433 if len(cluster_hosts.keys()) > 1:495 for frontend in cluster_hosts:
434 # Enable haproxy when we have enough peers.496 if len(cluster_hosts[frontend]['backends']) > 1:
435 log('Ensuring haproxy enabled in /etc/default/haproxy.')497 # Enable haproxy when we have enough peers.
436 with open('/etc/default/haproxy', 'w') as out:498 log('Ensuring haproxy enabled in /etc/default/haproxy.')
437 out.write('ENABLED=1\n')499 with open('/etc/default/haproxy', 'w') as out:
438 return ctxt500 out.write('ENABLED=1\n')
501 return ctxt
439 log('HAProxy context is incomplete, this unit has no peers.')502 log('HAProxy context is incomplete, this unit has no peers.')
440 return {}503 return {}
441504
@@ -490,22 +553,36 @@
490 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']553 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
491 check_call(cmd)554 check_call(cmd)
492555
493 def configure_cert(self):556 def configure_cert(self, cn=None):
494 if not os.path.isdir('/etc/apache2/ssl'):
495 os.mkdir('/etc/apache2/ssl')
496 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)557 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
497 if not os.path.isdir(ssl_dir):558 mkdir(path=ssl_dir)
498 os.mkdir(ssl_dir)559 cert, key = get_cert(cn)
499 cert, key = get_cert()560 if cn:
500 with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:561 cert_filename = 'cert_{}'.format(cn)
501 cert_out.write(b64decode(cert))562 key_filename = 'key_{}'.format(cn)
502 with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:563 else:
503 key_out.write(b64decode(key))564 cert_filename = 'cert'
565 key_filename = 'key'
566 write_file(path=os.path.join(ssl_dir, cert_filename),
567 content=b64decode(cert))
568 write_file(path=os.path.join(ssl_dir, key_filename),
569 content=b64decode(key))
570
571 def configure_ca(self):
504 ca_cert = get_ca_cert()572 ca_cert = get_ca_cert()
505 if ca_cert:573 if ca_cert:
506 with open(CA_CERT_PATH, 'w') as ca_out:574 install_ca_cert(b64decode(ca_cert))
507 ca_out.write(b64decode(ca_cert))575
508 check_call(['update-ca-certificates'])576 def canonical_names(self):
577 '''Figure out which canonical names clients will access this service'''
578 cns = []
579 for r_id in relation_ids('identity-service'):
580 for unit in related_units(r_id):
581 rdata = relation_get(rid=r_id, unit=unit)
582 for k in rdata:
583 if k.startswith('ssl_key_'):
584 cns.append(k.lstrip('ssl_key_'))
585 return list(set(cns))
509586
510 def __call__(self):587 def __call__(self):
511 if isinstance(self.external_ports, basestring):588 if isinstance(self.external_ports, basestring):
@@ -513,21 +590,47 @@
513 if (not self.external_ports or not https()):590 if (not self.external_ports or not https()):
514 return {}591 return {}
515592
516 self.configure_cert()593 self.configure_ca()
517 self.enable_modules()594 self.enable_modules()
518595
519 ctxt = {596 ctxt = {
520 'namespace': self.service_namespace,597 'namespace': self.service_namespace,
521 'private_address': unit_get('private-address'),598 'endpoints': [],
522 'endpoints': []599 'ext_ports': []
523 }600 }
524 if is_clustered():601
525 ctxt['private_address'] = config('vip')602 for cn in self.canonical_names():
526 for api_port in self.external_ports:603 self.configure_cert(cn)
527 ext_port = determine_apache_port(api_port)604
528 int_port = determine_api_port(api_port)605 addresses = []
529 portmap = (int(ext_port), int(int_port))606 vips = []
530 ctxt['endpoints'].append(portmap)607 if config('vip'):
608 vips = config('vip').split()
609
610 for network_type in ['os-internal-network',
611 'os-admin-network',
612 'os-public-network']:
613 address = get_address_in_network(config(network_type),
614 unit_get('private-address'))
615 if len(vips) > 0 and is_clustered():
616 for vip in vips:
617 if is_address_in_network(config(network_type),
618 vip):
619 addresses.append((address, vip))
620 break
621 elif is_clustered():
622 addresses.append((address, config('vip')))
623 else:
624 addresses.append((address, address))
625
626 for address, endpoint in set(addresses):
627 for api_port in self.external_ports:
628 ext_port = determine_apache_port(api_port)
629 int_port = determine_api_port(api_port)
630 portmap = (address, endpoint, int(ext_port), int(int_port))
631 ctxt['endpoints'].append(portmap)
632 ctxt['ext_ports'].append(int(ext_port))
633 ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
531 return ctxt634 return ctxt
532635
533636
@@ -657,22 +760,22 @@
657760
658class OSConfigFlagContext(OSContextGenerator):761class OSConfigFlagContext(OSContextGenerator):
659762
660 """763 """
661 Responsible for adding user-defined config-flags in charm config to a764 Responsible for adding user-defined config-flags in charm config to a
662 template context.765 template context.
663766
664 NOTE: the value of config-flags may be a comma-separated list of767 NOTE: the value of config-flags may be a comma-separated list of
665 key=value pairs and some Openstack config files support768 key=value pairs and some Openstack config files support
666 comma-separated lists as values.769 comma-separated lists as values.
667 """770 """
668771
669 def __call__(self):772 def __call__(self):
670 config_flags = config('config-flags')773 config_flags = config('config-flags')
671 if not config_flags:774 if not config_flags:
672 return {}775 return {}
673776
674 flags = config_flags_parser(config_flags)777 flags = config_flags_parser(config_flags)
675 return {'user_config_flags': flags}778 return {'user_config_flags': flags}
676779
677780
678class SubordinateConfigContext(OSContextGenerator):781class SubordinateConfigContext(OSContextGenerator):
@@ -787,3 +890,35 @@
787 'use_syslog': config('use-syslog')890 'use_syslog': config('use-syslog')
788 }891 }
789 return ctxt892 return ctxt
893
894
class BindHostContext(OSContextGenerator):
    """Provide the wildcard address a service should bind to.

    Returns '::' (all IPv6 addresses) when the charm prefers IPv6,
    otherwise '0.0.0.0' (all IPv4 addresses).
    """

    def __call__(self):
        bind = '::' if config('prefer-ipv6') else '0.0.0.0'
        return {'bind_host': bind}
906
907
class WorkerConfigContext(OSContextGenerator):
    """Compute the number of worker processes for a service.

    Workers = CPU count * the 'worker-multiplier' charm option
    (defaulting to 1 when the option is unset or zero).
    """

    @property
    def num_cpus(self):
        # psutil may not be present on a fresh unit; install on demand.
        try:
            from psutil import NUM_CPUS
        except ImportError:
            apt_install('python-psutil', fatal=True)
            from psutil import NUM_CPUS
        return NUM_CPUS

    def __call__(self):
        scale = config('worker-multiplier') or 1
        return {"workers": self.num_cpus * scale}
790925
=== modified file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 2014-09-02 11:19:19 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-17 13:06:36 +0000
@@ -66,7 +66,7 @@
66 resolved_address = vip66 resolved_address = vip
67 else:67 else:
68 if config('prefer-ipv6'):68 if config('prefer-ipv6'):
69 fallback_addr = get_ipv6_addr()69 fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
70 else:70 else:
71 fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])71 fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
72 resolved_address = get_address_in_network(72 resolved_address = get_address_in_network(
7373
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-09-08 13:55:11 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-17 13:06:36 +0000
@@ -4,6 +4,7 @@
4from collections import OrderedDict4from collections import OrderedDict
55
6import subprocess6import subprocess
7import json
7import os8import os
8import socket9import socket
9import sys10import sys
@@ -13,7 +14,9 @@
13 log as juju_log,14 log as juju_log,
14 charm_dir,15 charm_dir,
15 ERROR,16 ERROR,
16 INFO17 INFO,
18 relation_ids,
19 relation_set
17)20)
1821
19from charmhelpers.contrib.storage.linux.lvm import (22from charmhelpers.contrib.storage.linux.lvm import (
@@ -22,6 +25,10 @@
22 remove_lvm_physical_volume,25 remove_lvm_physical_volume,
23)26)
2427
28from charmhelpers.contrib.network.ip import (
29 get_ipv6_addr
30)
31
25from charmhelpers.core.host import lsb_release, mounts, umount32from charmhelpers.core.host import lsb_release, mounts, umount
26from charmhelpers.fetch import apt_install, apt_cache33from charmhelpers.fetch import apt_install, apt_cache
27from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk34from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@@ -71,6 +78,8 @@
71 ('1.12.0', 'icehouse'),78 ('1.12.0', 'icehouse'),
72 ('1.11.0', 'icehouse'),79 ('1.11.0', 'icehouse'),
73 ('2.0.0', 'juno'),80 ('2.0.0', 'juno'),
81 ('2.1.0', 'juno'),
82 ('2.2.0', 'juno'),
74])83])
7584
76DEFAULT_LOOPBACK_SIZE = '5G'85DEFAULT_LOOPBACK_SIZE = '5G'
@@ -457,3 +466,21 @@
457 return result466 return result
458 else:467 else:
459 return result.split('.')[0]468 return result.split('.')[0]
469
470
471def sync_db_with_multi_ipv6_addresses(database, database_user,
472 relation_prefix=None):
473 hosts = get_ipv6_addr(dynamic_only=False)
474
475 kwargs = {'database': database,
476 'username': database_user,
477 'hostname': json.dumps(hosts)}
478
479 if relation_prefix:
480 keys = kwargs.keys()
481 for key in keys:
482 kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
483 del kwargs[key]
484
485 for rid in relation_ids('shared-db'):
486 relation_set(relation_id=rid, **kwargs)
460487
=== added directory 'hooks/charmhelpers/contrib/storage'
=== added file 'hooks/charmhelpers/contrib/storage/__init__.py'
=== added directory 'hooks/charmhelpers/contrib/storage/linux'
=== added file 'hooks/charmhelpers/contrib/storage/linux/__init__.py'
=== added file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,388 @@
1#
2# Copyright 2012 Canonical Ltd.
3#
4# This file is sourced from lp:openstack-charm-helpers
5#
6# Authors:
7# James Page <james.page@ubuntu.com>
8# Adam Gandelman <adamg@ubuntu.com>
9#
10
11import os
12import shutil
13import json
14import time
15
16from subprocess import (
17 check_call,
18 check_output,
19 CalledProcessError
20)
21
22from charmhelpers.core.hookenv import (
23 relation_get,
24 relation_ids,
25 related_units,
26 log,
27 INFO,
28 WARNING,
29 ERROR
30)
31
32from charmhelpers.core.host import (
33 mount,
34 mounts,
35 service_start,
36 service_stop,
37 service_running,
38 umount,
39)
40
41from charmhelpers.fetch import (
42 apt_install,
43)
44
45KEYRING = '/etc/ceph/ceph.client.{}.keyring'
46KEYFILE = '/etc/ceph/ceph.client.{}.key'
47
48CEPH_CONF = """[global]
49 auth supported = {auth}
50 keyring = {keyring}
51 mon host = {mon_hosts}
52 log to syslog = {use_syslog}
53 err to syslog = {use_syslog}
54 clog to syslog = {use_syslog}
55"""
56
57
def install():
    ''' Basic Ceph client installation '''
    # Make sure the config directory exists before the package drops
    # files into it.
    if not os.path.isdir("/etc/ceph"):
        os.mkdir("/etc/ceph")
    apt_install('ceph-common', fatal=True)
64
65
def rbd_exists(service, pool, rbd_img):
    ''' Check to see if a RADOS block device exists.

    :param service: str: ceph client id used for authentication
    :param pool: str: pool to list images from
    :param rbd_img: str: image name to look for
    :returns: bool: True if the image exists in the pool
    '''
    try:
        out = check_output(['rbd', 'list', '--id', service,
                            '--pool', pool])
    except CalledProcessError:
        return False
    # Match whole image names: a plain substring test would report True
    # for 'img' when only 'img-backup' exists.
    return rbd_img in out.split()
75
76
def create_rbd_image(service, pool, image, sizemb):
    ''' Create a new RADOS block device of sizemb megabytes. '''
    check_call(['rbd', 'create', image,
                '--size', str(sizemb),
                '--id', service,
                '--pool', pool])
91
92
def pool_exists(service, name):
    ''' Check to see if a RADOS pool already exists.

    :param service: str: ceph client id used for authentication
    :param name: str: pool name to look for
    :returns: bool: True if the pool exists
    '''
    try:
        out = check_output(['rados', '--id', service, 'lspools'])
    except CalledProcessError:
        return False
    # Match whole pool names: a substring test would report True for
    # 'foo' when only 'foobar' exists.
    return name in out.split()
101
102
def get_osds(service):
    '''
    Return a list of all Ceph Object Storage Daemons
    currently in the cluster, or None for ceph < 0.56 which cannot
    list OSDs from the cli.
    '''
    version = ceph_version()
    # Compare version components numerically: the original string
    # comparison ordered '0.100' before '0.56'.
    try:
        new_enough = [int(p) for p in version.split('.')[:2]] >= [0, 56]
    except (AttributeError, ValueError):
        # version is None or not a plain dotted-number string.
        new_enough = False
    if new_enough:
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls', '--format=json']))
    return None
114
115
def create_pool(service, name, replicas=3):
    ''' Create a new RADOS pool with a sensible placement group count. '''
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return
    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 / replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200
    check_call(['ceph', '--id', service,
                'osd', 'pool', 'create', name, str(pgnum)])
    check_call(['ceph', '--id', service,
                'osd', 'pool', 'set', name, 'size', str(replicas)])
143
144
def delete_pool(service, name):
    ''' Delete a RADOS pool from ceph (irreversible). '''
    check_call(['ceph', '--id', service,
                'osd', 'pool', 'delete',
                name, '--yes-i-really-really-mean-it'])
153
154
def _keyfile_path(service):
    # Path of the bare key file for `service`:
    # /etc/ceph/ceph.client.<service>.key
    return KEYFILE.format(service)
157
158
def _keyring_path(service):
    # Path of the keyring for `service`:
    # /etc/ceph/ceph.client.<service>.keyring
    return KEYRING.format(service)
161
162
def create_keyring(service, key):
    ''' Create a new Ceph keyring for service containing key.

    Existing keyrings are left untouched.
    '''
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        log('ceph: Keyring exists at %s.' % keyring, level=WARNING)
        return
    check_call(['ceph-authtool', keyring,
                '--create-keyring',
                '--name=client.{}'.format(service),
                '--add-key={}'.format(key)])
    log('ceph: Created new ring at %s.' % keyring, level=INFO)
178
179
def create_key_file(service, key):
    ''' Write key to the key file for service, unless it already exists. '''
    keyfile = _keyfile_path(service)
    if os.path.exists(keyfile):
        log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING)
        return
    with open(keyfile, 'w') as fd:
        fd.write(key)
    log('ceph: Created new keyfile at %s.' % keyfile, level=INFO)
189
190
def get_ceph_nodes():
    ''' Query the 'ceph' relation to determine current monitor hosts '''
    return [relation_get('private-address', unit=unit, rid=r_id)
            for r_id in relation_ids('ceph')
            for unit in related_units(r_id)]
198
199
def configure(service, key, auth, use_syslog):
    ''' Perform basic configuration of Ceph: keyring, key file,
    /etc/ceph/ceph.conf and the rbd kernel module. '''
    create_keyring(service, key)
    create_key_file(service, key)
    mon_hosts = ",".join(map(str, get_ceph_nodes()))
    rendered = CEPH_CONF.format(auth=auth,
                                keyring=_keyring_path(service),
                                mon_hosts=mon_hosts,
                                use_syslog=use_syslog)
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(rendered)
    modprobe('rbd')
211
212
def image_mapped(name):
    ''' Determine whether a RADOS block device is mapped locally '''
    try:
        mapped = check_output(['rbd', 'showmapped'])
    except CalledProcessError:
        return False
    return name in mapped
221
222
def map_block_storage(service, pool, image):
    ''' Map a RADOS block device for local use '''
    check_call(['rbd', 'map',
                '{}/{}'.format(pool, image),
                '--user', service,
                '--secret', _keyfile_path(service)])
235
236
def filesystem_mounted(fs):
    ''' Determine whether a filesystem is already mounted '''
    return any(mount_point == fs for mount_point, _ in mounts())
240
241
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    ''' Make a new filesystem on the specified block device.

    Waits up to `timeout` seconds for the device node to appear before
    giving up.

    :param blk_device: str: full path of the block device
    :param fstype: str: filesystem type to create (default ext4)
    :param timeout: int: seconds to wait for the device node
    :raises IOError: if the device never appears
    '''
    # NOTE: the original used os.errno, which is an accident of the os
    # module's own import and is gone in python >= 3.6; use errno directly.
    import errno
    count = 0
    e_noent = errno.ENOENT
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('ceph: gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)
        log('ceph: waiting for block device %s to appear' % blk_device,
            level=INFO)
        count += 1
        time.sleep(1)
    else:
        # Only reached when the loop ends without raising, i.e. the
        # device node exists.
        log('ceph: Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])
259
260
def place_data_on_block_device(blk_device, data_src_dst):
    ''' Migrate data in data_src_dst to blk_device and then remount.

    The sequence is order-sensitive: the device is mounted at a scratch
    location, the data copied in, the device unmounted and finally
    remounted over the original directory.

    :param blk_device: str: full path of the formatted block device
    :param data_src_dst: str: directory whose data is migrated and which
        becomes the final mount point
    '''
    # mount block device into /mnt
    mount(blk_device, '/mnt')
    # copy data to /mnt
    copy_files(data_src_dst, '/mnt')
    # umount block device
    umount('/mnt')
    # Grab user/group ID's from original source
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid
    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    # NOTE: only the top-level directory's ownership is restored; files
    # below it keep whatever copy_files produced.
    os.chown(data_src_dst, uid, gid)
278
279
280# TODO: re-use
def modprobe(module):
    ''' Load a kernel module and configure for auto-load on reboot.

    :param module: str: name of the kernel module to load
    '''
    log('ceph: Loading kernel module', level=INFO)
    cmd = ['modprobe', module]
    check_call(cmd)
    with open('/etc/modules', 'r+') as modules:
        content = modules.read()
        # Compare whole lines: a substring test would skip adding 'rbd'
        # if e.g. 'rbdmap' were already listed.
        if module not in content.splitlines():
            # read() left the file positioned at EOF, so writes append.
            # Terminate any unterminated final line first, then write the
            # module on its own line (the original wrote a bare name with
            # no newline, corrupting the next entry appended).
            if content and not content.endswith('\n'):
                modules.write('\n')
            modules.write(module + '\n')
289
290
def copy_files(src, dst, symlinks=False, ignore=None):
    ''' Copy every entry of directory src into directory dst.

    Subdirectories are copied recursively via shutil.copytree; regular
    files via shutil.copy2 (metadata preserved).
    '''
    for entry in os.listdir(src):
        source = os.path.join(src, entry)
        target = os.path.join(dst, entry)
        if os.path.isdir(source):
            shutil.copytree(source, target, symlinks, ignore)
        else:
            shutil.copy2(source, target)
300
301
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=None,
                        replicas=3):
    """
    NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # NOTE: a mutable default ([]) is shared across calls; use None as
    # the sentinel and create a fresh list per invocation.
    if system_services is None:
        system_services = []

    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('ceph: Creating new pool {}.'.format(pool))
        create_pool(service, pool, replicas=replicas)

    if not rbd_exists(service, pool, rbd_img):
        log('ceph: Creating RBD image ({}).'.format(rbd_img))
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img))
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy! Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if service_running(svc):
                log('ceph: Stopping services {} prior to migrating data.'
                    .format(svc))
                service_stop(svc)

        place_data_on_block_device(blk_device, mount_point)

        for svc in system_services:
            log('ceph: Starting service {} after migrating data.'
                .format(svc))
            service_start(svc)
353
354
def ensure_ceph_keyring(service, user=None, group=None):
    '''
    Ensures a ceph keyring is created for a named service
    and optionally ensures user and group ownership.

    Returns False if no ceph key is available in relation state.
    '''
    key = None
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            rel_key = relation_get('key', rid=rid, unit=unit)
            if rel_key:
                key = rel_key
                break
        if key:
            # Stop scanning once a key is found.  The original code only
            # broke out of the inner loop and could overwrite a found key
            # with an empty value from a later relation's unit.
            break
    if not key:
        return False
    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])
    return True
375
376
def ceph_version():
    ''' Retrieve the local version of ceph, or None if not installed. '''
    if not os.path.exists('/usr/bin/ceph'):
        return None
    # 'ceph -v' prints e.g. "ceph version 0.80.1 (...)"; the version is
    # the third whitespace-separated field.
    fields = check_output(['ceph', '-v']).split()
    if len(fields) > 3:
        return fields[2]
    return None
0389
=== added file 'hooks/charmhelpers/contrib/storage/linux/loopback.py'
--- hooks/charmhelpers/contrib/storage/linux/loopback.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/storage/linux/loopback.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,62 @@
1
2import os
3import re
4
5from subprocess import (
6 check_call,
7 check_output,
8)
9
10
11##################################################
12# loopback device helpers.
13##################################################
def loopback_devices():
    '''
    Parse 'losetup -a' output to determine currently mapped loopback
    devices. Output is expected to look like:

    /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    mapping = {}
    for raw in check_output(['losetup', '-a']).splitlines():
        if raw == '':
            continue
        dev, _, backing = raw.strip().split(' ')
        # Extract the backing file path from the parenthesised field.
        mapping[dev.replace(':', '')] = \
            re.search('\((\S+)\)', backing).groups()[0]
    return mapping
30
31
def create_loopback(file_path):
    '''
    Create a loopback device for a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    file_path = os.path.abspath(file_path)
    check_call(['losetup', '--find', file_path])
    # losetup does not print the device it picked; look it up.
    for device, backing in loopback_devices().iteritems():
        if backing == file_path:
            return device
43
44
def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If no loopback device is currently mapped to the file, a new one is
    created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    for device, backing in loopback_devices().iteritems():
        if backing == path:
            return device

    # Create the backing file first if it does not exist yet.
    if not os.path.exists(path):
        check_call(['truncate', '--size', size, path])

    return create_loopback(path)
063
=== added file 'hooks/charmhelpers/contrib/storage/linux/lvm.py'
--- hooks/charmhelpers/contrib/storage/linux/lvm.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/storage/linux/lvm.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,88 @@
1from subprocess import (
2 CalledProcessError,
3 check_call,
4 check_output,
5 Popen,
6 PIPE,
7)
8
9
10##################################################
11# LVM helpers.
12##################################################
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    volume_group = list_lvm_volume_group(block_device)
    if volume_group:
        check_call(['vgchange', '-an', volume_group])
23
24
def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: boolean: True if block device is a PV, False if not.
    '''
    # pvdisplay exits non-zero for anything that is not a PV.
    try:
        check_output(['pvdisplay', block_device])
    except CalledProcessError:
        return False
    return True
38
39
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    # pvremove prompts for confirmation; feed it 'y' on stdin.
    proc = Popen(['pvremove', '-ff', block_device], stdin=PIPE)
    proc.communicate(input='y\n')
49
50
def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: str: Name of volume group associated with block device or None
    '''
    result = None
    for line in check_output(['pvdisplay', block_device]).splitlines():
        stripped = line.strip()
        if stripped.startswith('VG Name'):
            # The value is everything after the two-word label.
            result = ' '.join(stripped.split()[2:])
    return result
67
68
def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.
    '''
    check_call(['pvcreate', block_device])
77
78
def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :block_device: str: Full path of PV-initialized block device.
    '''
    check_call(['vgcreate', volume_group, block_device])
089
=== added file 'hooks/charmhelpers/contrib/storage/linux/utils.py'
--- hooks/charmhelpers/contrib/storage/linux/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/storage/linux/utils.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,53 @@
1import os
2import re
3from stat import S_ISBLK
4
5from subprocess import (
6 check_call,
7 check_output,
8 call
9)
10
11
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    try:
        mode = os.stat(path).st_mode
    except OSError:
        # Missing path (or unreadable, which os.path.exists also treats
        # as absent) is not a block device.
        return False
    return S_ISBLK(mode)
21
22
def zap_disk(block_device):
    '''
    Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.

    :param block_device: str: Full path of block device to clean.
    '''
    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
    call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device])
    sectors = int(check_output(['blockdev', '--getsz',
                                block_device]).split()[0])
    # Wipe both the primary (start-of-disk) and backup (end-of-disk)
    # GPT structures.
    backup_gpt_offset = sectors - 100
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=1M', 'count=1'])
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=512', 'count=100', 'seek=%s' % (backup_gpt_offset)])
39
40
def is_device_mounted(device):
    '''Given a device path, return True if that device is mounted, and False
    if it isn't.

    :param device: str: Full path of the device to check.
    :returns: boolean: True if the path represents a mounted device, False if
    it doesn't.
    '''
    mount_output = check_output(['mount'])
    if re.search(r".*[0-9]+\b", device):
        # Path ends in digits: looks like a partition, match it directly.
        return bool(re.search(device + r"\b", mount_output))
    # Whole disk: any numbered partition of it counts as mounted.
    return bool(re.search(device + r"[0-9]+\b", mount_output))
054
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-09-02 11:17:14 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-10-17 13:06:36 +0000
@@ -203,6 +203,17 @@
203 if os.path.exists(self.path):203 if os.path.exists(self.path):
204 self.load_previous()204 self.load_previous()
205205
206 def __getitem__(self, key):
207 """For regular dict lookups, check the current juju config first,
208 then the previous (saved) copy. This ensures that user-saved values
209 will be returned by a dict lookup.
210
211 """
212 try:
213 return dict.__getitem__(self, key)
214 except KeyError:
215 return (self._prev_dict or {})[key]
216
206 def load_previous(self, path=None):217 def load_previous(self, path=None):
207 """Load previous copy of config from disk.218 """Load previous copy of config from disk.
208219
@@ -475,9 +486,10 @@
475 hooks.execute(sys.argv)486 hooks.execute(sys.argv)
476 """487 """
477488
478 def __init__(self):489 def __init__(self, config_save=True):
479 super(Hooks, self).__init__()490 super(Hooks, self).__init__()
480 self._hooks = {}491 self._hooks = {}
492 self._config_save = config_save
481493
482 def register(self, name, function):494 def register(self, name, function):
483 """Register a hook"""495 """Register a hook"""
@@ -488,9 +500,10 @@
488 hook_name = os.path.basename(args[0])500 hook_name = os.path.basename(args[0])
489 if hook_name in self._hooks:501 if hook_name in self._hooks:
490 self._hooks[hook_name]()502 self._hooks[hook_name]()
491 cfg = config()503 if self._config_save:
492 if cfg.implicit_save:504 cfg = config()
493 cfg.save()505 if cfg.implicit_save:
506 cfg.save()
494 else:507 else:
495 raise UnregisteredHookError(hook_name)508 raise UnregisteredHookError(hook_name)
496509
497510
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-09-02 11:17:14 +0000
+++ hooks/charmhelpers/core/host.py 2014-10-17 13:06:36 +0000
@@ -6,6 +6,7 @@
6# Matthew Wedgwood <matthew.wedgwood@canonical.com>6# Matthew Wedgwood <matthew.wedgwood@canonical.com>
77
8import os8import os
9import re
9import pwd10import pwd
10import grp11import grp
11import random12import random
@@ -68,8 +69,8 @@
68 """Determine whether a system service is available"""69 """Determine whether a system service is available"""
69 try:70 try:
70 subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)71 subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
71 except subprocess.CalledProcessError:72 except subprocess.CalledProcessError as e:
72 return False73 return 'unrecognized service' not in e.output
73 else:74 else:
74 return True75 return True
7576
@@ -209,10 +210,15 @@
209 return system_mounts210 return system_mounts
210211
211212
212def file_hash(path):213def file_hash(path, hash_type='md5'):
213 """Generate a md5 hash of the contents of 'path' or None if not found """214 """
215 Generate a hash checksum of the contents of 'path' or None if not found.
216
217 :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
218 such as md5, sha1, sha256, sha512, etc.
219 """
214 if os.path.exists(path):220 if os.path.exists(path):
215 h = hashlib.md5()221 h = getattr(hashlib, hash_type)()
216 with open(path, 'r') as source:222 with open(path, 'r') as source:
217 h.update(source.read()) # IGNORE:E1101 - it does have update223 h.update(source.read()) # IGNORE:E1101 - it does have update
218 return h.hexdigest()224 return h.hexdigest()
@@ -220,6 +226,26 @@
220 return None226 return None
221227
222228
229def check_hash(path, checksum, hash_type='md5'):
230 """
231 Validate a file using a cryptographic checksum.
232
233 :param str checksum: Value of the checksum used to validate the file.
234 :param str hash_type: Hash algorithm used to generate `checksum`.
235 Can be any hash algorithm supported by :mod:`hashlib`,
236 such as md5, sha1, sha256, sha512, etc.
237 :raises ChecksumError: If the file fails the checksum
238
239 """
240 actual_checksum = file_hash(path, hash_type)
241 if checksum != actual_checksum:
242 raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
243
244
245class ChecksumError(ValueError):
246 pass
247
248
223def restart_on_change(restart_map, stopstart=False):249def restart_on_change(restart_map, stopstart=False):
224 """Restart services based on configuration files changing250 """Restart services based on configuration files changing
225251
@@ -292,7 +318,13 @@
292 ip_output = (line for line in ip_output if line)318 ip_output = (line for line in ip_output if line)
293 for line in ip_output:319 for line in ip_output:
294 if line.split()[1].startswith(int_type):320 if line.split()[1].startswith(int_type):
295 interfaces.append(line.split()[1].replace(":", ""))321 matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line)
322 if matched:
323 interface = matched.groups()[0]
324 else:
325 interface = line.split()[1].replace(":", "")
326 interfaces.append(interface)
327
296 return interfaces328 return interfaces
297329
298330
299331
=== modified file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 2014-09-02 11:17:14 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2014-10-17 13:06:36 +0000
@@ -1,3 +1,5 @@
1import os
2import yaml
1from charmhelpers.core import hookenv3from charmhelpers.core import hookenv
2from charmhelpers.core import templating4from charmhelpers.core import templating
35
@@ -19,15 +21,21 @@
19 the `name` attribute that are complete will be used to populate the dictionary21 the `name` attribute that are complete will be used to populate the dictionary
20 values (see `get_data`, below).22 values (see `get_data`, below).
2123
22 The generated context will be namespaced under the interface type, to prevent24 The generated context will be namespaced under the relation :attr:`name`,
23 potential naming conflicts.25 to prevent potential naming conflicts.
26
27 :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
28 :param list additional_required_keys: Extend the list of :attr:`required_keys`
24 """29 """
25 name = None30 name = None
26 interface = None31 interface = None
27 required_keys = []32 required_keys = []
2833
29 def __init__(self, *args, **kwargs):34 def __init__(self, name=None, additional_required_keys=None):
30 super(RelationContext, self).__init__(*args, **kwargs)35 if name is not None:
36 self.name = name
37 if additional_required_keys is not None:
38 self.required_keys.extend(additional_required_keys)
31 self.get_data()39 self.get_data()
3240
33 def __bool__(self):41 def __bool__(self):
@@ -101,9 +109,115 @@
101 return {}109 return {}
102110
103111
class MysqlRelation(RelationContext):
    """
    RelationContext specialisation for the `mysql` interface.

    The relation is complete once `host`, `user`, `password` and
    `database` have all been provided by the remote end.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'
    required_keys = ['host', 'user', 'password', 'database']
122
123
class HttpRelation(RelationContext):
    """
    RelationContext specialisation for the `http` interface.

    The relation is complete once `host` and `port` are available.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'
    required_keys = ['host', 'port']

    def provide_data(self):
        # Advertise this unit's private address on the default HTTP port.
        data = {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }
        return data
140
141
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # safe_load: config.yaml is plain data; yaml.load would allow
            # arbitrary python object construction via !!python tags.
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        """True once every required option differs from its default value."""
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            # Treat None and '' as equivalent "unset" values.
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truth-protocol alias for __bool__.
        return self.__bool__()
174
175
176class StoredContext(dict):
177 """
178 A data context that always returns the data that it was first created with.
179
180 This is useful to do a one-time generation of things like passwords, that
181 will thereafter use the same value that was originally generated, instead
182 of generating a new value each time it is run.
183 """
184 def __init__(self, file_name, config_data):
185 """
186 If the file exists, populate `self` with the data from the file.
187 Otherwise, populate with the given data and persist it to the file.
188 """
189 if os.path.exists(file_name):
190 self.update(self.read_context(file_name))
191 else:
192 self.store_context(file_name, config_data)
193 self.update(config_data)
194
195 def store_context(self, file_name, config_data):
196 if not os.path.isabs(file_name):
197 file_name = os.path.join(hookenv.charm_dir(), file_name)
198 with open(file_name, 'w') as file_stream:
199 os.fchmod(file_stream.fileno(), 0600)
200 yaml.dump(config_data, file_stream)
201
202 def read_context(self, file_name):
203 if not os.path.isabs(file_name):
204 file_name = os.path.join(hookenv.charm_dir(), file_name)
205 with open(file_name, 'r') as file_stream:
206 data = yaml.load(file_stream)
207 if not data:
208 raise OSError("%s is empty" % file_name)
209 return data
210
211
104class TemplateCallback(ManagerCallback):212class TemplateCallback(ManagerCallback):
105 """213 """
106 Callback class that will render a template, for use as a ready action.214 Callback class that will render a Jinja2 template, for use as a ready action.
215
216 :param str source: The template source file, relative to `$CHARM_DIR/templates`
217 :param str target: The target to write the rendered template to
218 :param str owner: The owner of the rendered file
219 :param str group: The group of the rendered file
220 :param int perms: The permissions of the rendered file
107 """221 """
108 def __init__(self, source, target, owner='root', group='root', perms=0444):222 def __init__(self, source, target, owner='root', group='root', perms=0444):
109 self.source = source223 self.source = source
110224
=== added file 'hooks/charmhelpers/core/sysctl.py'
--- hooks/charmhelpers/core/sysctl.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/sysctl.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,34 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
5
6import yaml
7
8from subprocess import check_call
9
10from charmhelpers.core.hookenv import (
11 log,
12 DEBUG,
13)
14
15
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options,
        eg "{ 'kernel.max_pid': 1337 }" (it is parsed with yaml, not
        used as a dict directly)
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    # safe_load: the input is plain key/value data; yaml.load would
    # permit arbitrary python object construction.
    parsed = yaml.safe_load(sysctl_dict)

    with open(sysctl_file, "w") as fd:
        for key, value in parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, parsed),
        level=DEBUG)

    # Apply the settings immediately; raises CalledProcessError on failure.
    check_call(["sysctl", "-p", sysctl_file])
035
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-09-02 11:17:14 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-10-17 13:06:36 +0000
@@ -208,7 +208,8 @@
208 """Add a package source to this system.208 """Add a package source to this system.
209209
210 @param source: a URL or sources.list entry, as supported by210 @param source: a URL or sources.list entry, as supported by
211 add-apt-repository(1). Examples:211 add-apt-repository(1). Examples::
212
212 ppa:charmers/example213 ppa:charmers/example
213 deb https://stub:key@private.example.com/ubuntu trusty main214 deb https://stub:key@private.example.com/ubuntu trusty main
214215
@@ -311,22 +312,35 @@
311 apt_update(fatal=True)312 apt_update(fatal=True)
312313
313314
314def install_remote(source):315def install_remote(source, *args, **kwargs):
315 """316 """
316 Install a file tree from a remote source317 Install a file tree from a remote source
317318
318 The specified source should be a url of the form:319 The specified source should be a url of the form:
319 scheme://[host]/path[#[option=value][&...]]320 scheme://[host]/path[#[option=value][&...]]
320321
321 Schemes supported are based on this modules submodules322 Schemes supported are based on this modules submodules.
322 Options supported are submodule-specific"""323 Options supported are submodule-specific.
324 Additional arguments are passed through to the submodule.
325
326 For example::
327
328 dest = install_remote('http://example.com/archive.tgz',
329 checksum='deadbeef',
330 hash_type='sha1')
331
332 This will download `archive.tgz`, validate it using SHA1 and, if
333 the file is ok, extract it and return the directory in which it
334 was extracted. If the checksum fails, it will raise
335 :class:`charmhelpers.core.host.ChecksumError`.
336 """
323 # We ONLY check for True here because can_handle may return a string337 # We ONLY check for True here because can_handle may return a string
324 # explaining why it can't handle a given source.338 # explaining why it can't handle a given source.
325 handlers = [h for h in plugins() if h.can_handle(source) is True]339 handlers = [h for h in plugins() if h.can_handle(source) is True]
326 installed_to = None340 installed_to = None
327 for handler in handlers:341 for handler in handlers:
328 try:342 try:
329 installed_to = handler.install(source)343 installed_to = handler.install(source, *args, **kwargs)
330 except UnhandledSource:344 except UnhandledSource:
331 pass345 pass
332 if not installed_to:346 if not installed_to:
333347
=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
--- hooks/charmhelpers/fetch/archiveurl.py 2014-09-02 11:17:14 +0000
+++ hooks/charmhelpers/fetch/archiveurl.py 2014-10-17 13:06:36 +0000
@@ -1,6 +1,8 @@
1import os1import os
2import urllib22import urllib2
3from urllib import urlretrieve
3import urlparse4import urlparse
5import hashlib
46
5from charmhelpers.fetch import (7from charmhelpers.fetch import (
6 BaseFetchHandler,8 BaseFetchHandler,
@@ -10,11 +12,19 @@
10 get_archive_handler,12 get_archive_handler,
11 extract,13 extract,
12)14)
13from charmhelpers.core.host import mkdir15from charmhelpers.core.host import mkdir, check_hash
1416
1517
16class ArchiveUrlFetchHandler(BaseFetchHandler):18class ArchiveUrlFetchHandler(BaseFetchHandler):
17 """Handler for archives via generic URLs"""19 """
20 Handler to download archive files from arbitrary URLs.
21
22 Can fetch from http, https, ftp, and file URLs.
23
24 Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
25
26 Installs the contents of the archive in $CHARM_DIR/fetched/.
27 """
18 def can_handle(self, source):28 def can_handle(self, source):
19 url_parts = self.parse_url(source)29 url_parts = self.parse_url(source)
20 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):30 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@@ -24,6 +34,12 @@
24 return False34 return False
2535
26 def download(self, source, dest):36 def download(self, source, dest):
37 """
38 Download an archive file.
39
40 :param str source: URL pointing to an archive file.
41 :param str dest: Local path location to download archive file to.
42 """
27 # propagate all exceptions43 # propagate all exceptions
28 # URLError, OSError, etc44 # URLError, OSError, etc
29 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)45 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@@ -48,7 +64,30 @@
48 os.unlink(dest)64 os.unlink(dest)
49 raise e65 raise e
5066
51 def install(self, source):67 # Mandatory file validation via Sha1 or MD5 hashing.
68 def download_and_validate(self, url, hashsum, validate="sha1"):
69 tempfile, headers = urlretrieve(url)
70 check_hash(tempfile, hashsum, validate)
71 return tempfile
72
73 def install(self, source, dest=None, checksum=None, hash_type='sha1'):
74 """
75 Download and install an archive file, with optional checksum validation.
76
77 The checksum can also be given on the `source` URL's fragment.
78 For example::
79
80 handler.install('http://example.com/file.tgz#sha1=deadbeef')
81
82 :param str source: URL pointing to an archive file.
83 :param str dest: Local destination path to install to. If not given,
84 installs to `$CHARM_DIR/archives/archive_file_name`.
85 :param str checksum: If given, validate the archive file after download.
86 :param str hash_type: Algorithm used to generate `checksum`.
87 Can be any hash algorithm supported by :mod:`hashlib`,
88 such as md5, sha1, sha256, sha512, etc.
89
90 """
52 url_parts = self.parse_url(source)91 url_parts = self.parse_url(source)
53 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')92 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
54 if not os.path.exists(dest_dir):93 if not os.path.exists(dest_dir):
@@ -60,4 +99,10 @@
60 raise UnhandledSource(e.reason)99 raise UnhandledSource(e.reason)
61 except OSError as e:100 except OSError as e:
62 raise UnhandledSource(e.strerror)101 raise UnhandledSource(e.strerror)
63 return extract(dld_file)102 options = urlparse.parse_qs(url_parts.fragment)
103 for key, value in options.items():
104 if key in hashlib.algorithms:
105 check_hash(dld_file, value, key)
106 if checksum:
107 check_hash(dld_file, checksum, hash_type)
108 return extract(dld_file, dest)
64109
=== added file 'hooks/zeromq_context.py'
--- hooks/zeromq_context.py 1970-01-01 00:00:00 +0000
+++ hooks/zeromq_context.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,41 @@
1import json
2from charmhelpers.core.hookenv import (
3 relation_ids,
4 related_units,
5 relation_get,
6 unit_private_ip,
7)
8from charmhelpers.contrib.openstack import context
9import socket
10
class MatchmakerContext(context.OSContextGenerator):
    """Build the matchmaker ring topology from relation data.

    Collects topic -> [host, ...] mappings from both the `topology`
    relation (remote hosts advertise their own host) and the
    `zeromq-configuration` relation (this unit's hostname stands in,
    since principals do not publish one).
    """

    def __call__(self):
        topics = {}

        def add_topics(topic_info):
            # Accumulate each whitespace-separated topic under its host;
            # setdefault replaces the duplicated if/else append logic.
            for topic in topic_info['topics'].split():
                topics.setdefault(topic, []).append(topic_info['host'])

        for rid in relation_ids('topology'):
            for unit in related_units(rid):
                topic_info = relation_get(unit=unit, rid=rid)
                if 'topics' in topic_info and 'host' in topic_info:
                    add_topics(topic_info)
        for rid in relation_ids('zeromq-configuration'):
            for unit in related_units(rid):
                topic_info = relation_get(unit=unit, rid=rid)
                # Use our own hostname for units on this relation.
                topic_info['host'] = socket.gethostname()
                if 'topics' in topic_info:
                    add_topics(topic_info)
        return {'topology': json.dumps(topics, indent=4)}
35
36
class OsloZMQContext(context.OSContextGenerator):
    """Provide this unit's hostname for the oslo-messaging ZMQ config."""

    def __call__(self):
        ctxt = {'zmq_host': socket.gethostname()}
        return ctxt
042
=== modified file 'hooks/zeromq_hooks.py'
--- hooks/zeromq_hooks.py 2014-10-15 11:29:28 +0000
+++ hooks/zeromq_hooks.py 2014-10-17 13:06:36 +0000
@@ -1,13 +1,10 @@
1#!/usr/bin/python1#!/usr/bin/python
22
3import json3import socket
4import shutil
5import sys4import sys
6import uuid5import uuid
7import socket
86
9from charmhelpers.fetch import add_source7from charmhelpers.fetch import add_source
10from charmhelpers.core.templating import render
11from charmhelpers.fetch import apt_install, apt_update8from charmhelpers.fetch import apt_install, apt_update
12from charmhelpers.core.host import (9from charmhelpers.core.host import (
13 adduser,10 adduser,
@@ -19,66 +16,37 @@
19from charmhelpers.core.hookenv import (16from charmhelpers.core.hookenv import (
20 Hooks,17 Hooks,
21 UnregisteredHookError,18 UnregisteredHookError,
22 charm_dir,
23 log,19 log,
24 relation_get,20 relation_get,
25 relation_ids,21 relation_ids,
26 relation_set,22 relation_set,
27 related_units,23)
24from zeromq_utils import (
25 determine_packages,
26 get_principle_topics,
27 register_configs,
28 restart_map,
29 write_oslo_upstart,
28)30)
2931
30hooks = Hooks()32hooks = Hooks()
33CONFIGS = register_configs()
3134
3235
33@hooks.hook('install')36@hooks.hook('install')
34def install():37def install():
35 add_source('ppa:james-page/0mq')38 add_source('ppa:james-page/0mq')
36 apt_update()39 apt_update()
37 apt_install(['python-zmq', 'python-oslo.messaging'], fatal=True)40 apt_install(determine_packages(), fatal=True)
38 adduser('oslo', password='oslo', system_user=True)41 adduser('oslo', password='oslo', system_user=True)
39 mkdir('/etc/oslo/', owner='oslo', group='oslo', perms=0755)42 mkdir('/etc/oslo/', owner='oslo', group='oslo', perms=0755)
4043
4144
42def write_mapping():
43 topics = {}
44 topology_file = '/etc/oslo/matchmaker_ring.json'
45 for rid in relation_ids('topology'):
46 for unit in related_units(rid):
47 topic_info = relation_get(unit=unit, rid=rid)
48 if 'topics' in topic_info and 'host' in topic_info:
49 for topic in topic_info['topics'].split():
50 if topic in topics:
51 topics[topic].append(topic_info['host'])
52 else:
53 topics[topic] = [topic_info['host']]
54 for rid in relation_ids('zeromq-configuration'):
55 for unit in related_units(rid):
56 topic_info = relation_get(unit=unit, rid=rid)
57 topic_info['host'] = socket.gethostname()
58 if 'topics' in topic_info:
59 for topic in topic_info['topics'].split():
60 if topic in topics:
61 topics[topic].append(topic_info['host'])
62 else:
63 topics[topic] = [topic_info['host']]
64 with open(topology_file, 'w') as outfile:
65 json.dump(topics, outfile, indent=4)
66 oslo_msg_file = 'oslo-messaging.conf'
67
68 ctxt = {
69 'zmq_host': socket.gethostname(),
70 }
71 render(oslo_msg_file, '/etc/oslo/oslo-messaging.conf', ctxt)
72
73@hooks.hook('config-changed')45@hooks.hook('config-changed')
74@restart_on_change({46@restart_on_change(restart_map(), stopstart=True)
75 '/etc/oslo/oslo-messaging.conf': ['oslo-messaging-zmq-receiver'],
76 '/etc/init/oslo-messaging-zmq-receiver.conf': ['oslo-messaging-zmq-receiver']
77})
78def config_changed():47def config_changed():
79 upstart_file = charm_dir() + '/files/' + 'oslo-messaging-zmq-receiver.conf'48 write_oslo_upstart()
80 shutil.copyfile(upstart_file, '/etc/init/oslo-messaging-zmq-receiver.conf')49 CONFIGS.write_all()
81 write_mapping()
82 for rid in relation_ids('zeromq-configuration'):50 for rid in relation_ids('zeromq-configuration'):
83 relation_set(relation_id=rid, host=socket.gethostname())51 relation_set(relation_id=rid, host=socket.gethostname())
84 configuration_relation_joined(rid=rid, remote_restart=True)52 configuration_relation_joined(rid=rid, remote_restart=True)
@@ -91,17 +59,9 @@
91 if remote_restart:59 if remote_restart:
92 relation_set(relation_id=rid, nonce=str(uuid.uuid4()))60 relation_set(relation_id=rid, nonce=str(uuid.uuid4()))
9361
94def get_principle_topics():
95 princile_topics = []
96 for rid in relation_ids('zeromq-configuration'):
97 for unit in related_units(rid):
98 topics = relation_get(attribute='topics', unit=unit, rid=rid)
99 if topics:
100 princile_topics += topics.split()
101 return princile_topics
102
10362
104@hooks.hook('zeromq-configuration-relation-changed')63@hooks.hook('zeromq-configuration-relation-changed')
64@restart_on_change(restart_map(), stopstart=True)
105def configuration_relation_changed():65def configuration_relation_changed():
106 rel_info = relation_get()66 rel_info = relation_get()
107 if 'users' in rel_info:67 if 'users' in rel_info:
@@ -112,13 +72,14 @@
112 topics = " ".join(get_principle_topics())72 topics = " ".join(get_principle_topics())
113 relation_set(relation_id=rid, topics=topics,73 relation_set(relation_id=rid, topics=topics,
114 host=socket.gethostname())74 host=socket.gethostname())
115 write_mapping()75 CONFIGS.write_all()
11676
11777
118@hooks.hook('topology-relation-changed',78@hooks.hook('topology-relation-changed',
119 'topology-relation-departed')79 'topology-relation-departed')
80@restart_on_change(restart_map(), stopstart=True)
120def topology_relation_changed():81def topology_relation_changed():
121 write_mapping()82 CONFIGS.write_all()
122 # NOTE: drop when auto-reload of config file is implemented83 # NOTE: drop when auto-reload of config file is implemented
123 for rid in relation_ids('zeromq-configuration'):84 for rid in relation_ids('zeromq-configuration'):
124 configuration_relation_joined(rid=rid, remote_restart=True)85 configuration_relation_joined(rid=rid, remote_restart=True)
12586
=== added file 'hooks/zeromq_utils.py'
--- hooks/zeromq_utils.py 1970-01-01 00:00:00 +0000
+++ hooks/zeromq_utils.py 2014-10-17 13:06:36 +0000
@@ -0,0 +1,78 @@
1from collections import OrderedDict
2from copy import deepcopy
3import zeromq_context
4from charmhelpers.contrib.openstack import templating
5import shutil
6from charmhelpers.core.host import (
7 service_running,
8 service_start,
9)
10from charmhelpers.core.hookenv import charm_dir
11from charmhelpers.core.hookenv import (
12 relation_get,
13 relation_ids,
14 related_units,
15)
16MATCHMAKER_CONF = "/etc/oslo/matchmaker_ring.json"
17OSLO_MSG_CONF = "/etc/oslo/oslo-messaging.conf"
18OSLO_UPSTART_CONF = "/etc/init/oslo-messaging-zmq-receiver.conf"
19TEMPLATES = 'templates/'
20
21BASE_RESOURCE_MAP = OrderedDict([
22 (MATCHMAKER_CONF, {
23 'services': ['oslo-messaging-zmq-receiver'],
24 'contexts': [zeromq_context.MatchmakerContext()],
25 }),
26 (OSLO_MSG_CONF, {
27 'services': ['oslo-messaging-zmq-receiver'],
28 'contexts': [zeromq_context.OsloZMQContext()],
29 }),
30])
# Packages installed by this charm.
BASE_PACKAGES = [
    'python-zmq',
    'python-oslo.messaging',
]


def determine_packages():
    """Return the list of packages this charm installs.

    A fresh copy is returned so that callers cannot accidentally
    mutate the module-level BASE_PACKAGES list.
    """
    return list(BASE_PACKAGES)
39
40
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    # Deep-copy so callers may tailor their copy without mutating the
    # module-level template.
    return deepcopy(BASE_RESOURCE_MAP)
48
49
def register_configs(release=None):
    """Register each managed config file with its contexts.

    :param release: OpenStack release to render templates for.  Defaults
        to 'juno' when not supplied (previously this argument was
        accepted but silently ignored).
    :returns: a configured OSConfigRenderer
    """
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release or 'juno')
    for cfg, rscs in resource_map().iteritems():
        configs.register(cfg, rscs['contexts'])
    return configs
56
57
def restart_map():
    """Map each config file to the services restarted when it changes."""
    restarts = OrderedDict()
    for cfg, details in resource_map().iteritems():
        # Only include files that actually drive a service restart.
        if details['services']:
            restarts[cfg] = details['services']
    return restarts
62
63
def write_oslo_upstart():
    """Install the zmq receiver upstart job and make sure it is running."""
    src = charm_dir() + '/files/oslo-messaging-zmq-receiver.conf'
    shutil.copyfile(src, OSLO_UPSTART_CONF)
    # A freshly installed job is not started automatically; start it once.
    if not service_running('oslo-messaging-zmq-receiver'):
        service_start('oslo-messaging-zmq-receiver')
69
70
def get_principle_topics():
    """Return every topic advertised on the zeromq-configuration relation."""
    topics = []
    for rid in relation_ids('zeromq-configuration'):
        for unit in related_units(rid):
            unit_topics = relation_get(attribute='topics', unit=unit, rid=rid)
            if unit_topics:
                topics.extend(unit_topics.split())
    return topics
079
=== added file 'templates/matchmaker_ring.json'
--- templates/matchmaker_ring.json 1970-01-01 00:00:00 +0000
+++ templates/matchmaker_ring.json 2014-10-17 13:06:36 +0000
@@ -0,0 +1,1 @@
1{{ topology }}
02
=== modified file 'tests/charmhelpers/contrib/amulet/deployment.py'
--- tests/charmhelpers/contrib/amulet/deployment.py 2014-09-02 11:17:14 +0000
+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-10-17 13:06:36 +0000
@@ -24,25 +24,31 @@
24 """Add services.24 """Add services.
2525
26 Add services to the deployment where this_service is the local charm26 Add services to the deployment where this_service is the local charm
27 that we're focused on testing and other_services are the other27 that we're testing and other_services are the other services that
28 charms that come from the charm store.28 are being used in the local amulet tests.
29 """29 """
30 name, units = range(2)30 if this_service['name'] != os.path.basename(os.getcwd()):
3131 s = this_service['name']
32 if this_service[name] != os.path.basename(os.getcwd()):
33 s = this_service[name]
34 msg = "The charm's root directory name needs to be {}".format(s)32 msg = "The charm's root directory name needs to be {}".format(s)
35 amulet.raise_status(amulet.FAIL, msg=msg)33 amulet.raise_status(amulet.FAIL, msg=msg)
3634
37 self.d.add(this_service[name], units=this_service[units])35 if 'units' not in this_service:
36 this_service['units'] = 1
37
38 self.d.add(this_service['name'], units=this_service['units'])
3839
39 for svc in other_services:40 for svc in other_services:
40 if self.series:41 if 'location' in svc:
41 self.d.add(svc[name],42 branch_location = svc['location']
42 charm='cs:{}/{}'.format(self.series, svc[name]),43 elif self.series:
43 units=svc[units])44 branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
44 else:45 else:
45 self.d.add(svc[name], units=svc[units])46 branch_location = None
47
48 if 'units' not in svc:
49 svc['units'] = 1
50
51 self.d.add(svc['name'], charm=branch_location, units=svc['units'])
4652
47 def _add_relations(self, relations):53 def _add_relations(self, relations):
48 """Add all of the relations for the services."""54 """Add all of the relations for the services."""
@@ -57,7 +63,7 @@
57 def _deploy(self):63 def _deploy(self):
58 """Deploy environment and wait for all hooks to finish executing."""64 """Deploy environment and wait for all hooks to finish executing."""
59 try:65 try:
60 self.d.setup()66 self.d.setup(timeout=900)
61 self.d.sentry.wait(timeout=900)67 self.d.sentry.wait(timeout=900)
62 except amulet.helpers.TimeoutError:68 except amulet.helpers.TimeoutError:
63 amulet.raise_status(amulet.FAIL, msg="Deployment timed out")69 amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

Subscribers

People subscribed via source and target branches