Merge lp:~jjo/charms/trusty/swift-proxy/swift_hash-from-JUJU_ENV_UUID-and-service_name into lp:~openstack-charmers-archive/charms/trusty/swift-proxy/trunk

Proposed by JuanJo Ciarlante
Status: Superseded
Proposed branch: lp:~jjo/charms/trusty/swift-proxy/swift_hash-from-JUJU_ENV_UUID-and-service_name
Merge into: lp:~openstack-charmers-archive/charms/trusty/swift-proxy/trunk
Diff against target: 5652 lines (+4171/-249) (has conflicts)
52 files modified
.bzrignore (+2/-0)
Makefile (+24/-1)
charm-helpers-hooks.yaml (+13/-0)
charm-helpers-tests.yaml (+5/-0)
config.yaml (+53/-9)
hooks/charmhelpers/contrib/hahelpers/apache.py (+10/-3)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+59/-17)
hooks/charmhelpers/contrib/network/ip.py (+343/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
hooks/charmhelpers/contrib/openstack/context.py (+237/-59)
hooks/charmhelpers/contrib/openstack/ip.py (+79/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+18/-4)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+9/-8)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+9/-8)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+45/-6)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+131/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+49/-21)
hooks/charmhelpers/core/host.py (+75/-11)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+313/-0)
hooks/charmhelpers/core/services/helpers.py (+239/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+115/-32)
hooks/charmhelpers/fetch/archiveurl.py (+49/-4)
hooks/swift_context.py (+30/-11)
hooks/swift_hooks.py (+78/-23)
hooks/swift_utils.py (+26/-3)
revision (+1/-1)
templates/essex/proxy-server.conf (+2/-0)
templates/grizzly/proxy-server.conf (+2/-0)
templates/havana/proxy-server.conf (+2/-0)
templates/icehouse/proxy-server.conf (+4/-1)
templates/memcached.conf (+1/-1)
tests/00-setup (+11/-0)
tests/10-basic-precise-essex (+9/-0)
tests/11-basic-precise-folsom (+11/-0)
tests/12-basic-precise-grizzly (+11/-0)
tests/13-basic-precise-havana (+11/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+52/-0)
tests/basic_deployment.py (+827/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+72/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
unit_tests/test_templates.py (+2/-2)
Conflict adding file .bzrignore.  Moved existing file to .bzrignore.moved.
Text conflict in Makefile
Contents conflict in charm-helpers.yaml
Text conflict in hooks/charmhelpers/contrib/openstack/context.py
Text conflict in hooks/charmhelpers/contrib/openstack/utils.py
Conflict adding file hooks/charmhelpers/core/fstab.py.  Moved existing file to hooks/charmhelpers/core/fstab.py.moved.
Text conflict in hooks/charmhelpers/core/host.py
Text conflict in hooks/charmhelpers/fetch/__init__.py
To merge this branch: bzr merge lp:~jjo/charms/trusty/swift-proxy/swift_hash-from-JUJU_ENV_UUID-and-service_name
Reviewer: OpenStack Charmers (review requested, status: Pending)
Review via email: mp+237290@code.launchpad.net

This proposal supersedes a proposal from 2014-10-06.

This proposal has been superseded by a proposal from 2014-10-06.

Preview Diff

1=== added file '.bzrignore'
2--- .bzrignore 1970-01-01 00:00:00 +0000
3+++ .bzrignore 2014-10-06 15:38:41 +0000
4@@ -0,0 +1,2 @@
5+.coverage
6+bin
7
8=== renamed file '.bzrignore' => '.bzrignore.moved'
9=== modified file 'Makefile'
10--- Makefile 2014-08-13 15:57:07 +0000
11+++ Makefile 2014-10-06 15:38:41 +0000
12@@ -3,10 +3,15 @@
13
14 lint:
15 @flake8 --exclude hooks/charmhelpers --ignore=E125 hooks
16- @flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests
17+ @flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests tests
18 @charm proof
19
20+unit_test:
21+ @echo Starting unit tests...
22+ @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
23+
24 test:
25+<<<<<<< TREE
26 @echo Starting tests...
27 @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
28
29@@ -19,5 +24,23 @@
30 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
31
32 publish: lint test
33+=======
34+ @echo Starting Amulet tests...
35+ # coreycb note: The -v should only be temporary until Amulet sends
36+ # raise_status() messages to stderr:
37+ # https://bugs.launchpad.net/amulet/+bug/1320357
38+ @juju test -v -p AMULET_HTTP_PROXY
39+
40+bin/charm_helpers_sync.py:
41+ @mkdir -p bin
42+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
43+ > bin/charm_helpers_sync.py
44+
45+sync: bin/charm_helpers_sync.py
46+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
47+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
48+
49+publish: lint unit_test
50+>>>>>>> MERGE-SOURCE
51 bzr push lp:charms/swift-proxy
52 bzr push lp:charms/trusty/swift-proxy
53
54=== added file 'charm-helpers-hooks.yaml'
55--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
56+++ charm-helpers-hooks.yaml 2014-10-06 15:38:41 +0000
57@@ -0,0 +1,13 @@
58+branch: lp:charm-helpers
59+destination: hooks/charmhelpers
60+include:
61+ - core
62+ - fetch
63+ - contrib.openstack|inc=*
64+ - contrib.storage.linux
65+ - contrib.hahelpers:
66+ - apache
67+ - cluster
68+ - payload.execd
69+ - contrib.network.ip
70+ - contrib.peerstorage
71
72=== added file 'charm-helpers-tests.yaml'
73--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
74+++ charm-helpers-tests.yaml 2014-10-06 15:38:41 +0000
75@@ -0,0 +1,5 @@
76+branch: lp:charm-helpers
77+destination: tests/charmhelpers
78+include:
79+ - contrib.amulet
80+ - contrib.openstack.amulet
81
82=== renamed file 'charm-helpers.yaml' => 'charm-helpers.yaml.THIS'
83=== modified file 'config.yaml'
84--- config.yaml 2013-04-15 19:41:45 +0000
85+++ config.yaml 2014-10-06 15:38:41 +0000
86@@ -106,6 +106,19 @@
87 default: true
88 type: boolean
89 description: Delay authentication to downstream WSGI services.
90+ node-timeout:
91+ default: 60
92+ type: int
93+ description: How long the proxy server will wait on responses from the a/c/o servers.
94+ recoverable-node-timeout:
95+ default: 30
96+ type: int
97+ description: |
98+ How long the proxy server will wait for an initial response and to read a
99+ chunk of data from the object servers while serving GET / HEAD requests.
100+ Timeouts from these requests can be recovered from so setting this to
101+ something lower than node-timeout would provide quicker error recovery
102+ while allowing for a longer timeout for non-recoverable requests (PUTs).
103 # Manual Keystone configuration.
104 keystone-auth-host:
105 type: string
106@@ -134,15 +147,11 @@
107 description: Hash to use across all swift-proxy servers - don't lose
108 vip:
109 type: string
110- description: "Virtual IP to use to front swift-proxy in ha configuration"
111- vip_iface:
112- type: string
113- default: eth0
114- description: "Network Interface where to place the Virtual IP"
115- vip_cidr:
116- type: int
117- default: 24
118- description: "Netmask that will be used for the Virtual IP"
119+ description: |
120+ Virtual IP(s) to use to front API services in HA configuration.
121+ .
122+ If multiple networks are being used, a VIP should be provided for each
123+ network, separated by spaces.
124 ha-bindiface:
125 type: string
126 default: eth0
127@@ -155,3 +164,38 @@
128 description: |
129 Default multicast port number that will be used to communicate between
130 HA Cluster nodes.
131+ # Network configuration options
132+ # by default all access is over 'private-address'
133+ os-admin-network:
134+ type: string
135+ description: |
136+ The IP address and netmask of the OpenStack Admin network (e.g.,
137+ 192.168.0.0/24)
138+ .
139+ This network will be used for admin endpoints.
140+ os-internal-network:
141+ type: string
142+ description: |
143+ The IP address and netmask of the OpenStack Internal network (e.g.,
144+ 192.168.0.0/24)
145+ .
146+ This network will be used for internal endpoints.
147+ os-public-network:
148+ type: string
149+ description: |
150+ The IP address and netmask of the OpenStack Public network (e.g.,
151+ 192.168.0.0/24)
152+ .
153+ This network will be used for public endpoints.
154+ prefer-ipv6:
155+ type: boolean
156+ default: False
157+ description: |
158+ If True enables IPv6 support. The charm will expect network interfaces
159+ to be configured with an IPv6 address. If set to False (default) IPv4
160+ is expected.
161+ .
162+ NOTE: these charms do not currently support IPv6 privacy extension. In
163+ order for this charm to function correctly, the privacy extension must be
164+ disabled and a non-temporary address must be configured/available on
165+ your network interface.
166
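
For orientation (not part of this diff): hook code typically reads the options added above through charmhelpers' config() accessor. A minimal sketch, assuming the option names defined in config.yaml above:

    from charmhelpers.core.hookenv import config

    # config() returns values from config.yaml, including defaults.
    node_timeout = config('node-timeout')             # defaults to 60
    recoverable = config('recoverable-node-timeout')  # defaults to 30
    bind_all = '::' if config('prefer-ipv6') else '0.0.0.0'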
167=== modified file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
168--- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-03-27 11:23:24 +0000
169+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2014-10-06 15:38:41 +0000
170@@ -20,20 +20,27 @@
171 )
172
173
174-def get_cert():
175+def get_cert(cn=None):
176+ # TODO: deal with multiple https endpoints via charm config
177 cert = config_get('ssl_cert')
178 key = config_get('ssl_key')
179 if not (cert and key):
180 log("Inspecting identity-service relations for SSL certificate.",
181 level=INFO)
182 cert = key = None
183+ if cn:
184+ ssl_cert_attr = 'ssl_cert_{}'.format(cn)
185+ ssl_key_attr = 'ssl_key_{}'.format(cn)
186+ else:
187+ ssl_cert_attr = 'ssl_cert'
188+ ssl_key_attr = 'ssl_key'
189 for r_id in relation_ids('identity-service'):
190 for unit in relation_list(r_id):
191 if not cert:
192- cert = relation_get('ssl_cert',
193+ cert = relation_get(ssl_cert_attr,
194 rid=r_id, unit=unit)
195 if not key:
196- key = relation_get('ssl_key',
197+ key = relation_get(ssl_key_attr,
198 rid=r_id, unit=unit)
199 return (cert, key)
200
201
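
A hedged sketch of how the new cn parameter to get_cert() is meant to be consumed; the CN value here is illustrative, not something defined in this proposal:

    from charmhelpers.contrib.hahelpers.apache import get_cert

    # Without a cn, behaviour is unchanged: ssl_cert/ssl_key come from
    # charm config or the identity-service relation.
    cert, key = get_cert()

    # With a cn, the relation is searched for suffixed keys, e.g.
    # ssl_cert_swift.example.com / ssl_key_swift.example.com (hypothetical
    # CN), allowing one certificate per canonical name.
    cert, key = get_cert(cn='swift.example.com')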
202=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
203--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-27 11:23:24 +0000
204+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-06 15:38:41 +0000
205@@ -6,6 +6,11 @@
206 # Adam Gandelman <adamg@ubuntu.com>
207 #
208
209+"""
210+Helpers for clustering and determining "cluster leadership" and other
211+clustering-related helpers.
212+"""
213+
214 import subprocess
215 import os
216
217@@ -19,6 +24,7 @@
218 config as config_get,
219 INFO,
220 ERROR,
221+ WARNING,
222 unit_get,
223 )
224
225@@ -27,6 +33,29 @@
226 pass
227
228
229+def is_elected_leader(resource):
230+ """
231+ Returns True if the charm executing this is the elected cluster leader.
232+
233+ It relies on two mechanisms to determine leadership:
234+ 1. If the charm is part of a corosync cluster, call corosync to
235+ determine leadership.
236+ 2. If the charm is not part of a corosync cluster, the leader is
237+ determined as being "the alive unit with the lowest unit number". In
238+ other words, the oldest surviving unit.
239+ """
240+ if is_clustered():
241+ if not is_crm_leader(resource):
242+ log('Deferring action to CRM leader.', level=INFO)
243+ return False
244+ else:
245+ peers = peer_units()
246+ if peers and not oldest_peer(peers):
247+ log('Deferring action to oldest service unit.', level=INFO)
248+ return False
249+ return True
250+
251+
252 def is_clustered():
253 for r_id in (relation_ids('ha') or []):
254 for unit in (relation_list(r_id) or []):
255@@ -38,7 +67,11 @@
256 return False
257
258
259-def is_leader(resource):
260+def is_crm_leader(resource):
261+ """
262+ Returns True if the charm calling this is the elected corosync leader,
263+ as returned by calling the external "crm" command.
264+ """
265 cmd = [
266 "crm", "resource",
267 "show", resource
268@@ -54,15 +87,31 @@
269 return False
270
271
272-def peer_units():
273+def is_leader(resource):
274+ log("is_leader is deprecated. Please consider using is_crm_leader "
275+ "instead.", level=WARNING)
276+ return is_crm_leader(resource)
277+
278+
279+def peer_units(peer_relation="cluster"):
280 peers = []
281- for r_id in (relation_ids('cluster') or []):
282+ for r_id in (relation_ids(peer_relation) or []):
283 for unit in (relation_list(r_id) or []):
284 peers.append(unit)
285 return peers
286
287
288+def peer_ips(peer_relation='cluster', addr_key='private-address'):
289+ '''Return a dict of peers and their private-address'''
290+ peers = {}
291+ for r_id in relation_ids(peer_relation):
292+ for unit in relation_list(r_id):
293+ peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
294+ return peers
295+
296+
297 def oldest_peer(peers):
298+ """Determines who the oldest peer is by comparing unit numbers."""
299 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
300 for peer in peers:
301 remote_unit_no = int(peer.split('/')[1])
302@@ -72,16 +121,9 @@
303
304
305 def eligible_leader(resource):
306- if is_clustered():
307- if not is_leader(resource):
308- log('Deferring action to CRM leader.', level=INFO)
309- return False
310- else:
311- peers = peer_units()
312- if peers and not oldest_peer(peers):
313- log('Deferring action to oldest service unit.', level=INFO)
314- return False
315- return True
316+ log("eligible_leader is deprecated. Please consider using "
317+ "is_elected_leader instead.", level=WARNING)
318+ return is_elected_leader(resource)
319
320
321 def https():
322@@ -97,10 +139,9 @@
323 return True
324 for r_id in relation_ids('identity-service'):
325 for unit in relation_list(r_id):
326+ # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
327 rel_state = [
328 relation_get('https_keystone', rid=r_id, unit=unit),
329- relation_get('ssl_cert', rid=r_id, unit=unit),
330- relation_get('ssl_key', rid=r_id, unit=unit),
331 relation_get('ca_cert', rid=r_id, unit=unit),
332 ]
333 # NOTE: works around (LP: #1203241)
334@@ -146,12 +187,12 @@
335 Obtains all relevant configuration from charm configuration required
336 for initiating a relation to hacluster:
337
338- ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
339+ ha-bindiface, ha-mcastport, vip
340
341 returns: dict: A dict containing settings keyed by setting name.
342 raises: HAIncompleteConfig if settings are missing.
343 '''
344- settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
345+ settings = ['ha-bindiface', 'ha-mcastport', 'vip']
346 conf = {}
347 for setting in settings:
348 conf[setting] = config_get(setting)
349@@ -170,6 +211,7 @@
350
351 :configs : OSTemplateRenderer: A config tempating object to inspect for
352 a complete https context.
353+
354 :vip_setting: str: Setting in charm config that specifies
355 VIP address.
356 '''
357
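
A usage sketch of the renamed leadership helper; the corosync resource name 'res_swift_haproxy' is an assumption for illustration only:

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    def cluster_changed():
        # Only the elected leader (the CRM leader when clustered,
        # otherwise the oldest surviving peer) should act.
        if not is_elected_leader('res_swift_haproxy'):
            return
        # ... leader-only work, e.g. rebuilding/distributing rings ...

The old names is_leader()/eligible_leader() still work but now log a deprecation WARNING and delegate to the new helpers.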
358=== added directory 'hooks/charmhelpers/contrib/network'
359=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
360=== added file 'hooks/charmhelpers/contrib/network/ip.py'
361--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
362+++ hooks/charmhelpers/contrib/network/ip.py 2014-10-06 15:38:41 +0000
363@@ -0,0 +1,343 @@
364+import glob
365+import re
366+import subprocess
367+import sys
368+
369+from functools import partial
370+
371+from charmhelpers.core.hookenv import unit_get
372+from charmhelpers.fetch import apt_install
373+from charmhelpers.core.hookenv import (
374+ WARNING,
375+ ERROR,
376+ log
377+)
378+
379+try:
380+ import netifaces
381+except ImportError:
382+ apt_install('python-netifaces')
383+ import netifaces
384+
385+try:
386+ import netaddr
387+except ImportError:
388+ apt_install('python-netaddr')
389+ import netaddr
390+
391+
392+def _validate_cidr(network):
393+ try:
394+ netaddr.IPNetwork(network)
395+ except (netaddr.core.AddrFormatError, ValueError):
396+ raise ValueError("Network (%s) is not in CIDR presentation format" %
397+ network)
398+
399+
400+def get_address_in_network(network, fallback=None, fatal=False):
401+ """
402+ Get an IPv4 or IPv6 address within the network from the host.
403+
404+ :param network (str): CIDR presentation format. For example,
405+ '192.168.1.0/24'.
406+ :param fallback (str): If no address is found, return fallback.
407+ :param fatal (boolean): If no address is found, fallback is not
408+ set and fatal is True then exit(1).
409+
410+ """
411+
412+ def not_found_error_out():
413+ log("No IP address found in network: %s" % network,
414+ level=ERROR)
415+ sys.exit(1)
416+
417+ if network is None:
418+ if fallback is not None:
419+ return fallback
420+ else:
421+ if fatal:
422+ not_found_error_out()
423+
424+ _validate_cidr(network)
425+ network = netaddr.IPNetwork(network)
426+ for iface in netifaces.interfaces():
427+ addresses = netifaces.ifaddresses(iface)
428+ if network.version == 4 and netifaces.AF_INET in addresses:
429+ addr = addresses[netifaces.AF_INET][0]['addr']
430+ netmask = addresses[netifaces.AF_INET][0]['netmask']
431+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
432+ if cidr in network:
433+ return str(cidr.ip)
434+ if network.version == 6 and netifaces.AF_INET6 in addresses:
435+ for addr in addresses[netifaces.AF_INET6]:
436+ if not addr['addr'].startswith('fe80'):
437+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
438+ addr['netmask']))
439+ if cidr in network:
440+ return str(cidr.ip)
441+
442+ if fallback is not None:
443+ return fallback
444+
445+ if fatal:
446+ not_found_error_out()
447+
448+ return None
449+
450+
451+def is_ipv6(address):
452+ '''Determine whether provided address is IPv6 or not'''
453+ try:
454+ address = netaddr.IPAddress(address)
455+ except netaddr.AddrFormatError:
456+ # probably a hostname - so not an address at all!
457+ return False
458+ else:
459+ return address.version == 6
460+
461+
462+def is_address_in_network(network, address):
463+ """
464+ Determine whether the provided address is within a network range.
465+
466+ :param network (str): CIDR presentation format. For example,
467+ '192.168.1.0/24'.
468+ :param address: An individual IPv4 or IPv6 address without a net
469+ mask or subnet prefix. For example, '192.168.1.1'.
470+ :returns boolean: Flag indicating whether address is in network.
471+ """
472+ try:
473+ network = netaddr.IPNetwork(network)
474+ except (netaddr.core.AddrFormatError, ValueError):
475+ raise ValueError("Network (%s) is not in CIDR presentation format" %
476+ network)
477+ try:
478+ address = netaddr.IPAddress(address)
479+ except (netaddr.core.AddrFormatError, ValueError):
480+ raise ValueError("Address (%s) is not in correct presentation format" %
481+ address)
482+ if address in network:
483+ return True
484+ else:
485+ return False
486+
487+
488+def _get_for_address(address, key):
489+ """Retrieve an attribute of or the physical interface that
490+ the IP address provided could be bound to.
491+
492+ :param address (str): An individual IPv4 or IPv6 address without a net
493+ mask or subnet prefix. For example, '192.168.1.1'.
494+ :param key: 'iface' for the physical interface name or an attribute
495+ of the configured interface, for example 'netmask'.
496+ :returns str: Requested attribute or None if address is not bindable.
497+ """
498+ address = netaddr.IPAddress(address)
499+ for iface in netifaces.interfaces():
500+ addresses = netifaces.ifaddresses(iface)
501+ if address.version == 4 and netifaces.AF_INET in addresses:
502+ addr = addresses[netifaces.AF_INET][0]['addr']
503+ netmask = addresses[netifaces.AF_INET][0]['netmask']
504+ cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
505+ if address in cidr:
506+ if key == 'iface':
507+ return iface
508+ else:
509+ return addresses[netifaces.AF_INET][0][key]
510+ if address.version == 6 and netifaces.AF_INET6 in addresses:
511+ for addr in addresses[netifaces.AF_INET6]:
512+ if not addr['addr'].startswith('fe80'):
513+ cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
514+ addr['netmask']))
515+ if address in cidr:
516+ if key == 'iface':
517+ return iface
518+ else:
519+ return addr[key]
520+ return None
521+
522+
523+get_iface_for_address = partial(_get_for_address, key='iface')
524+
525+get_netmask_for_address = partial(_get_for_address, key='netmask')
526+
527+
528+def format_ipv6_addr(address):
529+ """
530+ IPv6 needs to be wrapped with [] in url link to parse correctly.
531+ """
532+ if is_ipv6(address):
533+ address = "[%s]" % address
534+ else:
535+ log("Not a valid ipv6 address: %s" % address, level=WARNING)
536+ address = None
537+
538+ return address
539+
540+
541+def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
542+ fatal=True, exc_list=None):
543+ """
544+ Return the assigned IP address for a given interface, if any, or [].
545+ """
546+ # Extract nic if passed /dev/ethX
547+ if '/' in iface:
548+ iface = iface.split('/')[-1]
549+ if not exc_list:
550+ exc_list = []
551+ try:
552+ inet_num = getattr(netifaces, inet_type)
553+ except AttributeError:
554+ raise Exception('Unknown inet type ' + str(inet_type))
555+
556+ interfaces = netifaces.interfaces()
557+ if inc_aliases:
558+ ifaces = []
559+ for _iface in interfaces:
560+ if iface == _iface or _iface.split(':')[0] == iface:
561+ ifaces.append(_iface)
562+ if fatal and not ifaces:
563+ raise Exception("Invalid interface '%s'" % iface)
564+ ifaces.sort()
565+ else:
566+ if iface not in interfaces:
567+ if fatal:
568+ raise Exception("%s not found " % (iface))
569+ else:
570+ return []
571+ else:
572+ ifaces = [iface]
573+
574+ addresses = []
575+ for netiface in ifaces:
576+ net_info = netifaces.ifaddresses(netiface)
577+ if inet_num in net_info:
578+ for entry in net_info[inet_num]:
579+ if 'addr' in entry and entry['addr'] not in exc_list:
580+ addresses.append(entry['addr'])
581+ if fatal and not addresses:
582+ raise Exception("Interface '%s' doesn't have any %s addresses." %
583+ (iface, inet_type))
584+ return addresses
585+
586+get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
587+
588+
589+def get_iface_from_addr(addr):
590+ """Work out on which interface the provided address is configured."""
591+ for iface in netifaces.interfaces():
592+ addresses = netifaces.ifaddresses(iface)
593+ for inet_type in addresses:
594+ for _addr in addresses[inet_type]:
595+ _addr = _addr['addr']
596+ # link local
597+ ll_key = re.compile("(.+)%.*")
598+ raw = re.match(ll_key, _addr)
599+ if raw:
600+ _addr = raw.group(1)
601+ if _addr == addr:
602+ log("Address '%s' is configured on iface '%s'" %
603+ (addr, iface))
604+ return iface
605+
606+ msg = "Unable to infer net iface on which '%s' is configured" % (addr)
607+ raise Exception(msg)
608+
609+
610+def sniff_iface(f):
611+ """If no iface provided, inject net iface inferred from unit private
612+ address.
613+ """
614+ def iface_sniffer(*args, **kwargs):
615+ if not kwargs.get('iface', None):
616+ kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
617+
618+ return f(*args, **kwargs)
619+
620+ return iface_sniffer
621+
622+
623+@sniff_iface
624+def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
625+ dynamic_only=True):
626+ """Get assigned IPv6 address for a given interface.
627+
628+ Returns list of addresses found. If no address found, returns empty list.
629+
630+ If iface is None, we infer the current primary interface by doing a reverse
631+ lookup on the unit private-address.
632+
633+ We currently only support scope global IPv6 addresses i.e. non-temporary
634+ addresses. If no global IPv6 address is found, return the first one found
635+ in the ipv6 address list.
636+ """
637+ addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
638+ inc_aliases=inc_aliases, fatal=fatal,
639+ exc_list=exc_list)
640+
641+ if addresses:
642+ global_addrs = []
643+ for addr in addresses:
644+ key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
645+ m = re.match(key_scope_link_local, addr)
646+ if m:
647+ eui_64_mac = m.group(1)
648+ iface = m.group(2)
649+ else:
650+ global_addrs.append(addr)
651+
652+ if global_addrs:
653+ # Make sure any found global addresses are not temporary
654+ cmd = ['ip', 'addr', 'show', iface]
655+ out = subprocess.check_output(cmd)
656+ if dynamic_only:
657+ key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
658+ else:
659+ key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
660+
661+ addrs = []
662+ for line in out.split('\n'):
663+ line = line.strip()
664+ m = re.match(key, line)
665+ if m and 'temporary' not in line:
666+ # Return the first valid address we find
667+ for addr in global_addrs:
668+ if m.group(1) == addr:
669+ if not dynamic_only or \
670+ m.group(1).endswith(eui_64_mac):
671+ addrs.append(addr)
672+
673+ if addrs:
674+ return addrs
675+
676+ if fatal:
677+ raise Exception("Interface '%s' doesn't have a scope global "
678+ "non-temporary ipv6 address." % iface)
679+
680+ return []
681+
682+
683+def get_bridges(vnic_dir='/sys/devices/virtual/net'):
684+ """
685+ Return a list of bridges on the system or []
686+ """
687+ b_rgex = vnic_dir + '/*/bridge'
688+ return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
689+
690+
691+def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
692+ """
693+ Return a list of nics comprising a given bridge on the system or []
694+ """
695+ brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
696+ return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
697+
698+
699+def is_bridge_member(nic):
700+ """
701+ Check if a given nic is a member of a bridge
702+ """
703+ for bridge in get_bridges():
704+ if nic in get_bridge_nics(bridge):
705+ return True
706+ return False
707
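
Illustrative calls against the new network helpers; the CIDRs and addresses below are assumptions, not values from this proposal:

    from charmhelpers.contrib.network.ip import (
        get_address_in_network,
        is_address_in_network,
        is_ipv6,
        format_ipv6_addr,
    )

    # Pick the local address inside the given CIDR, falling back if
    # no configured interface matches.
    addr = get_address_in_network('192.168.0.0/24', fallback='10.0.0.1')

    in_net = is_address_in_network('192.168.0.0/24', '192.168.0.5')  # True
    v6 = is_ipv6('2001:db8::1')                                      # True
    host = format_ipv6_addr('2001:db8::1')         # '[2001:db8::1]'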
708=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
709=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
710=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
711--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
712+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-06 15:38:41 +0000
713@@ -0,0 +1,94 @@
714+from bzrlib.branch import Branch
715+import os
716+import re
717+from charmhelpers.contrib.amulet.deployment import (
718+ AmuletDeployment
719+)
720+
721+
722+class OpenStackAmuletDeployment(AmuletDeployment):
723+ """OpenStack amulet deployment.
724+
725+ This class inherits from AmuletDeployment and has additional support
726+ that is specifically for use by OpenStack charms.
727+ """
728+
729+ def __init__(self, series=None, openstack=None, source=None):
730+ """Initialize the deployment environment."""
731+ super(OpenStackAmuletDeployment, self).__init__(series)
732+ self.openstack = openstack
733+ self.source = source
734+
735+ def _is_dev_branch(self):
736+ """Determine if branch being tested is a dev (i.e. next) branch."""
737+ branch = Branch.open(os.getcwd())
738+ parent = branch.get_parent()
739+ pattern = re.compile("^.*/next/$")
740+ if (pattern.match(parent)):
741+ return True
742+ else:
743+ return False
744+
745+ def _determine_branch_locations(self, other_services):
746+ """Determine the branch locations for the other services.
747+
748+ If the branch being tested is a dev branch, then determine the
749+ development branch locations for the other services. Otherwise,
750+ the default charm store branches will be used."""
751+ name = 0
752+ if self._is_dev_branch():
753+ updated_services = []
754+ for svc in other_services:
755+ if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
756+ location = 'lp:charms/{}'.format(svc[name])
757+ else:
758+ temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
759+ location = temp.format(svc[name])
760+ updated_services.append(svc + (location,))
761+ other_services = updated_services
762+ return other_services
763+
764+ def _add_services(self, this_service, other_services):
765+ """Add services to the deployment and set openstack-origin/source."""
766+ name = 0
767+ other_services = self._determine_branch_locations(other_services)
768+ super(OpenStackAmuletDeployment, self)._add_services(this_service,
769+ other_services)
770+ services = other_services
771+ services.append(this_service)
772+ use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
773+
774+ if self.openstack:
775+ for svc in services:
776+ if svc[name] not in use_source:
777+ config = {'openstack-origin': self.openstack}
778+ self.d.configure(svc[name], config)
779+
780+ if self.source:
781+ for svc in services:
782+ if svc[name] in use_source:
783+ config = {'source': self.source}
784+ self.d.configure(svc[name], config)
785+
786+ def _configure_services(self, configs):
787+ """Configure all of the services."""
788+ for service, config in configs.iteritems():
789+ self.d.configure(service, config)
790+
791+ def _get_openstack_release(self):
792+ """Get openstack release.
793+
794+ Return an integer representing the enum value of the openstack
795+ release.
796+ """
797+ (self.precise_essex, self.precise_folsom, self.precise_grizzly,
798+ self.precise_havana, self.precise_icehouse,
799+ self.trusty_icehouse) = range(6)
800+ releases = {
801+ ('precise', None): self.precise_essex,
802+ ('precise', 'cloud:precise-folsom'): self.precise_folsom,
803+ ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
804+ ('precise', 'cloud:precise-havana'): self.precise_havana,
805+ ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
806+ ('trusty', None): self.trusty_icehouse}
807+ return releases[(self.series, self.openstack)]
808
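
A hedged sketch of a charm test built on this class; service names and config are placeholders, and _add_relations()/_deploy() are assumed to come from the AmuletDeployment base class (not shown in this hunk):

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment
    )

    class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
        def __init__(self, series, openstack=None, source=None):
            super(SwiftProxyBasicDeployment, self).__init__(
                series, openstack, source)
            # Services are (name,) tuples; branch locations are appended
            # automatically by _determine_branch_locations() when testing
            # a /next development branch.
            self._add_services(('swift-proxy',),
                               [('keystone',), ('swift-storage',)])
            self._configure_services(
                {'keystone': {'admin-password': 'openstack'}})
            self._deploy()  # assumed: provided by the base class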
809=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
810--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
811+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-06 15:38:41 +0000
812@@ -0,0 +1,276 @@
813+import logging
814+import os
815+import time
816+import urllib
817+
818+import glanceclient.v1.client as glance_client
819+import keystoneclient.v2_0 as keystone_client
820+import novaclient.v1_1.client as nova_client
821+
822+from charmhelpers.contrib.amulet.utils import (
823+ AmuletUtils
824+)
825+
826+DEBUG = logging.DEBUG
827+ERROR = logging.ERROR
828+
829+
830+class OpenStackAmuletUtils(AmuletUtils):
831+ """OpenStack amulet utilities.
832+
833+ This class inherits from AmuletUtils and has additional support
834+ that is specifically for use by OpenStack charms.
835+ """
836+
837+ def __init__(self, log_level=ERROR):
838+ """Initialize the deployment environment."""
839+ super(OpenStackAmuletUtils, self).__init__(log_level)
840+
841+ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
842+ public_port, expected):
843+ """Validate endpoint data.
844+
845+ Validate actual endpoint data vs expected endpoint data. The ports
846+ are used to find the matching endpoint.
847+ """
848+ found = False
849+ for ep in endpoints:
850+ self.log.debug('endpoint: {}'.format(repr(ep)))
851+ if (admin_port in ep.adminurl and
852+ internal_port in ep.internalurl and
853+ public_port in ep.publicurl):
854+ found = True
855+ actual = {'id': ep.id,
856+ 'region': ep.region,
857+ 'adminurl': ep.adminurl,
858+ 'internalurl': ep.internalurl,
859+ 'publicurl': ep.publicurl,
860+ 'service_id': ep.service_id}
861+ ret = self._validate_dict_data(expected, actual)
862+ if ret:
863+ return 'unexpected endpoint data - {}'.format(ret)
864+
865+ if not found:
866+ return 'endpoint not found'
867+
868+ def validate_svc_catalog_endpoint_data(self, expected, actual):
869+ """Validate service catalog endpoint data.
870+
871+ Validate a list of actual service catalog endpoints vs a list of
872+ expected service catalog endpoints.
873+ """
874+ self.log.debug('actual: {}'.format(repr(actual)))
875+ for k, v in expected.iteritems():
876+ if k in actual:
877+ ret = self._validate_dict_data(expected[k][0], actual[k][0])
878+ if ret:
879+ return self.endpoint_error(k, ret)
880+ else:
881+ return "endpoint {} does not exist".format(k)
882+ return ret
883+
884+ def validate_tenant_data(self, expected, actual):
885+ """Validate tenant data.
886+
887+ Validate a list of actual tenant data vs list of expected tenant
888+ data.
889+ """
890+ self.log.debug('actual: {}'.format(repr(actual)))
891+ for e in expected:
892+ found = False
893+ for act in actual:
894+ a = {'enabled': act.enabled, 'description': act.description,
895+ 'name': act.name, 'id': act.id}
896+ if e['name'] == a['name']:
897+ found = True
898+ ret = self._validate_dict_data(e, a)
899+ if ret:
900+ return "unexpected tenant data - {}".format(ret)
901+ if not found:
902+ return "tenant {} does not exist".format(e['name'])
903+ return ret
904+
905+ def validate_role_data(self, expected, actual):
906+ """Validate role data.
907+
908+ Validate a list of actual role data vs a list of expected role
909+ data.
910+ """
911+ self.log.debug('actual: {}'.format(repr(actual)))
912+ for e in expected:
913+ found = False
914+ for act in actual:
915+ a = {'name': act.name, 'id': act.id}
916+ if e['name'] == a['name']:
917+ found = True
918+ ret = self._validate_dict_data(e, a)
919+ if ret:
920+ return "unexpected role data - {}".format(ret)
921+ if not found:
922+ return "role {} does not exist".format(e['name'])
923+ return ret
924+
925+ def validate_user_data(self, expected, actual):
926+ """Validate user data.
927+
928+ Validate a list of actual user data vs a list of expected user
929+ data.
930+ """
931+ self.log.debug('actual: {}'.format(repr(actual)))
932+ for e in expected:
933+ found = False
934+ for act in actual:
935+ a = {'enabled': act.enabled, 'name': act.name,
936+ 'email': act.email, 'tenantId': act.tenantId,
937+ 'id': act.id}
938+ if e['name'] == a['name']:
939+ found = True
940+ ret = self._validate_dict_data(e, a)
941+ if ret:
942+ return "unexpected user data - {}".format(ret)
943+ if not found:
944+ return "user {} does not exist".format(e['name'])
945+ return ret
946+
947+ def validate_flavor_data(self, expected, actual):
948+ """Validate flavor data.
949+
950+ Validate a list of actual flavors vs a list of expected flavors.
951+ """
952+ self.log.debug('actual: {}'.format(repr(actual)))
953+ act = [a.name for a in actual]
954+ return self._validate_list_data(expected, act)
955+
956+ def tenant_exists(self, keystone, tenant):
957+ """Return True if tenant exists."""
958+ return tenant in [t.name for t in keystone.tenants.list()]
959+
960+ def authenticate_keystone_admin(self, keystone_sentry, user, password,
961+ tenant):
962+ """Authenticates admin user with the keystone admin endpoint."""
963+ unit = keystone_sentry
964+ service_ip = unit.relation('shared-db',
965+ 'mysql:shared-db')['private-address']
966+ ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
967+ return keystone_client.Client(username=user, password=password,
968+ tenant_name=tenant, auth_url=ep)
969+
970+ def authenticate_keystone_user(self, keystone, user, password, tenant):
971+ """Authenticates a regular user with the keystone public endpoint."""
972+ ep = keystone.service_catalog.url_for(service_type='identity',
973+ endpoint_type='publicURL')
974+ return keystone_client.Client(username=user, password=password,
975+ tenant_name=tenant, auth_url=ep)
976+
977+ def authenticate_glance_admin(self, keystone):
978+ """Authenticates admin user with glance."""
979+ ep = keystone.service_catalog.url_for(service_type='image',
980+ endpoint_type='adminURL')
981+ return glance_client.Client(ep, token=keystone.auth_token)
982+
983+ def authenticate_nova_user(self, keystone, user, password, tenant):
984+ """Authenticates a regular user with nova-api."""
985+ ep = keystone.service_catalog.url_for(service_type='identity',
986+ endpoint_type='publicURL')
987+ return nova_client.Client(username=user, api_key=password,
988+ project_id=tenant, auth_url=ep)
989+
990+ def create_cirros_image(self, glance, image_name):
991+ """Download the latest cirros image and upload it to glance."""
992+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
993+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
994+ if http_proxy:
995+ proxies = {'http': http_proxy}
996+ opener = urllib.FancyURLopener(proxies)
997+ else:
998+ opener = urllib.FancyURLopener()
999+
1000+ f = opener.open("http://download.cirros-cloud.net/version/released")
1001+ version = f.read().strip()
1002+ cirros_img = "cirros-{}-x86_64-disk.img".format(version)
1003+ local_path = os.path.join('tests', cirros_img)
1004+
1005+ if not os.path.exists(local_path):
1006+ cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
1007+ version, cirros_img)
1008+ opener.retrieve(cirros_url, local_path)
1009+ f.close()
1010+
1011+ with open(local_path) as f:
1012+ image = glance.images.create(name=image_name, is_public=True,
1013+ disk_format='qcow2',
1014+ container_format='bare', data=f)
1015+ count = 1
1016+ status = image.status
1017+ while status != 'active' and count < 10:
1018+ time.sleep(3)
1019+ image = glance.images.get(image.id)
1020+ status = image.status
1021+ self.log.debug('image status: {}'.format(status))
1022+ count += 1
1023+
1024+ if status != 'active':
1025+ self.log.error('image creation timed out')
1026+ return None
1027+
1028+ return image
1029+
1030+ def delete_image(self, glance, image):
1031+ """Delete the specified image."""
1032+ num_before = len(list(glance.images.list()))
1033+ glance.images.delete(image)
1034+
1035+ count = 1
1036+ num_after = len(list(glance.images.list()))
1037+ while num_after != (num_before - 1) and count < 10:
1038+ time.sleep(3)
1039+ num_after = len(list(glance.images.list()))
1040+ self.log.debug('number of images: {}'.format(num_after))
1041+ count += 1
1042+
1043+ if num_after != (num_before - 1):
1044+ self.log.error('image deletion timed out')
1045+ return False
1046+
1047+ return True
1048+
1049+ def create_instance(self, nova, image_name, instance_name, flavor):
1050+ """Create the specified instance."""
1051+ image = nova.images.find(name=image_name)
1052+ flavor = nova.flavors.find(name=flavor)
1053+ instance = nova.servers.create(name=instance_name, image=image,
1054+ flavor=flavor)
1055+
1056+ count = 1
1057+ status = instance.status
1058+ while status != 'ACTIVE' and count < 60:
1059+ time.sleep(3)
1060+ instance = nova.servers.get(instance.id)
1061+ status = instance.status
1062+ self.log.debug('instance status: {}'.format(status))
1063+ count += 1
1064+
1065+ if status != 'ACTIVE':
1066+ self.log.error('instance creation timed out')
1067+ return None
1068+
1069+ return instance
1070+
1071+ def delete_instance(self, nova, instance):
1072+ """Delete the specified instance."""
1073+ num_before = len(list(nova.servers.list()))
1074+ nova.servers.delete(instance)
1075+
1076+ count = 1
1077+ num_after = len(list(nova.servers.list()))
1078+ while num_after != (num_before - 1) and count < 10:
1079+ time.sleep(3)
1080+ num_after = len(list(nova.servers.list()))
1081+ self.log.debug('number of instances: {}'.format(num_after))
1082+ count += 1
1083+
1084+ if num_after != (num_before - 1):
1085+ self.log.error('instance deletion timed out')
1086+ return False
1087+
1088+ return True
1089
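
A hedged sketch of the utilities above in a test; keystone_sentry and the credentials are placeholders supplied by the amulet deployment:

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        DEBUG,
    )

    u = OpenStackAmuletUtils(log_level=DEBUG)
    # keystone_sentry: the keystone unit sentry from the deployment.
    keystone = u.authenticate_keystone_admin(keystone_sentry,
                                             user='admin',
                                             password='openstack',
                                             tenant='admin')
    glance = u.authenticate_glance_admin(keystone)
    image = u.create_cirros_image(glance, 'cirros-test')
    if image:
        u.delete_image(glance, image)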
1090=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
1091--- hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 15:57:07 +0000
1092+++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-06 15:38:41 +0000
1093@@ -8,7 +8,6 @@
1094 check_call
1095 )
1096
1097-
1098 from charmhelpers.fetch import (
1099 apt_install,
1100 filter_installed_packages,
1101@@ -21,10 +20,20 @@
1102 relation_get,
1103 relation_ids,
1104 related_units,
1105+ relation_set,
1106 unit_get,
1107 unit_private_ip,
1108 ERROR,
1109- INFO
1110+<<<<<<< TREE
1111+ INFO
1112+=======
1113+ INFO
1114+)
1115+
1116+from charmhelpers.core.host import (
1117+ mkdir,
1118+ write_file
1119+>>>>>>> MERGE-SOURCE
1120 )
1121
1122 from charmhelpers.contrib.hahelpers.cluster import (
1123@@ -37,12 +46,20 @@
1124 from charmhelpers.contrib.hahelpers.apache import (
1125 get_cert,
1126 get_ca_cert,
1127+ install_ca_cert,
1128 )
1129
1130 from charmhelpers.contrib.openstack.neutron import (
1131 neutron_plugin_attribute,
1132 )
1133
1134+from charmhelpers.contrib.network.ip import (
1135+ get_address_in_network,
1136+ get_ipv6_addr,
1137+ format_ipv6_addr,
1138+ is_address_in_network
1139+)
1140+
1141 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
1142
1143
1144@@ -135,8 +152,26 @@
1145 'Missing required charm config options. '
1146 '(database name and user)')
1147 raise OSContextError
1148+
1149 ctxt = {}
1150
1151+ # NOTE(jamespage) if mysql charm provides a network upon which
1152+ # access to the database should be made, reconfigure relation
1153+ # with the service units local address and defer execution
1154+ access_network = relation_get('access-network')
1155+ if access_network is not None:
1156+ if self.relation_prefix is not None:
1157+ hostname_key = "{}_hostname".format(self.relation_prefix)
1158+ else:
1159+ hostname_key = "hostname"
1160+ access_hostname = get_address_in_network(access_network,
1161+ unit_get('private-address'))
1162+ set_hostname = relation_get(attribute=hostname_key,
1163+ unit=local_unit())
1164+ if set_hostname != access_hostname:
1165+ relation_set(relation_settings={hostname_key: access_hostname})
1166+ return ctxt # Defer any further hook execution for now....
1167+
1168 password_setting = 'password'
1169 if self.relation_prefix:
1170 password_setting = self.relation_prefix + '_password'
1171@@ -144,8 +179,10 @@
1172 for rid in relation_ids('shared-db'):
1173 for unit in related_units(rid):
1174 rdata = relation_get(rid=rid, unit=unit)
1175+ host = rdata.get('db_host')
1176+ host = format_ipv6_addr(host) or host
1177 ctxt = {
1178- 'database_host': rdata.get('db_host'),
1179+ 'database_host': host,
1180 'database': self.database,
1181 'database_user': self.user,
1182 'database_password': rdata.get(password_setting),
1183@@ -221,10 +258,15 @@
1184 for rid in relation_ids('identity-service'):
1185 for unit in related_units(rid):
1186 rdata = relation_get(rid=rid, unit=unit)
1187+ serv_host = rdata.get('service_host')
1188+ serv_host = format_ipv6_addr(serv_host) or serv_host
1189+ auth_host = rdata.get('auth_host')
1190+ auth_host = format_ipv6_addr(auth_host) or auth_host
1191+
1192 ctxt = {
1193 'service_port': rdata.get('service_port'),
1194- 'service_host': rdata.get('service_host'),
1195- 'auth_host': rdata.get('auth_host'),
1196+ 'service_host': serv_host,
1197+ 'auth_host': auth_host,
1198 'auth_port': rdata.get('auth_port'),
1199 'admin_tenant_name': rdata.get('service_tenant'),
1200 'admin_user': rdata.get('service_username'),
1201@@ -244,32 +286,42 @@
1202
1203
1204 class AMQPContext(OSContextGenerator):
1205- interfaces = ['amqp']
1206
1207- def __init__(self, ssl_dir=None):
1208+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
1209 self.ssl_dir = ssl_dir
1210+ self.rel_name = rel_name
1211+ self.relation_prefix = relation_prefix
1212+ self.interfaces = [rel_name]
1213
1214 def __call__(self):
1215 log('Generating template context for amqp')
1216 conf = config()
1217+ user_setting = 'rabbit-user'
1218+ vhost_setting = 'rabbit-vhost'
1219+ if self.relation_prefix:
1220+ user_setting = self.relation_prefix + '-rabbit-user'
1221+ vhost_setting = self.relation_prefix + '-rabbit-vhost'
1222+
1223 try:
1224- username = conf['rabbit-user']
1225- vhost = conf['rabbit-vhost']
1226+ username = conf[user_setting]
1227+ vhost = conf[vhost_setting]
1228 except KeyError as e:
1229 log('Could not generate shared_db context. '
1230 'Missing required charm config options: %s.' % e)
1231 raise OSContextError
1232 ctxt = {}
1233- for rid in relation_ids('amqp'):
1234+ for rid in relation_ids(self.rel_name):
1235 ha_vip_only = False
1236 for unit in related_units(rid):
1237 if relation_get('clustered', rid=rid, unit=unit):
1238 ctxt['clustered'] = True
1239- ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
1240- unit=unit)
1241+ vip = relation_get('vip', rid=rid, unit=unit)
1242+ vip = format_ipv6_addr(vip) or vip
1243+ ctxt['rabbitmq_host'] = vip
1244 else:
1245- ctxt['rabbitmq_host'] = relation_get('private-address',
1246- rid=rid, unit=unit)
1247+ host = relation_get('private-address', rid=rid, unit=unit)
1248+ host = format_ipv6_addr(host) or host
1249+ ctxt['rabbitmq_host'] = host
1250 ctxt.update({
1251 'rabbitmq_user': username,
1252 'rabbitmq_password': relation_get('password', rid=rid,
1253@@ -308,8 +360,9 @@
1254 and len(related_units(rid)) > 1:
1255 rabbitmq_hosts = []
1256 for unit in related_units(rid):
1257- rabbitmq_hosts.append(relation_get('private-address',
1258- rid=rid, unit=unit))
1259+ host = relation_get('private-address', rid=rid, unit=unit)
1260+ host = format_ipv6_addr(host) or host
1261+ rabbitmq_hosts.append(host)
1262 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
1263 if not context_complete(ctxt):
1264 return {}
1265@@ -333,10 +386,13 @@
1266 use_syslog = str(config('use-syslog')).lower()
1267 for rid in relation_ids('ceph'):
1268 for unit in related_units(rid):
1269- mon_hosts.append(relation_get('private-address', rid=rid,
1270- unit=unit))
1271 auth = relation_get('auth', rid=rid, unit=unit)
1272 key = relation_get('key', rid=rid, unit=unit)
1273+ ceph_addr = \
1274+ relation_get('ceph-public-address', rid=rid, unit=unit) or \
1275+ relation_get('private-address', rid=rid, unit=unit)
1276+ ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
1277+ mon_hosts.append(ceph_addr)
1278
1279 ctxt = {
1280 'mon_hosts': ' '.join(mon_hosts),
1281@@ -370,7 +426,14 @@
1282
1283 cluster_hosts = {}
1284 l_unit = local_unit().replace('/', '-')
1285- cluster_hosts[l_unit] = unit_get('private-address')
1286+
1287+ if config('prefer-ipv6'):
1288+ addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1289+ else:
1290+ addr = unit_get('private-address')
1291+
1292+ cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
1293+ addr)
1294
1295 for rid in relation_ids('cluster'):
1296 for unit in related_units(rid):
1297@@ -381,6 +444,21 @@
1298 ctxt = {
1299 'units': cluster_hosts,
1300 }
1301+
1302+ if config('haproxy-server-timeout'):
1303+ ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
1304+ if config('haproxy-client-timeout'):
1305+ ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
1306+
1307+ if config('prefer-ipv6'):
1308+ ctxt['local_host'] = 'ip6-localhost'
1309+ ctxt['haproxy_host'] = '::'
1310+ ctxt['stat_port'] = ':::8888'
1311+ else:
1312+ ctxt['local_host'] = '127.0.0.1'
1313+ ctxt['haproxy_host'] = '0.0.0.0'
1314+ ctxt['stat_port'] = ':8888'
1315+
1316 if len(cluster_hosts.keys()) > 1:
1317 # Enable haproxy when we have enough peers.
1318 log('Ensuring haproxy enabled in /etc/default/haproxy.')
1319@@ -419,12 +497,13 @@
1320 """
1321 Generates a context for an apache vhost configuration that configures
1322 HTTPS reverse proxying for one or many endpoints. Generated context
1323- looks something like:
1324- {
1325- 'namespace': 'cinder',
1326- 'private_address': 'iscsi.mycinderhost.com',
1327- 'endpoints': [(8776, 8766), (8777, 8767)]
1328- }
1329+ looks something like::
1330+
1331+ {
1332+ 'namespace': 'cinder',
1333+ 'private_address': 'iscsi.mycinderhost.com',
1334+ 'endpoints': [(8776, 8766), (8777, 8767)]
1335+ }
1336
1337 The endpoints list consists of a tuples mapping external ports
1338 to internal ports.
1339@@ -440,22 +519,36 @@
1340 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
1341 check_call(cmd)
1342
1343- def configure_cert(self):
1344- if not os.path.isdir('/etc/apache2/ssl'):
1345- os.mkdir('/etc/apache2/ssl')
1346+ def configure_cert(self, cn=None):
1347 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
1348- if not os.path.isdir(ssl_dir):
1349- os.mkdir(ssl_dir)
1350- cert, key = get_cert()
1351- with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
1352- cert_out.write(b64decode(cert))
1353- with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
1354- key_out.write(b64decode(key))
1355+ mkdir(path=ssl_dir)
1356+ cert, key = get_cert(cn)
1357+ if cn:
1358+ cert_filename = 'cert_{}'.format(cn)
1359+ key_filename = 'key_{}'.format(cn)
1360+ else:
1361+ cert_filename = 'cert'
1362+ key_filename = 'key'
1363+ write_file(path=os.path.join(ssl_dir, cert_filename),
1364+ content=b64decode(cert))
1365+ write_file(path=os.path.join(ssl_dir, key_filename),
1366+ content=b64decode(key))
1367+
1368+ def configure_ca(self):
1369 ca_cert = get_ca_cert()
1370 if ca_cert:
1371- with open(CA_CERT_PATH, 'w') as ca_out:
1372- ca_out.write(b64decode(ca_cert))
1373- check_call(['update-ca-certificates'])
1374+ install_ca_cert(b64decode(ca_cert))
1375+
1376+ def canonical_names(self):
1377+ '''Figure out which canonical names clients will use to access this service'''
1378+ cns = []
1379+ for r_id in relation_ids('identity-service'):
1380+ for unit in related_units(r_id):
1381+ rdata = relation_get(rid=r_id, unit=unit)
1382+ for k in rdata:
1383+ if k.startswith('ssl_key_'):
1384+ cns.append(k.lstrip('ssl_key_'))
1385+ return list(set(cns))
1386
1387 def __call__(self):
1388 if isinstance(self.external_ports, basestring):
1389@@ -463,21 +556,47 @@
1390 if (not self.external_ports or not https()):
1391 return {}
1392
1393- self.configure_cert()
1394+ self.configure_ca()
1395 self.enable_modules()
1396
1397 ctxt = {
1398 'namespace': self.service_namespace,
1399- 'private_address': unit_get('private-address'),
1400- 'endpoints': []
1401+ 'endpoints': [],
1402+ 'ext_ports': []
1403 }
1404- if is_clustered():
1405- ctxt['private_address'] = config('vip')
1406- for api_port in self.external_ports:
1407- ext_port = determine_apache_port(api_port)
1408- int_port = determine_api_port(api_port)
1409- portmap = (int(ext_port), int(int_port))
1410- ctxt['endpoints'].append(portmap)
1411+
1412+ for cn in self.canonical_names():
1413+ self.configure_cert(cn)
1414+
1415+ addresses = []
1416+ vips = []
1417+ if config('vip'):
1418+ vips = config('vip').split()
1419+
1420+ for network_type in ['os-internal-network',
1421+ 'os-admin-network',
1422+ 'os-public-network']:
1423+ address = get_address_in_network(config(network_type),
1424+ unit_get('private-address'))
1425+ if len(vips) > 0 and is_clustered():
1426+ for vip in vips:
1427+ if is_address_in_network(config(network_type),
1428+ vip):
1429+ addresses.append((address, vip))
1430+ break
1431+ elif is_clustered():
1432+ addresses.append((address, config('vip')))
1433+ else:
1434+ addresses.append((address, address))
1435+
1436+ for address, endpoint in set(addresses):
1437+ for api_port in self.external_ports:
1438+ ext_port = determine_apache_port(api_port)
1439+ int_port = determine_api_port(api_port)
1440+ portmap = (address, endpoint, int(ext_port), int(int_port))
1441+ ctxt['endpoints'].append(portmap)
1442+ ctxt['ext_ports'].append(int(ext_port))
1443+ ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
1444 return ctxt
1445
1446
1447@@ -542,6 +661,26 @@
1448
1449 return nvp_ctxt
1450
1451+ def n1kv_ctxt(self):
1452+ driver = neutron_plugin_attribute(self.plugin, 'driver',
1453+ self.network_manager)
1454+ n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
1455+ self.network_manager)
1456+ n1kv_ctxt = {
1457+ 'core_plugin': driver,
1458+ 'neutron_plugin': 'n1kv',
1459+ 'neutron_security_groups': self.neutron_security_groups,
1460+ 'local_ip': unit_private_ip(),
1461+ 'config': n1kv_config,
1462+ 'vsm_ip': config('n1kv-vsm-ip'),
1463+ 'vsm_username': config('n1kv-vsm-username'),
1464+ 'vsm_password': config('n1kv-vsm-password'),
1465+ 'restrict_policy_profiles': config(
1466+ 'n1kv_restrict_policy_profiles'),
1467+ }
1468+
1469+ return n1kv_ctxt
1470+
1471 def neutron_ctxt(self):
1472 if https():
1473 proto = 'https'
1474@@ -573,6 +712,8 @@
1475 ctxt.update(self.ovs_ctxt())
1476 elif self.plugin in ['nvp', 'nsx']:
1477 ctxt.update(self.nvp_ctxt())
1478+ elif self.plugin == 'n1kv':
1479+ ctxt.update(self.n1kv_ctxt())
1480
1481 alchemy_flags = config('neutron-alchemy-flags')
1482 if alchemy_flags:
1483@@ -612,7 +753,7 @@
1484 The subordinate interface allows subordinates to export their
1485 configuration requirements to the principle for multiple config
1486 files and multiple serivces. Ie, a subordinate that has interfaces
1487- to both glance and nova may export to following yaml blob as json:
1488+ to both glance and nova may export to following yaml blob as json::
1489
1490 glance:
1491 /etc/glance/glance-api.conf:
1492@@ -631,7 +772,8 @@
1493
1494 It is then up to the principle charms to subscribe this context to
1495 the service+config file it is interestd in. Configuration data will
1496- be available in the template context, in glance's case, as:
1497+ be available in the template context, in glance's case, as::
1498+
1499 ctxt = {
1500 ... other context ...
1501 'subordinate_config': {
1502@@ -684,15 +826,38 @@
1503
1504 sub_config = sub_config[self.config_file]
1505 for k, v in sub_config.iteritems():
1506- if k == 'sections':
1507- for section, config_dict in v.iteritems():
1508- log("adding section '%s'" % (section))
1509- ctxt[k][section] = config_dict
1510- else:
1511- ctxt[k] = v
1512-
1513- log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1514-
1515+<<<<<<< TREE
1516+ if k == 'sections':
1517+ for section, config_dict in v.iteritems():
1518+ log("adding section '%s'" % (section))
1519+ ctxt[k][section] = config_dict
1520+ else:
1521+ ctxt[k] = v
1522+
1523+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1524+
1525+=======
1526+ if k == 'sections':
1527+ for section, config_dict in v.iteritems():
1528+ log("adding section '%s'" % (section))
1529+ ctxt[k][section] = config_dict
1530+ else:
1531+ ctxt[k] = v
1532+
1533+ log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
1534+
1535+ return ctxt
1536+
1537+
1538+class LogLevelContext(OSContextGenerator):
1539+
1540+ def __call__(self):
1541+ ctxt = {}
1542+ ctxt['debug'] = \
1543+ False if config('debug') is None else config('debug')
1544+ ctxt['verbose'] = \
1545+ False if config('verbose') is None else config('verbose')
1546+>>>>>>> MERGE-SOURCE
1547 return ctxt
1548
1549
1550@@ -703,3 +868,16 @@
1551 'use_syslog': config('use-syslog')
1552 }
1553 return ctxt
1554+
1555+
1556+class BindHostContext(OSContextGenerator):
1557+
1558+ def __call__(self):
1559+ if config('prefer-ipv6'):
1560+ return {
1561+ 'bind_host': '::'
1562+ }
1563+ else:
1564+ return {
1565+ 'bind_host': '0.0.0.0'
1566+ }
1567
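
A minimal sketch of the new BindHostContext being rendered, which flips the listen address based on the prefer-ipv6 charm option:

    from charmhelpers.contrib.openstack.context import BindHostContext

    ctxt = BindHostContext()()
    # -> {'bind_host': '0.0.0.0'} by default,
    #    {'bind_host': '::'} when prefer-ipv6=True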
1568=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
1569--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
1570+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-06 15:38:41 +0000
1571@@ -0,0 +1,79 @@
1572+from charmhelpers.core.hookenv import (
1573+ config,
1574+ unit_get,
1575+)
1576+
1577+from charmhelpers.contrib.network.ip import (
1578+ get_address_in_network,
1579+ is_address_in_network,
1580+ is_ipv6,
1581+ get_ipv6_addr,
1582+)
1583+
1584+from charmhelpers.contrib.hahelpers.cluster import is_clustered
1585+
1586+PUBLIC = 'public'
1587+INTERNAL = 'int'
1588+ADMIN = 'admin'
1589+
1590+_address_map = {
1591+ PUBLIC: {
1592+ 'config': 'os-public-network',
1593+ 'fallback': 'public-address'
1594+ },
1595+ INTERNAL: {
1596+ 'config': 'os-internal-network',
1597+ 'fallback': 'private-address'
1598+ },
1599+ ADMIN: {
1600+ 'config': 'os-admin-network',
1601+ 'fallback': 'private-address'
1602+ }
1603+}
1604+
1605+
1606+def canonical_url(configs, endpoint_type=PUBLIC):
1607+ '''
1608+ Returns the correct HTTP URL to this host given the state of HTTPS
1609+ configuration, hacluster and charm configuration.
1610+
1611+ :configs OSTemplateRenderer: A config templating object to inspect for
1612+ a complete https context.
1613+ :endpoint_type str: The endpoint type to resolve.
1614+
1615+ :returns str: Base URL for services on the current service unit.
1616+ '''
1617+ scheme = 'http'
1618+ if 'https' in configs.complete_contexts():
1619+ scheme = 'https'
1620+ address = resolve_address(endpoint_type)
1621+ if is_ipv6(address):
1622+ address = "[{}]".format(address)
1623+ return '%s://%s' % (scheme, address)
1624+
1625+
1626+def resolve_address(endpoint_type=PUBLIC):
1627+ resolved_address = None
1628+ if is_clustered():
1629+ if config(_address_map[endpoint_type]['config']) is None:
1630+ # Assume vip is simple and pass back directly
1631+ resolved_address = config('vip')
1632+ else:
1633+ for vip in config('vip').split():
1634+ if is_address_in_network(
1635+ config(_address_map[endpoint_type]['config']),
1636+ vip):
1637+ resolved_address = vip
1638+ else:
1639+ if config('prefer-ipv6'):
1640+ fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
1641+ else:
1642+ fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
1643+ resolved_address = get_address_in_network(
1644+ config(_address_map[endpoint_type]['config']), fallback_addr)
1645+
1646+ if resolved_address is None:
1647+ raise ValueError('Unable to resolve a suitable IP address'
1648+ ' based on charm state and configuration')
1649+ else:
1650+ return resolved_address
1651
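A minimal sketch of how a charm hook might consume the new helper, assuming `configs` is the charm's OSConfigRenderer and the port is illustrative:

    from charmhelpers.contrib.openstack.ip import canonical_url, PUBLIC, ADMIN

    def endpoint_urls(configs):
        # 'https' appears in configs.complete_contexts() once certificates
        # are in place, at which point canonical_url() switches scheme;
        # IPv6 addresses come back already bracketed.
        public_url = '%s:8080' % canonical_url(configs, PUBLIC)
        admin_url = '%s:8080' % canonical_url(configs, ADMIN)
        return {'public_url': public_url, 'admin_url': admin_url}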
1652=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
1653--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:39:11 +0000
1654+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-10-06 15:38:41 +0000
1655@@ -128,6 +128,20 @@
1656 'server_packages': ['neutron-server',
1657 'neutron-plugin-vmware'],
1658 'server_services': ['neutron-server']
1659+ },
1660+ 'n1kv': {
1661+ 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
1662+ 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
1663+ 'contexts': [
1664+ context.SharedDBContext(user=config('neutron-database-user'),
1665+ database=config('neutron-database'),
1666+ relation_prefix='neutron',
1667+ ssl_dir=NEUTRON_CONF_DIR)],
1668+ 'services': [],
1669+ 'packages': [['neutron-plugin-cisco']],
1670+ 'server_packages': ['neutron-server',
1671+ 'neutron-plugin-cisco'],
1672+ 'server_services': ['neutron-server']
1673 }
1674 }
1675 if release >= 'icehouse':
1676
1677=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
1678--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-03-27 11:23:24 +0000
1679+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-10-06 15:38:41 +0000
1680@@ -1,6 +1,6 @@
1681 global
1682- log 127.0.0.1 local0
1683- log 127.0.0.1 local1 notice
1684+ log {{ local_host }} local0
1685+ log {{ local_host }} local1 notice
1686 maxconn 20000
1687 user haproxy
1688 group haproxy
1689@@ -14,10 +14,19 @@
1690 retries 3
1691 timeout queue 1000
1692 timeout connect 1000
1693+{% if haproxy_client_timeout -%}
1694+ timeout client {{ haproxy_client_timeout }}
1695+{% else -%}
1696 timeout client 30000
1697+{% endif -%}
1698+
1699+{% if haproxy_server_timeout -%}
1700+ timeout server {{ haproxy_server_timeout }}
1701+{% else -%}
1702 timeout server 30000
1703+{% endif -%}
1704
1705-listen stats :8888
1706+listen stats {{ stat_port }}
1707 mode http
1708 stats enable
1709 stats hide-version
1710@@ -27,7 +36,12 @@
1711
1712 {% if units -%}
1713 {% for service, ports in service_ports.iteritems() -%}
1714-listen {{ service }} 0.0.0.0:{{ ports[0] }}
1715+listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
1716+ balance roundrobin
1717+ {% for unit, address in units.iteritems() -%}
1718+ server {{ unit }} {{ address }}:{{ ports[1] }} check
1719+ {% endfor %}
1720+listen {{ service }}_ipv6 :::{{ ports[0] }}
1721 balance roundrobin
1722 {% for unit, address in units.iteritems() -%}
1723 server {{ unit }} {{ address }}:{{ ports[1] }} check
1724
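The template now renders paired IPv4/IPv6 frontends per service and honours optional timeout overrides. A sketch of the context it expects, with key names taken from the template above and all values illustrative (note that `stat_port` carries its leading colon, and `ports[0]`/`ports[1]` are the frontend and backend ports respectively):

    ctxt = {
        'local_host': '127.0.0.1',
        'stat_port': ':8888',
        'haproxy_client_timeout': None,  # None -> 30000ms default branch
        'haproxy_server_timeout': None,
        'units': {'swift-proxy-0': '10.0.0.10',
                  'swift-proxy-1': '10.0.0.11'},
        'service_ports': {'swift': [8080, 8070]},
    }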
1725=== modified file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend'
1726--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2013-09-27 12:02:37 +0000
1727+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2014-10-06 15:38:41 +0000
1728@@ -1,16 +1,18 @@
1729 {% if endpoints -%}
1730-{% for ext, int in endpoints -%}
1731-Listen {{ ext }}
1732-NameVirtualHost *:{{ ext }}
1733-<VirtualHost *:{{ ext }}>
1734- ServerName {{ private_address }}
1735+{% for ext_port in ext_ports -%}
1736+Listen {{ ext_port }}
1737+{% endfor -%}
1738+{% for address, endpoint, ext, int in endpoints -%}
1739+<VirtualHost {{ address }}:{{ ext }}>
1740+ ServerName {{ endpoint }}
1741 SSLEngine on
1742- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1743- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1744+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
1745+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
1746 ProxyPass / http://localhost:{{ int }}/
1747 ProxyPassReverse / http://localhost:{{ int }}/
1748 ProxyPreserveHost on
1749 </VirtualHost>
1750+{% endfor -%}
1751 <Proxy *>
1752 Order deny,allow
1753 Allow from all
1754@@ -19,5 +21,4 @@
1755 Order allow,deny
1756 Allow from all
1757 </Location>
1758-{% endfor -%}
1759 {% endif -%}
1760
1761=== modified file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf'
1762--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2013-09-27 12:02:37 +0000
1763+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2014-10-06 15:38:41 +0000
1764@@ -1,16 +1,18 @@
1765 {% if endpoints -%}
1766-{% for ext, int in endpoints -%}
1767-Listen {{ ext }}
1768-NameVirtualHost *:{{ ext }}
1769-<VirtualHost *:{{ ext }}>
1770- ServerName {{ private_address }}
1771+{% for ext_port in ext_ports -%}
1772+Listen {{ ext_port }}
1773+{% endfor -%}
1774+{% for address, endpoint, ext, int in endpoints -%}
1775+<VirtualHost {{ address }}:{{ ext }}>
1776+ ServerName {{ endpoint }}
1777 SSLEngine on
1778- SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
1779- SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
1780+ SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
1781+ SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
1782 ProxyPass / http://localhost:{{ int }}/
1783 ProxyPassReverse / http://localhost:{{ int }}/
1784 ProxyPreserveHost on
1785 </VirtualHost>
1786+{% endfor -%}
1787 <Proxy *>
1788 Order deny,allow
1789 Allow from all
1790@@ -19,5 +21,4 @@
1791 Order allow,deny
1792 Allow from all
1793 </Location>
1794-{% endfor -%}
1795 {% endif -%}
1796
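Both frontend templates now take `endpoints` as 4-tuples plus a separate `ext_ports` list, and expect per-endpoint certificate files named `cert_<endpoint>`/`key_<endpoint>`. A sketch of the matching context, values illustrative:

    ctxt = {
        'namespace': 'swift-proxy',
        'ext_ports': [8080],
        # (vhost address, endpoint name, external port, internal port)
        'endpoints': [('10.0.0.10', '10.0.0.10', 8080, 8070)],
    }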
1797=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
1798--- hooks/charmhelpers/contrib/openstack/templating.py 2013-09-27 12:02:37 +0000
1799+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-10-06 15:38:41 +0000
1800@@ -30,17 +30,17 @@
1801 loading dir.
1802
1803 A charm may also ship a templates dir with this module
1804- and it will be appended to the bottom of the search list, eg:
1805- hooks/charmhelpers/contrib/openstack/templates.
1806-
1807- :param templates_dir: str: Base template directory containing release
1808- sub-directories.
1809- :param os_release : str: OpenStack release codename to construct template
1810- loader.
1811-
1812- :returns : jinja2.ChoiceLoader constructed with a list of
1813- jinja2.FilesystemLoaders, ordered in descending
1814- order by OpenStack release.
1815+ and it will be appended to the bottom of the search list, eg::
1816+
1817+ hooks/charmhelpers/contrib/openstack/templates
1818+
1819+ :param templates_dir (str): Base template directory containing release
1820+ sub-directories.
1821+ :param os_release (str): OpenStack release codename to construct template
1822+ loader.
1823+ :returns: jinja2.ChoiceLoader constructed with a list of
1824+ jinja2.FilesystemLoaders, ordered in descending
1825+ order by OpenStack release.
1826 """
1827 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
1828 for rel in OPENSTACK_CODENAMES.itervalues()]
1829@@ -111,7 +111,8 @@
1830 and ease the burden of managing config templates across multiple OpenStack
1831 releases.
1832
1833- Basic usage:
1834+ Basic usage::
1835+
1836 # import some common context generates from charmhelpers
1837 from charmhelpers.contrib.openstack import context
1838
1839@@ -131,21 +132,19 @@
1840 # write out all registered configs
1841 configs.write_all()
1842
1843- Details:
1844+ **OpenStack Releases and template loading**
1845
1846- OpenStack Releases and template loading
1847- ---------------------------------------
1848 When the object is instantiated, it is associated with a specific OS
1849 release. This dictates how the template loader will be constructed.
1850
1851 The constructed loader attempts to load the template from several places
1852 in the following order:
1853- - from the most recent OS release-specific template dir (if one exists)
1854- - the base templates_dir
1855- - a template directory shipped in the charm with this helper file.
1856-
1857-
1858- For the example above, '/tmp/templates' contains the following structure:
1859+ - from the most recent OS release-specific template dir (if one exists)
1860+ - the base templates_dir
1861+ - a template directory shipped in the charm with this helper file.
1862+
1863+ For the example above, '/tmp/templates' contains the following structure::
1864+
1865 /tmp/templates/nova.conf
1866 /tmp/templates/api-paste.ini
1867 /tmp/templates/grizzly/api-paste.ini
1868@@ -169,8 +168,8 @@
1869 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
1870 us to ship common templates (haproxy, apache) with the helpers.
1871
1872- Context generators
1873- ---------------------------------------
1874+ **Context generators**
1875+
1876 Context generators are used to generate template contexts during hook
1877 execution. Doing so may require inspecting service relations, charm
1878 config, etc. When registered, a config file is associated with a list
1879
1880=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
1881--- hooks/charmhelpers/contrib/openstack/utils.py 2014-08-27 07:17:33 +0000
1882+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-06 15:38:41 +0000
1883@@ -3,8 +3,8 @@
1884 # Common python helper functions used for OpenStack charms.
1885 from collections import OrderedDict
1886
1887-import apt_pkg as apt
1888 import subprocess
1889+import json
1890 import os
1891 import socket
1892 import sys
1893@@ -14,7 +14,9 @@
1894 log as juju_log,
1895 charm_dir,
1896 ERROR,
1897- INFO
1898+ INFO,
1899+ relation_ids,
1900+ relation_set
1901 )
1902
1903 from charmhelpers.contrib.storage.linux.lvm import (
1904@@ -23,6 +25,10 @@
1905 remove_lvm_physical_volume,
1906 )
1907
1908+from charmhelpers.contrib.network.ip import (
1909+ get_ipv6_addr
1910+)
1911+
1912 from charmhelpers.core.host import lsb_release, mounts, umount
1913 from charmhelpers.fetch import apt_install, apt_cache
1914 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
1915@@ -41,7 +47,8 @@
1916 ('quantal', 'folsom'),
1917 ('raring', 'grizzly'),
1918 ('saucy', 'havana'),
1919- ('trusty', 'icehouse')
1920+ ('trusty', 'icehouse'),
1921+ ('utopic', 'juno'),
1922 ])
1923
1924
1925@@ -52,6 +59,7 @@
1926 ('2013.1', 'grizzly'),
1927 ('2013.2', 'havana'),
1928 ('2014.1', 'icehouse'),
1929+ ('2014.2', 'juno'),
1930 ])
1931
1932 # The ugly duckling
1933@@ -69,6 +77,7 @@
1934 ('1.13.0', 'icehouse'),
1935 ('1.12.0', 'icehouse'),
1936 ('1.11.0', 'icehouse'),
1937+ ('2.0.0', 'juno'),
1938 ])
1939
1940 DEFAULT_LOOPBACK_SIZE = '5G'
1941@@ -83,6 +92,8 @@
1942 '''Derive OpenStack release codename from a given installation source.'''
1943 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
1944 rel = ''
1945+ if src is None:
1946+ return rel
1947 if src in ['distro', 'distro-proposed']:
1948 try:
1949 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
1950@@ -130,8 +141,14 @@
1951
1952 def get_os_codename_package(package, fatal=True):
1953 '''Derive OpenStack release codename from an installed package.'''
1954-
1955- cache = apt_cache()
1956+<<<<<<< TREE
1957+
1958+ cache = apt_cache()
1959+=======
1960+ import apt_pkg as apt
1961+
1962+ cache = apt_cache()
1963+>>>>>>> MERGE-SOURCE
1964
1965 try:
1966 pkg = cache[package]
1967@@ -182,7 +199,7 @@
1968 for version, cname in vers_map.iteritems():
1969 if cname == codename:
1970 return version
1971- #e = "Could not determine OpenStack version for package: %s" % pkg
1972+ # e = "Could not determine OpenStack version for package: %s" % pkg
1973 # error_out(e)
1974
1975
1976@@ -268,6 +285,9 @@
1977 'icehouse': 'precise-updates/icehouse',
1978 'icehouse/updates': 'precise-updates/icehouse',
1979 'icehouse/proposed': 'precise-proposed/icehouse',
1980+ 'juno': 'trusty-updates/juno',
1981+ 'juno/updates': 'trusty-updates/juno',
1982+ 'juno/proposed': 'trusty-proposed/juno',
1983 }
1984
1985 try:
1986@@ -315,6 +335,7 @@
1987
1988 """
1989
1990+ import apt_pkg as apt
1991 src = config('openstack-origin')
1992 cur_vers = get_os_version_package(package)
1993 available_vers = get_os_version_install_source(src)
1994@@ -448,3 +469,21 @@
1995 return result
1996 else:
1997 return result.split('.')[0]
1998+
1999+
2000+def sync_db_with_multi_ipv6_addresses(database, database_user,
2001+ relation_prefix=None):
2002+ hosts = get_ipv6_addr(dynamic_only=False)
2003+
2004+ kwargs = {'database': database,
2005+ 'username': database_user,
2006+ 'hostname': json.dumps(hosts)}
2007+
2008+ if relation_prefix:
2009+ keys = kwargs.keys()
2010+ for key in keys:
2011+ kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
2012+ del kwargs[key]
2013+
2014+ for rid in relation_ids('shared-db'):
2015+ relation_set(relation_id=rid, **kwargs)
2016
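For illustration, a hook calling the new helper with a 'swift' database and relation_prefix='swift' (names are examples only) would set the following on every shared-db relation:

    from charmhelpers.contrib.openstack.utils import (
        sync_db_with_multi_ipv6_addresses,
    )

    sync_db_with_multi_ipv6_addresses('swift', 'swift',
                                      relation_prefix='swift')
    # => relation_set(relation_id=rid,
    #                 swift_database='swift',
    #                 swift_username='swift',
    #                 swift_hostname='["<each local IPv6 addr>"]')
    #    where hostname is the JSON-encoded list from get_ipv6_addr().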
2017=== added directory 'hooks/charmhelpers/contrib/peerstorage'
2018=== added file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
2019--- hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000
2020+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-10-06 15:38:41 +0000
2021@@ -0,0 +1,131 @@
2022+from charmhelpers.core.hookenv import relation_id as current_relation_id
2023+from charmhelpers.core.hookenv import (
2024+ is_relation_made,
2025+ relation_ids,
2026+ relation_get,
2027+ local_unit,
2028+ relation_set,
2029+)
2030+
2031+
2032+"""
2033+This helper provides functions to support use of a peer relation
2034+for basic key/value storage, with the added benefit that all storage
2035+can be replicated across peer units.
2036+
2037+Requirement to use:
2038+
2039+To use this, the "peer_echo()" method has to be called from the peer
2040+relation's relation-changed hook:
2041+
2042+@hooks.hook("cluster-relation-changed") # Adapt this to your peer relation name
2043+def cluster_relation_changed():
2044+ peer_echo()
2045+
2046+Once this is done, you can use peer storage from anywhere:
2047+
2048+@hooks.hook("some-hook")
2049+def some_hook():
2050+ # You can store and retrieve key/values this way:
2051+ if is_relation_made("cluster"): # from charmhelpers.core.hookenv
2052+ # There are peers available so we can work with peer storage
2053+ peer_store("mykey", "myvalue")
2054+ value = peer_retrieve("mykey")
2055+ print value
2056+ else:
2057+        print "No peers joined the relation, cannot share key/values :("
2058+"""
2059+
2060+
2061+def peer_retrieve(key, relation_name='cluster'):
2062+ """Retrieve a named key from peer relation `relation_name`."""
2063+ cluster_rels = relation_ids(relation_name)
2064+ if len(cluster_rels) > 0:
2065+ cluster_rid = cluster_rels[0]
2066+ return relation_get(attribute=key, rid=cluster_rid,
2067+ unit=local_unit())
2068+ else:
2069+        raise ValueError('Unable to detect '
2070+ 'peer relation {}'.format(relation_name))
2071+
2072+
2073+def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
2074+ inc_list=None, exc_list=None):
2075+ """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
2076+ inc_list = inc_list if inc_list else []
2077+ exc_list = exc_list if exc_list else []
2078+ peerdb_settings = peer_retrieve('-', relation_name=relation_name)
2079+ matched = {}
2080+ for k, v in peerdb_settings.items():
2081+ full_prefix = prefix + delimiter
2082+ if k.startswith(full_prefix):
2083+ new_key = k.replace(full_prefix, '')
2084+ if new_key in exc_list:
2085+ continue
2086+ if new_key in inc_list or len(inc_list) == 0:
2087+ matched[new_key] = v
2088+ return matched
2089+
2090+
2091+def peer_store(key, value, relation_name='cluster'):
2092+ """Store the key/value pair on the named peer relation `relation_name`."""
2093+ cluster_rels = relation_ids(relation_name)
2094+ if len(cluster_rels) > 0:
2095+ cluster_rid = cluster_rels[0]
2096+ relation_set(relation_id=cluster_rid,
2097+ relation_settings={key: value})
2098+ else:
2099+ raise ValueError('Unable to detect '
2100+ 'peer relation {}'.format(relation_name))
2101+
2102+
2103+def peer_echo(includes=None):
2104+ """Echo filtered attributes back onto the same relation for storage.
2105+
2106+ This is a requirement to use the peerstorage module - it needs to be called
2107+ from the peer relation's changed hook.
2108+ """
2109+ rdata = relation_get()
2110+ echo_data = {}
2111+ if includes is None:
2112+ echo_data = rdata.copy()
2113+ for ex in ['private-address', 'public-address']:
2114+ if ex in echo_data:
2115+ echo_data.pop(ex)
2116+ else:
2117+ for attribute, value in rdata.iteritems():
2118+ for include in includes:
2119+ if include in attribute:
2120+ echo_data[attribute] = value
2121+ if len(echo_data) > 0:
2122+ relation_set(relation_settings=echo_data)
2123+
2124+
2125+def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
2126+ peer_store_fatal=False, relation_settings=None,
2127+ delimiter='_', **kwargs):
2128+    """Store passed-in arguments both on the given relation and in peer storage.
2129+
2130+ It functions like doing relation_set() and peer_store() at the same time,
2131+ with the same data.
2132+
2133+ @param relation_id: the id of the relation to store the data on. Defaults
2134+ to the current relation.
2135+    @param peer_store_fatal: If set to True, the function will raise an
2136+    exception should the peer storage not be available."""
2137+
2138+ relation_settings = relation_settings if relation_settings else {}
2139+ relation_set(relation_id=relation_id,
2140+ relation_settings=relation_settings,
2141+ **kwargs)
2142+ if is_relation_made(peer_relation_name):
2143+ for key, value in dict(kwargs.items() +
2144+ relation_settings.items()).iteritems():
2145+ key_prefix = relation_id or current_relation_id()
2146+ peer_store(key_prefix + delimiter + key,
2147+ value,
2148+ relation_name=peer_relation_name)
2149+ else:
2150+ if peer_store_fatal:
2151+ raise ValueError('Unable to detect '
2152+ 'peer relation {}'.format(peer_relation_name))
2153
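A short sketch of the round trip, assuming a peer relation named 'cluster' and an illustrative relation id. peer_store_and_set() prefixes each key with the relation id plus the delimiter, which peer_retrieve_by_prefix() strips again on the way out:

    from charmhelpers.contrib.peerstorage import (
        peer_store_and_set,
        peer_retrieve_by_prefix,
    )

    # e.g. in a shared-db-relation-changed hook: set the data on the
    # relation and mirror it into peer storage in one call.
    peer_store_and_set(relation_id='shared-db:1',
                       database='swift', username='swift')

    # On any peer unit: recover everything stored for that relation id;
    # keys come back with the 'shared-db:1_' prefix removed.
    settings = peer_retrieve_by_prefix('shared-db:1')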
2154=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
2155--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-02-24 17:51:34 +0000
2156+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-06 15:38:41 +0000
2157@@ -303,7 +303,7 @@
2158 blk_device, fstype, system_services=[]):
2159 """
2160 NOTE: This function must only be called from a single service unit for
2161- the same rbd_img otherwise data loss will occur.
2162+ the same rbd_img otherwise data loss will occur.
2163
2164 Ensures given pool and RBD image exists, is mapped to a block device,
2165 and the device is formatted and mounted at the given mount_point.
2166
2167=== added file 'hooks/charmhelpers/core/fstab.py'
2168--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
2169+++ hooks/charmhelpers/core/fstab.py 2014-10-06 15:38:41 +0000
2170@@ -0,0 +1,116 @@
2171+#!/usr/bin/env python
2172+# -*- coding: utf-8 -*-
2173+
2174+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
2175+
2176+import os
2177+
2178+
2179+class Fstab(file):
2180+ """This class extends file in order to implement a file reader/writer
2181+ for file `/etc/fstab`
2182+ """
2183+
2184+ class Entry(object):
2185+ """Entry class represents a non-comment line on the `/etc/fstab` file
2186+ """
2187+ def __init__(self, device, mountpoint, filesystem,
2188+ options, d=0, p=0):
2189+ self.device = device
2190+ self.mountpoint = mountpoint
2191+ self.filesystem = filesystem
2192+
2193+ if not options:
2194+ options = "defaults"
2195+
2196+ self.options = options
2197+ self.d = d
2198+ self.p = p
2199+
2200+ def __eq__(self, o):
2201+ return str(self) == str(o)
2202+
2203+ def __str__(self):
2204+ return "{} {} {} {} {} {}".format(self.device,
2205+ self.mountpoint,
2206+ self.filesystem,
2207+ self.options,
2208+ self.d,
2209+ self.p)
2210+
2211+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
2212+
2213+ def __init__(self, path=None):
2214+ if path:
2215+ self._path = path
2216+ else:
2217+ self._path = self.DEFAULT_PATH
2218+ file.__init__(self, self._path, 'r+')
2219+
2220+ def _hydrate_entry(self, line):
2221+ # NOTE: use split with no arguments to split on any
2222+ # whitespace including tabs
2223+ return Fstab.Entry(*filter(
2224+ lambda x: x not in ('', None),
2225+ line.strip("\n").split()))
2226+
2227+ @property
2228+ def entries(self):
2229+ self.seek(0)
2230+ for line in self.readlines():
2231+ try:
2232+ if not line.startswith("#"):
2233+ yield self._hydrate_entry(line)
2234+ except ValueError:
2235+ pass
2236+
2237+ def get_entry_by_attr(self, attr, value):
2238+ for entry in self.entries:
2239+ e_attr = getattr(entry, attr)
2240+ if e_attr == value:
2241+ return entry
2242+ return None
2243+
2244+ def add_entry(self, entry):
2245+ if self.get_entry_by_attr('device', entry.device):
2246+ return False
2247+
2248+ self.write(str(entry) + '\n')
2249+ self.truncate()
2250+ return entry
2251+
2252+ def remove_entry(self, entry):
2253+ self.seek(0)
2254+
2255+ lines = self.readlines()
2256+
2257+ found = False
2258+ for index, line in enumerate(lines):
2259+ if not line.startswith("#"):
2260+ if self._hydrate_entry(line) == entry:
2261+ found = True
2262+ break
2263+
2264+ if not found:
2265+ return False
2266+
2267+ lines.remove(line)
2268+
2269+ self.seek(0)
2270+ self.write(''.join(lines))
2271+ self.truncate()
2272+ return True
2273+
2274+ @classmethod
2275+ def remove_by_mountpoint(cls, mountpoint, path=None):
2276+ fstab = cls(path=path)
2277+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
2278+ if entry:
2279+ return fstab.remove_entry(entry)
2280+ return False
2281+
2282+ @classmethod
2283+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
2284+ return cls(path=path).add_entry(Fstab.Entry(device,
2285+ mountpoint, filesystem,
2286+ options=options))
2287
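Usage is mostly through the two classmethods; note the class subclasses the Python 2 built-in `file` and opens the target 'r+', so the file must already exist. A sketch against a scratch copy (paths illustrative):

    from charmhelpers.core.fstab import Fstab

    # Append an entry unless one for the device is already present.
    Fstab.add('/dev/vdb1', '/srv/node/vdb1', 'xfs',
              options='noatime,nodiratime', path='/tmp/fstab.test')

    # Drop it again by mountpoint; returns False if nothing matched.
    Fstab.remove_by_mountpoint('/srv/node/vdb1', path='/tmp/fstab.test')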
2288=== renamed file 'hooks/charmhelpers/core/fstab.py' => 'hooks/charmhelpers/core/fstab.py.moved'
2289=== modified file 'hooks/charmhelpers/core/hookenv.py'
2290--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:39:11 +0000
2291+++ hooks/charmhelpers/core/hookenv.py 2014-10-06 15:38:41 +0000
2292@@ -25,7 +25,7 @@
2293 def cached(func):
2294 """Cache return values for multiple executions of func + args
2295
2296- For example:
2297+ For example::
2298
2299 @cached
2300 def unit_get(attribute):
2301@@ -156,12 +156,15 @@
2302
2303
2304 class Config(dict):
2305- """A Juju charm config dictionary that can write itself to
2306- disk (as json) and track which values have changed since
2307- the previous hook invocation.
2308-
2309- Do not instantiate this object directly - instead call
2310- ``hookenv.config()``
2311+ """A dictionary representation of the charm's config.yaml, with some
2312+ extra features:
2313+
2314+ - See which values in the dictionary have changed since the previous hook.
2315+ - For values that have changed, see what the previous value was.
2316+ - Store arbitrary data for use in a later hook.
2317+
2318+ NOTE: Do not instantiate this object directly - instead call
2319+ ``hookenv.config()``, which will return an instance of :class:`Config`.
2320
2321 Example usage::
2322
2323@@ -170,8 +173,8 @@
2324 >>> config = hookenv.config()
2325 >>> config['foo']
2326 'bar'
2327+ >>> # store a new key/value for later use
2328 >>> config['mykey'] = 'myval'
2329- >>> config.save()
2330
2331
2332 >>> # user runs `juju set mycharm foo=baz`
2333@@ -188,22 +191,34 @@
2334 >>> # keys/values that we add are preserved across hooks
2335 >>> config['mykey']
2336 'myval'
2337- >>> # don't forget to save at the end of hook!
2338- >>> config.save()
2339
2340 """
2341 CONFIG_FILE_NAME = '.juju-persistent-config'
2342
2343 def __init__(self, *args, **kw):
2344 super(Config, self).__init__(*args, **kw)
2345+ self.implicit_save = True
2346 self._prev_dict = None
2347 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
2348 if os.path.exists(self.path):
2349 self.load_previous()
2350
2351+ def __getitem__(self, key):
2352+ """For regular dict lookups, check the current juju config first,
2353+ then the previous (saved) copy. This ensures that user-saved values
2354+ will be returned by a dict lookup.
2355+
2356+ """
2357+ try:
2358+ return dict.__getitem__(self, key)
2359+ except KeyError:
2360+ return (self._prev_dict or {})[key]
2361+
2362 def load_previous(self, path=None):
2363- """Load previous copy of config from disk so that current values
2364- can be compared to previous values.
2365+ """Load previous copy of config from disk.
2366+
2367+ In normal usage you don't need to call this method directly - it
2368+ is called automatically at object initialization.
2369
2370 :param path:
2371
2372@@ -218,8 +233,8 @@
2373 self._prev_dict = json.load(f)
2374
2375 def changed(self, key):
2376- """Return true if the value for this key has changed since
2377- the last save.
2378+ """Return True if the current value for this key is different from
2379+ the previous value.
2380
2381 """
2382 if self._prev_dict is None:
2383@@ -228,7 +243,7 @@
2384
2385 def previous(self, key):
2386 """Return previous value for this key, or None if there
2387- is no "previous" value.
2388+ is no previous value.
2389
2390 """
2391 if self._prev_dict:
2392@@ -238,7 +253,13 @@
2393 def save(self):
2394 """Save this config to disk.
2395
2396- Preserves items in _prev_dict that do not exist in self.
2397+ If the charm is using the :mod:`Services Framework <services.base>`
2398+        or :meth:`@hook <Hooks.hook>` decorator, this
2399+ is called automatically at the end of successful hook execution.
2400+ Otherwise, it should be called directly by user code.
2401+
2402+ To disable automatic saves, set ``implicit_save=False`` on this
2403+ instance.
2404
2405 """
2406 if self._prev_dict:
2407@@ -285,8 +306,9 @@
2408 raise
2409
2410
2411-def relation_set(relation_id=None, relation_settings={}, **kwargs):
2412+def relation_set(relation_id=None, relation_settings=None, **kwargs):
2413 """Set relation information for the current unit"""
2414+ relation_settings = relation_settings if relation_settings else {}
2415 relation_cmd_line = ['relation-set']
2416 if relation_id is not None:
2417 relation_cmd_line.extend(('-r', relation_id))
2418@@ -445,27 +467,29 @@
2419 class Hooks(object):
2420 """A convenient handler for hook functions.
2421
2422- Example:
2423+ Example::
2424+
2425 hooks = Hooks()
2426
2427 # register a hook, taking its name from the function name
2428 @hooks.hook()
2429 def install():
2430- ...
2431+ pass # your code here
2432
2433 # register a hook, providing a custom hook name
2434 @hooks.hook("config-changed")
2435 def config_changed():
2436- ...
2437+ pass # your code here
2438
2439 if __name__ == "__main__":
2440 # execute a hook based on the name the program is called by
2441 hooks.execute(sys.argv)
2442 """
2443
2444- def __init__(self):
2445+ def __init__(self, config_save=True):
2446 super(Hooks, self).__init__()
2447 self._hooks = {}
2448+ self._config_save = config_save
2449
2450 def register(self, name, function):
2451 """Register a hook"""
2452@@ -476,6 +500,10 @@
2453 hook_name = os.path.basename(args[0])
2454 if hook_name in self._hooks:
2455 self._hooks[hook_name]()
2456+ if self._config_save:
2457+ cfg = config()
2458+ if cfg.implicit_save:
2459+ cfg.save()
2460 else:
2461 raise UnregisteredHookError(hook_name)
2462
2463
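With implicit saving, a typical hook no longer calls config.save() itself. A sketch (the 'bind-port' option is illustrative):

    from charmhelpers.core.hookenv import Hooks, config

    hooks = Hooks()  # pass config_save=False to opt out for all hooks

    @hooks.hook('config-changed')
    def config_changed():
        cfg = config()
        if cfg.changed('bind-port'):
            old_port = cfg.previous('bind-port')
            # ... re-render configs, close old_port, open the new one ...
        # no cfg.save() here: Hooks.execute() saves automatically unless
        # cfg.implicit_save has been set to False.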
2464=== modified file 'hooks/charmhelpers/core/host.py'
2465--- hooks/charmhelpers/core/host.py 2014-08-27 07:17:33 +0000
2466+++ hooks/charmhelpers/core/host.py 2014-10-06 15:38:41 +0000
2467@@ -12,7 +12,8 @@
2468 import string
2469 import subprocess
2470 import hashlib
2471-import apt_pkg
2472+import shutil
2473+from contextlib import contextmanager
2474
2475 from collections import OrderedDict
2476
2477@@ -53,7 +54,7 @@
2478 def service_running(service):
2479 """Determine whether a system service is running"""
2480 try:
2481- output = subprocess.check_output(['service', service, 'status'])
2482+ output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
2483 except subprocess.CalledProcessError:
2484 return False
2485 else:
2486@@ -63,6 +64,16 @@
2487 return False
2488
2489
2490+def service_available(service_name):
2491+ """Determine whether a system service is available"""
2492+ try:
2493+ subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
2494+ except subprocess.CalledProcessError as e:
2495+ return 'unrecognized service' not in e.output
2496+ else:
2497+ return True
2498+
2499+
2500 def adduser(username, password=None, shell='/bin/bash', system_user=False):
2501 """Add a user to the system"""
2502 try:
2503@@ -198,10 +209,15 @@
2504 return system_mounts
2505
2506
2507-def file_hash(path):
2508- """Generate a md5 hash of the contents of 'path' or None if not found """
2509+def file_hash(path, hash_type='md5'):
2510+ """
2511+ Generate a hash checksum of the contents of 'path' or None if not found.
2512+
2513+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
2514+ such as md5, sha1, sha256, sha512, etc.
2515+ """
2516 if os.path.exists(path):
2517- h = hashlib.md5()
2518+ h = getattr(hashlib, hash_type)()
2519 with open(path, 'r') as source:
2520 h.update(source.read()) # IGNORE:E1101 - it does have update
2521 return h.hexdigest()
2522@@ -209,16 +225,36 @@
2523 return None
2524
2525
2526+def check_hash(path, checksum, hash_type='md5'):
2527+ """
2528+ Validate a file using a cryptographic checksum.
2529+
2530+ :param str checksum: Value of the checksum used to validate the file.
2531+ :param str hash_type: Hash algorithm used to generate `checksum`.
2532+        Can be any hash algorithm supported by :mod:`hashlib`,
2533+ such as md5, sha1, sha256, sha512, etc.
2534+ :raises ChecksumError: If the file fails the checksum
2535+
2536+ """
2537+ actual_checksum = file_hash(path, hash_type)
2538+ if checksum != actual_checksum:
2539+ raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
2540+
2541+
2542+class ChecksumError(ValueError):
2543+ pass
2544+
2545+
2546 def restart_on_change(restart_map, stopstart=False):
2547 """Restart services based on configuration files changing
2548
2549- This function is used a decorator, for example
2550+    This function is used as a decorator, for example::
2551
2552 @restart_on_change({
2553 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
2554 })
2555 def ceph_client_changed():
2556- ...
2557+ pass # your code here
2558
2559 In this example, the cinder-api and cinder-volume services
2560 would be restarted if /etc/ceph/ceph.conf is changed by the
2561@@ -314,12 +350,40 @@
2562
2563 def cmp_pkgrevno(package, revno, pkgcache=None):
2564 '''Compare supplied revno with the revno of the installed package
2565- 1 => Installed revno is greater than supplied arg
2566- 0 => Installed revno is the same as supplied arg
2567- -1 => Installed revno is less than supplied arg
2568+
2569+ * 1 => Installed revno is greater than supplied arg
2570+ * 0 => Installed revno is the same as supplied arg
2571+ * -1 => Installed revno is less than supplied arg
2572+
2573 '''
2574- from charmhelpers.fetch import apt_cache
2575+<<<<<<< TREE
2576+ from charmhelpers.fetch import apt_cache
2577+=======
2578+ import apt_pkg
2579+ from charmhelpers.fetch import apt_cache
2580+>>>>>>> MERGE-SOURCE
2581 if not pkgcache:
2582 pkgcache = apt_cache()
2583 pkg = pkgcache[package]
2584 return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
2585+
2586+
2587+@contextmanager
2588+def chdir(d):
2589+ cur = os.getcwd()
2590+ try:
2591+ yield os.chdir(d)
2592+ finally:
2593+ os.chdir(cur)
2594+
2595+
2596+def chownr(path, owner, group):
2597+ uid = pwd.getpwnam(owner).pw_uid
2598+ gid = grp.getgrnam(group).gr_gid
2599+
2600+ for root, dirs, files in os.walk(path):
2601+ for name in dirs + files:
2602+ full = os.path.join(root, name)
2603+ broken_symlink = os.path.lexists(full) and not os.path.exists(full)
2604+ if not broken_symlink:
2605+ os.chown(full, uid, gid)
2606
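A sketch of the new checksum helpers (path and digest are placeholders):

    from charmhelpers.core.host import file_hash, check_hash, ChecksumError

    try:
        check_hash('/tmp/swift-plugin.tar.gz',
                   '0' * 64,  # placeholder: the expected sha256 digest
                   hash_type='sha256')
    except ChecksumError as e:
        print 'corrupt download: %s' % e

    # file_hash() returns None (rather than raising) for a missing path.
    assert file_hash('/no/such/file') is None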
2607=== added directory 'hooks/charmhelpers/core/services'
2608=== added file 'hooks/charmhelpers/core/services/__init__.py'
2609--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
2610+++ hooks/charmhelpers/core/services/__init__.py 2014-10-06 15:38:41 +0000
2611@@ -0,0 +1,2 @@
2612+from .base import *
2613+from .helpers import *
2614
2615=== added file 'hooks/charmhelpers/core/services/base.py'
2616--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
2617+++ hooks/charmhelpers/core/services/base.py 2014-10-06 15:38:41 +0000
2618@@ -0,0 +1,313 @@
2619+import os
2620+import re
2621+import json
2622+from collections import Iterable
2623+
2624+from charmhelpers.core import host
2625+from charmhelpers.core import hookenv
2626+
2627+
2628+__all__ = ['ServiceManager', 'ManagerCallback',
2629+ 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
2630+ 'service_restart', 'service_stop']
2631+
2632+
2633+class ServiceManager(object):
2634+ def __init__(self, services=None):
2635+ """
2636+ Register a list of services, given their definitions.
2637+
2638+ Service definitions are dicts in the following formats (all keys except
2639+ 'service' are optional)::
2640+
2641+ {
2642+ "service": <service name>,
2643+ "required_data": <list of required data contexts>,
2644+ "provided_data": <list of provided data contexts>,
2645+ "data_ready": <one or more callbacks>,
2646+ "data_lost": <one or more callbacks>,
2647+ "start": <one or more callbacks>,
2648+ "stop": <one or more callbacks>,
2649+ "ports": <list of ports to manage>,
2650+ }
2651+
2652+ The 'required_data' list should contain dicts of required data (or
2653+ dependency managers that act like dicts and know how to collect the data).
2654+        Only when all items in the 'required_data' list are populated are the
2655+        'data_ready' and 'start' callbacks executed. See `is_ready()` for more
2656+ information.
2657+
2658+ The 'provided_data' list should contain relation data providers, most likely
2659+ a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
2660+ that will indicate a set of data to set on a given relation.
2661+
2662+ The 'data_ready' value should be either a single callback, or a list of
2663+ callbacks, to be called when all items in 'required_data' pass `is_ready()`.
2664+ Each callback will be called with the service name as the only parameter.
2665+ After all of the 'data_ready' callbacks are called, the 'start' callbacks
2666+ are fired.
2667+
2668+ The 'data_lost' value should be either a single callback, or a list of
2669+ callbacks, to be called when a 'required_data' item no longer passes
2670+ `is_ready()`. Each callback will be called with the service name as the
2671+ only parameter. After all of the 'data_lost' callbacks are called,
2672+ the 'stop' callbacks are fired.
2673+
2674+ The 'start' value should be either a single callback, or a list of
2675+ callbacks, to be called when starting the service, after the 'data_ready'
2676+ callbacks are complete. Each callback will be called with the service
2677+ name as the only parameter. This defaults to
2678+ `[host.service_start, services.open_ports]`.
2679+
2680+ The 'stop' value should be either a single callback, or a list of
2681+ callbacks, to be called when stopping the service. If the service is
2682+ being stopped because it no longer has all of its 'required_data', this
2683+ will be called after all of the 'data_lost' callbacks are complete.
2684+ Each callback will be called with the service name as the only parameter.
2685+ This defaults to `[services.close_ports, host.service_stop]`.
2686+
2687+ The 'ports' value should be a list of ports to manage. The default
2688+ 'start' handler will open the ports after the service is started,
2689+ and the default 'stop' handler will close the ports prior to stopping
2690+ the service.
2691+
2692+
2693+ Examples:
2694+
2695+ The following registers an Upstart service called bingod that depends on
2696+ a mongodb relation and which runs a custom `db_migrate` function prior to
2697+ restarting the service, and a Runit service called spadesd::
2698+
2699+ manager = services.ServiceManager([
2700+ {
2701+ 'service': 'bingod',
2702+ 'ports': [80, 443],
2703+ 'required_data': [MongoRelation(), config(), {'my': 'data'}],
2704+ 'data_ready': [
2705+ services.template(source='bingod.conf'),
2706+ services.template(source='bingod.ini',
2707+ target='/etc/bingod.ini',
2708+ owner='bingo', perms=0400),
2709+ ],
2710+ },
2711+ {
2712+ 'service': 'spadesd',
2713+ 'data_ready': services.template(source='spadesd_run.j2',
2714+ target='/etc/sv/spadesd/run',
2715+ perms=0555),
2716+ 'start': runit_start,
2717+ 'stop': runit_stop,
2718+ },
2719+ ])
2720+ manager.manage()
2721+ """
2722+ self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
2723+ self._ready = None
2724+ self.services = {}
2725+ for service in services or []:
2726+ service_name = service['service']
2727+ self.services[service_name] = service
2728+
2729+ def manage(self):
2730+ """
2731+ Handle the current hook by doing The Right Thing with the registered services.
2732+ """
2733+ hook_name = hookenv.hook_name()
2734+ if hook_name == 'stop':
2735+ self.stop_services()
2736+ else:
2737+ self.provide_data()
2738+ self.reconfigure_services()
2739+ cfg = hookenv.config()
2740+ if cfg.implicit_save:
2741+ cfg.save()
2742+
2743+ def provide_data(self):
2744+ """
2745+ Set the relation data for each provider in the ``provided_data`` list.
2746+
2747+ A provider must have a `name` attribute, which indicates which relation
2748+ to set data on, and a `provide_data()` method, which returns a dict of
2749+ data to set.
2750+ """
2751+ hook_name = hookenv.hook_name()
2752+ for service in self.services.values():
2753+ for provider in service.get('provided_data', []):
2754+ if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
2755+ data = provider.provide_data()
2756+ _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
2757+ if _ready:
2758+ hookenv.relation_set(None, data)
2759+
2760+ def reconfigure_services(self, *service_names):
2761+ """
2762+ Update all files for one or more registered services, and,
2763+ if ready, optionally restart them.
2764+
2765+ If no service names are given, reconfigures all registered services.
2766+ """
2767+ for service_name in service_names or self.services.keys():
2768+ if self.is_ready(service_name):
2769+ self.fire_event('data_ready', service_name)
2770+ self.fire_event('start', service_name, default=[
2771+ service_restart,
2772+ manage_ports])
2773+ self.save_ready(service_name)
2774+ else:
2775+ if self.was_ready(service_name):
2776+ self.fire_event('data_lost', service_name)
2777+ self.fire_event('stop', service_name, default=[
2778+ manage_ports,
2779+ service_stop])
2780+ self.save_lost(service_name)
2781+
2782+ def stop_services(self, *service_names):
2783+ """
2784+ Stop one or more registered services, by name.
2785+
2786+ If no service names are given, stops all registered services.
2787+ """
2788+ for service_name in service_names or self.services.keys():
2789+ self.fire_event('stop', service_name, default=[
2790+ manage_ports,
2791+ service_stop])
2792+
2793+ def get_service(self, service_name):
2794+ """
2795+ Given the name of a registered service, return its service definition.
2796+ """
2797+ service = self.services.get(service_name)
2798+ if not service:
2799+ raise KeyError('Service not registered: %s' % service_name)
2800+ return service
2801+
2802+ def fire_event(self, event_name, service_name, default=None):
2803+ """
2804+ Fire a data_ready, data_lost, start, or stop event on a given service.
2805+ """
2806+ service = self.get_service(service_name)
2807+ callbacks = service.get(event_name, default)
2808+ if not callbacks:
2809+ return
2810+ if not isinstance(callbacks, Iterable):
2811+ callbacks = [callbacks]
2812+ for callback in callbacks:
2813+ if isinstance(callback, ManagerCallback):
2814+ callback(self, service_name, event_name)
2815+ else:
2816+ callback(service_name)
2817+
2818+ def is_ready(self, service_name):
2819+ """
2820+ Determine if a registered service is ready, by checking its 'required_data'.
2821+
2822+ A 'required_data' item can be any mapping type, and is considered ready
2823+ if `bool(item)` evaluates as True.
2824+ """
2825+ service = self.get_service(service_name)
2826+ reqs = service.get('required_data', [])
2827+ return all(bool(req) for req in reqs)
2828+
2829+ def _load_ready_file(self):
2830+ if self._ready is not None:
2831+ return
2832+ if os.path.exists(self._ready_file):
2833+ with open(self._ready_file) as fp:
2834+ self._ready = set(json.load(fp))
2835+ else:
2836+ self._ready = set()
2837+
2838+ def _save_ready_file(self):
2839+ if self._ready is None:
2840+ return
2841+ with open(self._ready_file, 'w') as fp:
2842+ json.dump(list(self._ready), fp)
2843+
2844+ def save_ready(self, service_name):
2845+ """
2846+ Save an indicator that the given service is now data_ready.
2847+ """
2848+ self._load_ready_file()
2849+ self._ready.add(service_name)
2850+ self._save_ready_file()
2851+
2852+ def save_lost(self, service_name):
2853+ """
2854+ Save an indicator that the given service is no longer data_ready.
2855+ """
2856+ self._load_ready_file()
2857+ self._ready.discard(service_name)
2858+ self._save_ready_file()
2859+
2860+ def was_ready(self, service_name):
2861+ """
2862+ Determine if the given service was previously data_ready.
2863+ """
2864+ self._load_ready_file()
2865+ return service_name in self._ready
2866+
2867+
2868+class ManagerCallback(object):
2869+ """
2870+ Special case of a callback that takes the `ServiceManager` instance
2871+ in addition to the service name.
2872+
2873+ Subclasses should implement `__call__` which should accept three parameters:
2874+
2875+ * `manager` The `ServiceManager` instance
2876+ * `service_name` The name of the service it's being triggered for
2877+ * `event_name` The name of the event that this callback is handling
2878+ """
2879+ def __call__(self, manager, service_name, event_name):
2880+ raise NotImplementedError()
2881+
2882+
2883+class PortManagerCallback(ManagerCallback):
2884+ """
2885+ Callback class that will open or close ports, for use as either
2886+ a start or stop action.
2887+ """
2888+ def __call__(self, manager, service_name, event_name):
2889+ service = manager.get_service(service_name)
2890+ new_ports = service.get('ports', [])
2891+ port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
2892+ if os.path.exists(port_file):
2893+ with open(port_file) as fp:
2894+ old_ports = fp.read().split(',')
2895+ for old_port in old_ports:
2896+ if bool(old_port):
2897+ old_port = int(old_port)
2898+ if old_port not in new_ports:
2899+ hookenv.close_port(old_port)
2900+ with open(port_file, 'w') as fp:
2901+ fp.write(','.join(str(port) for port in new_ports))
2902+ for port in new_ports:
2903+ if event_name == 'start':
2904+ hookenv.open_port(port)
2905+ elif event_name == 'stop':
2906+ hookenv.close_port(port)
2907+
2908+
2909+def service_stop(service_name):
2910+ """
2911+ Wrapper around host.service_stop to prevent spurious "unknown service"
2912+ messages in the logs.
2913+ """
2914+ if host.service_running(service_name):
2915+ host.service_stop(service_name)
2916+
2917+
2918+def service_restart(service_name):
2919+ """
2920+ Wrapper around host.service_restart to prevent spurious "unknown service"
2921+ messages in the logs.
2922+ """
2923+ if host.service_available(service_name):
2924+ if host.service_running(service_name):
2925+ host.service_restart(service_name)
2926+ else:
2927+ host.service_start(service_name)
2928+
2929+
2930+# Convenience aliases
2931+open_ports = close_ports = manage_ports = PortManagerCallback()
2932
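A minimal wiring sketch: a single hook script (symlinked in place of each hook) that hands control to the manager. The service name, port and config option are illustrative:

    #!/usr/bin/env python
    from charmhelpers.core import services
    from charmhelpers.core.services.helpers import RequiredConfig

    manager = services.ServiceManager([
        {
            'service': 'swift-proxy',
            'ports': [8080],
            # blocks until 'zone-assignment' is changed from its default
            'required_data': [RequiredConfig('zone-assignment')],
            'data_ready': services.template(
                source='proxy-server.conf',
                target='/etc/swift/proxy-server.conf'),
        },
    ])

    if __name__ == '__main__':
        manager.manage()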
2933=== added file 'hooks/charmhelpers/core/services/helpers.py'
2934--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
2935+++ hooks/charmhelpers/core/services/helpers.py 2014-10-06 15:38:41 +0000
2936@@ -0,0 +1,239 @@
2937+import os
2938+import yaml
2939+from charmhelpers.core import hookenv
2940+from charmhelpers.core import templating
2941+
2942+from charmhelpers.core.services.base import ManagerCallback
2943+
2944+
2945+__all__ = ['RelationContext', 'TemplateCallback',
2946+ 'render_template', 'template']
2947+
2948+
2949+class RelationContext(dict):
2950+ """
2951+ Base class for a context generator that gets relation data from juju.
2952+
2953+ Subclasses must provide the attributes `name`, which is the name of the
2954+ interface of interest, `interface`, which is the type of the interface of
2955+ interest, and `required_keys`, which is the set of keys required for the
2956+ relation to be considered complete. The data for all interfaces matching
2957+    the `name` attribute that are complete will be used to populate the dictionary
2958+ values (see `get_data`, below).
2959+
2960+ The generated context will be namespaced under the relation :attr:`name`,
2961+ to prevent potential naming conflicts.
2962+
2963+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
2964+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
2965+ """
2966+ name = None
2967+ interface = None
2968+ required_keys = []
2969+
2970+ def __init__(self, name=None, additional_required_keys=None):
2971+ if name is not None:
2972+ self.name = name
2973+ if additional_required_keys is not None:
2974+ self.required_keys.extend(additional_required_keys)
2975+ self.get_data()
2976+
2977+ def __bool__(self):
2978+ """
2979+ Returns True if all of the required_keys are available.
2980+ """
2981+ return self.is_ready()
2982+
2983+ __nonzero__ = __bool__
2984+
2985+ def __repr__(self):
2986+ return super(RelationContext, self).__repr__()
2987+
2988+ def is_ready(self):
2989+ """
2990+ Returns True if all of the `required_keys` are available from any units.
2991+ """
2992+ ready = len(self.get(self.name, [])) > 0
2993+ if not ready:
2994+ hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
2995+ return ready
2996+
2997+ def _is_ready(self, unit_data):
2998+ """
2999+ Helper method that tests a set of relation data and returns True if
3000+ all of the `required_keys` are present.
3001+ """
3002+ return set(unit_data.keys()).issuperset(set(self.required_keys))
3003+
3004+ def get_data(self):
3005+ """
3006+ Retrieve the relation data for each unit involved in a relation and,
3007+ if complete, store it in a list under `self[self.name]`. This
3008+ is automatically called when the RelationContext is instantiated.
3009+
3010+        The units are sorted lexicographically first by the service ID, then by
3011+ the unit ID. Thus, if an interface has two other services, 'db:1'
3012+ and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
3013+ and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
3014+ set of data, the relation data for the units will be stored in the
3015+ order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
3016+
3017+ If you only care about a single unit on the relation, you can just
3018+ access it as `{{ interface[0]['key'] }}`. However, if you can at all
3019+ support multiple units on a relation, you should iterate over the list,
3020+ like::
3021+
3022+ {% for unit in interface -%}
3023+ {{ unit['key'] }}{% if not loop.last %},{% endif %}
3024+ {%- endfor %}
3025+
3026+ Note that since all sets of relation data from all related services and
3027+ units are in a single list, if you need to know which service or unit a
3028+ set of data came from, you'll need to extend this class to preserve
3029+ that information.
3030+ """
3031+ if not hookenv.relation_ids(self.name):
3032+ return
3033+
3034+ ns = self.setdefault(self.name, [])
3035+ for rid in sorted(hookenv.relation_ids(self.name)):
3036+ for unit in sorted(hookenv.related_units(rid)):
3037+ reldata = hookenv.relation_get(rid=rid, unit=unit)
3038+ if self._is_ready(reldata):
3039+ ns.append(reldata)
3040+
3041+ def provide_data(self):
3042+ """
3043+ Return data to be relation_set for this interface.
3044+ """
3045+ return {}
3046+
3047+
3048+class MysqlRelation(RelationContext):
3049+ """
3050+ Relation context for the `mysql` interface.
3051+
3052+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
3053+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
3054+ """
3055+ name = 'db'
3056+ interface = 'mysql'
3057+ required_keys = ['host', 'user', 'password', 'database']
3058+
3059+
3060+class HttpRelation(RelationContext):
3061+ """
3062+ Relation context for the `http` interface.
3063+
3064+ :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
3065+ :param list additional_required_keys: Extend the list of :attr:`required_keys`
3066+ """
3067+ name = 'website'
3068+ interface = 'http'
3069+ required_keys = ['host', 'port']
3070+
3071+ def provide_data(self):
3072+ return {
3073+ 'host': hookenv.unit_get('private-address'),
3074+ 'port': 80,
3075+ }
3076+
3077+
3078+class RequiredConfig(dict):
3079+ """
3080+ Data context that loads config options with one or more mandatory options.
3081+
3082+ Once the required options have been changed from their default values, all
3083+ config options will be available, namespaced under `config` to prevent
3084+ potential naming conflicts (for example, between a config option and a
3085+ relation property).
3086+
3087+ :param list *args: List of options that must be changed from their default values.
3088+ """
3089+
3090+ def __init__(self, *args):
3091+ self.required_options = args
3092+ self['config'] = hookenv.config()
3093+ with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
3094+ self.config = yaml.load(fp).get('options', {})
3095+
3096+ def __bool__(self):
3097+ for option in self.required_options:
3098+ if option not in self['config']:
3099+ return False
3100+ current_value = self['config'][option]
3101+ default_value = self.config[option].get('default')
3102+ if current_value == default_value:
3103+ return False
3104+ if current_value in (None, '') and default_value in (None, ''):
3105+ return False
3106+ return True
3107+
3108+ def __nonzero__(self):
3109+ return self.__bool__()
3110+
3111+
3112+class StoredContext(dict):
3113+ """
3114+ A data context that always returns the data that it was first created with.
3115+
3116+ This is useful to do a one-time generation of things like passwords, that
3117+ will thereafter use the same value that was originally generated, instead
3118+ of generating a new value each time it is run.
3119+ """
3120+ def __init__(self, file_name, config_data):
3121+ """
3122+ If the file exists, populate `self` with the data from the file.
3123+ Otherwise, populate with the given data and persist it to the file.
3124+ """
3125+ if os.path.exists(file_name):
3126+ self.update(self.read_context(file_name))
3127+ else:
3128+ self.store_context(file_name, config_data)
3129+ self.update(config_data)
3130+
3131+ def store_context(self, file_name, config_data):
3132+ if not os.path.isabs(file_name):
3133+ file_name = os.path.join(hookenv.charm_dir(), file_name)
3134+ with open(file_name, 'w') as file_stream:
3135+ os.fchmod(file_stream.fileno(), 0600)
3136+ yaml.dump(config_data, file_stream)
3137+
3138+ def read_context(self, file_name):
3139+ if not os.path.isabs(file_name):
3140+ file_name = os.path.join(hookenv.charm_dir(), file_name)
3141+ with open(file_name, 'r') as file_stream:
3142+ data = yaml.load(file_stream)
3143+ if not data:
3144+ raise OSError("%s is empty" % file_name)
3145+ return data
3146+
3147+
3148+class TemplateCallback(ManagerCallback):
3149+ """
3150+ Callback class that will render a Jinja2 template, for use as a ready action.
3151+
3152+ :param str source: The template source file, relative to `$CHARM_DIR/templates`
3153+ :param str target: The target to write the rendered template to
3154+ :param str owner: The owner of the rendered file
3155+ :param str group: The group of the rendered file
3156+ :param int perms: The permissions of the rendered file
3157+ """
3158+ def __init__(self, source, target, owner='root', group='root', perms=0444):
3159+ self.source = source
3160+ self.target = target
3161+ self.owner = owner
3162+ self.group = group
3163+ self.perms = perms
3164+
3165+ def __call__(self, manager, service_name, event_name):
3166+ service = manager.get_service(service_name)
3167+ context = {}
3168+ for ctx in service.get('required_data', []):
3169+ context.update(ctx)
3170+ templating.render(self.source, self.target, context,
3171+ self.owner, self.group, self.perms)
3172+
3173+
3174+# Convenience aliases for templates
3175+render_template = template = TemplateCallback
3176
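A sketch of subclassing RelationContext for a hypothetical 'swift-storage' interface (all names and keys illustrative):

    from charmhelpers.core.services.helpers import RelationContext

    class SwiftStorageRelation(RelationContext):
        name = 'swift-storage'
        interface = 'swift'
        required_keys = ['zone', 'device']

    rel = SwiftStorageRelation()
    if rel:  # truthy once some unit has provided every required key
        # complete unit data is collected as a list under rel[rel.name]
        for unit_data in rel['swift-storage']:
            print unit_data['zone'], unit_data['device']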
3177=== added file 'hooks/charmhelpers/core/templating.py'
3178--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
3179+++ hooks/charmhelpers/core/templating.py 2014-10-06 15:38:41 +0000
3180@@ -0,0 +1,51 @@
3181+import os
3182+
3183+from charmhelpers.core import host
3184+from charmhelpers.core import hookenv
3185+
3186+
3187+def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
3188+ """
3189+ Render a template.
3190+
3191+ The `source` path, if not absolute, is relative to the `templates_dir`.
3192+
3193+ The `target` path should be absolute.
3194+
3195+ The context should be a dict containing the values to be replaced in the
3196+ template.
3197+
3198+ The `owner`, `group`, and `perms` options will be passed to `write_file`.
3199+
3200+ If omitted, `templates_dir` defaults to the `templates` folder in the charm.
3201+
3202+ Note: Using this requires python-jinja2; if it is not installed, calling
3203+ this will attempt to use charmhelpers.fetch.apt_install to install it.
3204+ """
3205+ try:
3206+ from jinja2 import FileSystemLoader, Environment, exceptions
3207+ except ImportError:
3208+ try:
3209+ from charmhelpers.fetch import apt_install
3210+ except ImportError:
3211+ hookenv.log('Could not import jinja2, and could not import '
3212+ 'charmhelpers.fetch to install it',
3213+ level=hookenv.ERROR)
3214+ raise
3215+ apt_install('python-jinja2', fatal=True)
3216+ from jinja2 import FileSystemLoader, Environment, exceptions
3217+
3218+ if templates_dir is None:
3219+ templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
3220+ loader = Environment(loader=FileSystemLoader(templates_dir))
3221+ try:
3223+ template = loader.get_template(source)
3224+ except exceptions.TemplateNotFound as e:
3225+ hookenv.log('Could not load template %s from %s.' %
3226+ (source, templates_dir),
3227+ level=hookenv.ERROR)
3228+ raise e
3229+ content = template.render(context)
3230+ host.mkdir(os.path.dirname(target))
3231+ host.write_file(target, content, owner, group, perms)
3232
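A sketch of calling render() directly (template name and context key are illustrative; the source is looked up by that exact name under the charm's templates/ directory):

    from charmhelpers.core.templating import render

    render(source='memcached.conf',
           target='/etc/memcached.conf',
           context={'memcached_size': 768},
           perms=0644)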
3233=== modified file 'hooks/charmhelpers/fetch/__init__.py'
3234--- hooks/charmhelpers/fetch/__init__.py 2014-08-27 07:17:33 +0000
3235+++ hooks/charmhelpers/fetch/__init__.py 2014-10-06 15:38:41 +0000
3236@@ -1,4 +1,5 @@
3237 import importlib
3238+from tempfile import NamedTemporaryFile
3239 import time
3240 from yaml import safe_load
3241 from charmhelpers.core.host import (
3242@@ -13,7 +14,6 @@
3243 config,
3244 log,
3245 )
3246-import apt_pkg
3247 import os
3248
3249
3250@@ -56,6 +56,15 @@
3251 'icehouse/proposed': 'precise-proposed/icehouse',
3252 'precise-icehouse/proposed': 'precise-proposed/icehouse',
3253 'precise-proposed/icehouse': 'precise-proposed/icehouse',
3254+ # Juno
3255+ 'juno': 'trusty-updates/juno',
3256+ 'trusty-juno': 'trusty-updates/juno',
3257+ 'trusty-juno/updates': 'trusty-updates/juno',
3258+ 'trusty-updates/juno': 'trusty-updates/juno',
3259+    'juno/proposed': 'trusty-proposed/juno',
3261+ 'trusty-juno/proposed': 'trusty-proposed/juno',
3262+ 'trusty-proposed/juno': 'trusty-proposed/juno',
3263 }
3264
3265 # The order of this list is very important. Handlers should be listed in from
3266@@ -108,8 +117,12 @@
3267
3268 def filter_installed_packages(packages):
3269 """Returns a list of packages that require installation"""
3270+<<<<<<< TREE
3271
3272 cache = apt_cache()
3273+=======
3274+ cache = apt_cache()
3275+>>>>>>> MERGE-SOURCE
3276 _pkgs = []
3277 for package in packages:
3278 try:
3279@@ -122,15 +135,28 @@
3280 return _pkgs
3281
3282
3283-def apt_cache(in_memory=True):
3284- """Build and return an apt cache"""
3285- apt_pkg.init()
3286- if in_memory:
3287- apt_pkg.config.set("Dir::Cache::pkgcache", "")
3288- apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
3289- return apt_pkg.Cache()
3290-
3291-
3292+def apt_cache(in_memory=True):
3293+ """Build and return an apt cache"""
3294+ import apt_pkg
3295+ apt_pkg.init()
3296+ if in_memory:
3297+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
3298+ apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
3299+ return apt_pkg.Cache()
3300+
3301+
3314 def apt_install(packages, options=None, fatal=False):
3315 """Install one or more packages"""
3316 if options is None:
3317@@ -196,6 +222,28 @@
3318
3319
3320 def add_source(source, key=None):
3321+ """Add a package source to this system.
3322+
3323+ @param source: a URL or sources.list entry, as supported by
3324+ add-apt-repository(1). Examples::
3325+
3326+ ppa:charmers/example
3327+ deb https://stub:key@private.example.com/ubuntu trusty main
3328+
3329+ In addition:
3330+ 'proposed:' may be used to enable the standard 'proposed'
3331+ pocket for the release.
3332+ 'cloud:' may be used to activate official cloud archive pockets,
3333+ such as 'cloud:icehouse'
3334+
3335+ @param key: A key to be added to the system's APT keyring and used
3336+ to verify the signatures on packages. Ideally, this should be an
3337+ ASCII format GPG public key including the block headers. A GPG key
3338+ id may also be used, but be aware that only insecure protocols are
3339+ available to retrieve the actual public key from a public keyserver
3340+ placing your Juju environment at risk. ppa and cloud archive keys
3341+ are securely added automatically, so should not be provided.
3342+ """
3343 if source is None:
3344 log('Source is not present. Skipping')
3345 return
3346@@ -220,61 +268,96 @@
3347 release = lsb_release()['DISTRIB_CODENAME']
3348 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
3349 apt.write(PROPOSED_POCKET.format(release))
3350+ else:
3351+ raise SourceConfigError("Unknown source: {!r}".format(source))
3352+
3353 if key:
3354- subprocess.check_call(['apt-key', 'adv', '--keyserver',
3355- 'hkp://keyserver.ubuntu.com:80', '--recv',
3356- key])
3357+ if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
3358+ with NamedTemporaryFile() as key_file:
3359+ key_file.write(key)
3360+ key_file.flush()
3361+ key_file.seek(0)
3362+ subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
3363+ else:
3364+ # Note that hkp: is in no way a secure protocol. Using a
3365+ # GPG key id is pointless from a security POV unless you
3366+ # absolutely trust your network and DNS.
3367+ subprocess.check_call(['apt-key', 'adv', '--keyserver',
3368+ 'hkp://keyserver.ubuntu.com:80', '--recv',
3369+ key])
3370
3371
3372 def configure_sources(update=False,
3373 sources_var='install_sources',
3374 keys_var='install_keys'):
3375 """
3376- Configure multiple sources from charm configuration
3377+ Configure multiple sources from charm configuration.
3378+
3379+ The lists are encoded as yaml fragments in the configuration.
3380+ The fragment needs to be included as a string. Sources and their
3381+ corresponding keys are of the types supported by add_source().
3382
3383 Example config:
3384- install_sources:
3385+ install_sources: |
3386 - "ppa:foo"
3387 - "http://example.com/repo precise main"
3388- install_keys:
3389+ install_keys: |
3390 - null
3391 - "a1b2c3d4"
3392
3393 Note that 'null' (a.k.a. None) should not be quoted.
3394 """
3395- sources = safe_load(config(sources_var))
3396- keys = config(keys_var)
3397- if keys is not None:
3398- keys = safe_load(keys)
3399- if isinstance(sources, basestring) and (
3400- keys is None or isinstance(keys, basestring)):
3401- add_source(sources, keys)
3402+ sources = safe_load((config(sources_var) or '').strip()) or []
3403+ keys = safe_load((config(keys_var) or '').strip()) or None
3404+
3405+ if isinstance(sources, basestring):
3406+ sources = [sources]
3407+
3408+ if keys is None:
3409+ for source in sources:
3410+ add_source(source, None)
3411 else:
3412- if not len(sources) == len(keys):
3413- msg = 'Install sources and keys lists are different lengths'
3414- raise SourceConfigError(msg)
3415- for src_num in range(len(sources)):
3416- add_source(sources[src_num], keys[src_num])
3417+ if isinstance(keys, basestring):
3418+ keys = [keys]
3419+
3420+ if len(sources) != len(keys):
3421+ raise SourceConfigError(
3422+ 'Install sources and keys lists are different lengths')
3423+ for source, key in zip(sources, keys):
3424+ add_source(source, key)
3425 if update:
3426 apt_update(fatal=True)
3427
3428
3429-def install_remote(source):
3430+def install_remote(source, *args, **kwargs):
3431 """
3432 Install a file tree from a remote source
3433
3434 The specified source should be a url of the form:
3435 scheme://[host]/path[#[option=value][&...]]
3436
3437- Schemes supported are based on this modules submodules
3438- Options supported are submodule-specific"""
3439+ Schemes supported are based on this module's submodules.
3440+ Options supported are submodule-specific.
3441+ Additional arguments are passed through to the submodule.
3442+
3443+ For example::
3444+
3445+ dest = install_remote('http://example.com/archive.tgz',
3446+ checksum='deadbeef',
3447+ hash_type='sha1')
3448+
3449+ This will download `archive.tgz`, validate it using SHA1 and, if
3450+ the file is ok, extract it and return the directory in which it
3451+ was extracted. If the checksum fails, it will raise
3452+ :class:`charmhelpers.core.host.ChecksumError`.
3453+ """
3454 # We ONLY check for True here because can_handle may return a string
3455 # explaining why it can't handle a given source.
3456 handlers = [h for h in plugins() if h.can_handle(source) is True]
3457 installed_to = None
3458 for handler in handlers:
3459 try:
3460- installed_to = handler.install(source)
3461+ installed_to = handler.install(source, *args, **kwargs)
3462 except UnhandledSource:
3463 pass
3464 if not installed_to:
3465
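add_source() and configure_sources() are the two entry points into the above.
A short sketch, assuming it runs inside a deployed charm (the pocket name is
one of the CLOUD_ARCHIVE_POCKETS keys added earlier in this diff):

    from charmhelpers.fetch import add_source, configure_sources, apt_update

    # Enable the Juno cloud archive directly; the string after 'cloud:' is
    # looked up in CLOUD_ARCHIVE_POCKETS and the archive key is added
    # securely via ubuntu-cloud-keyring.
    add_source('cloud:trusty-juno')
    apt_update(fatal=True)

    # Or read the YAML lists from the 'install_sources'/'install_keys'
    # charm config options, add each source/key pair, then update.
    configure_sources(update=True)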
3466=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
3467--- hooks/charmhelpers/fetch/archiveurl.py 2014-03-20 13:47:46 +0000
3468+++ hooks/charmhelpers/fetch/archiveurl.py 2014-10-06 15:38:41 +0000
3469@@ -1,6 +1,8 @@
3470 import os
3471 import urllib2
3472+from urllib import urlretrieve
3473 import urlparse
3474+import hashlib
3475
3476 from charmhelpers.fetch import (
3477 BaseFetchHandler,
3478@@ -10,11 +12,19 @@
3479 get_archive_handler,
3480 extract,
3481 )
3482-from charmhelpers.core.host import mkdir
3483+from charmhelpers.core.host import mkdir, check_hash
3484
3485
3486 class ArchiveUrlFetchHandler(BaseFetchHandler):
3487- """Handler for archives via generic URLs"""
3488+ """
3489+ Handler to download archive files from arbitrary URLs.
3490+
3491+ Can fetch from http, https, ftp, and file URLs.
3492+
3493+ Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
3494+
3495+ Installs the contents of the archive in $CHARM_DIR/fetched/.
3496+ """
3497 def can_handle(self, source):
3498 url_parts = self.parse_url(source)
3499 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
3500@@ -24,6 +34,12 @@
3501 return False
3502
3503 def download(self, source, dest):
3504+ """
3505+ Download an archive file.
3506+
3507+ :param str source: URL pointing to an archive file.
3508+ :param str dest: Local path location to download archive file to.
3509+ """
3510 # propagate all exceptions
3511 # URLError, OSError, etc
3512 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
3513@@ -48,7 +64,30 @@
3514 os.unlink(dest)
3515 raise e
3516
3517- def install(self, source):
3518+ # Mandatory file validation via SHA1 or MD5 hashing.
3519+ def download_and_validate(self, url, hashsum, validate="sha1"):
3520+ tempfile, headers = urlretrieve(url)
3521+ check_hash(tempfile, hashsum, validate)
3522+ return tempfile
3523+
3524+ def install(self, source, dest=None, checksum=None, hash_type='sha1'):
3525+ """
3526+ Download and install an archive file, with optional checksum validation.
3527+
3528+ The checksum can also be given on the `source` URL's fragment.
3529+ For example::
3530+
3531+ handler.install('http://example.com/file.tgz#sha1=deadbeef')
3532+
3533+ :param str source: URL pointing to an archive file.
3534+ :param str dest: Local destination path to install to. If not given,
3535+ installs to `$CHARM_DIR/fetched/archive_file_name`.
3536+ :param str checksum: If given, validate the archive file after download.
3537+ :param str hash_type: Algorithm used to generate `checksum`.
3538+ Can be any hash alrgorithm supported by :mod:`hashlib`,
3539+ such as md5, sha1, sha256, sha512, etc.
3540+
3541+ """
3542 url_parts = self.parse_url(source)
3543 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
3544 if not os.path.exists(dest_dir):
3545@@ -60,4 +99,10 @@
3546 raise UnhandledSource(e.reason)
3547 except OSError as e:
3548 raise UnhandledSource(e.strerror)
3549- return extract(dld_file)
3550+ options = urlparse.parse_qs(url_parts.fragment)
3551+ for key, value in options.items():
3552+ if key in hashlib.algorithms:
3553+ check_hash(dld_file, value, key)
3554+ if checksum:
3555+ check_hash(dld_file, checksum, hash_type)
3556+ return extract(dld_file, dest)
3557
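The fragment handling at the end of install() means the two calls below are
equivalent ways to get a validated download; both raise
charmhelpers.core.host.ChecksumError on mismatch (URL and digest here are
placeholders):

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()

    # Checksum carried in the URL fragment; any hashlib algorithm name
    # may be used as the fragment key.
    path = handler.install('http://example.com/file.tgz#sha1=deadbeef')

    # Checksum passed explicitly.
    path = handler.install('http://example.com/file.tgz',
                           checksum='deadbeef', hash_type='sha1')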
3558=== modified file 'hooks/swift_context.py'
3559--- hooks/swift_context.py 2014-04-10 16:52:10 +0000
3560+++ hooks/swift_context.py 2014-10-06 15:38:41 +0000
3561@@ -4,7 +4,8 @@
3562 relation_ids,
3563 related_units,
3564 relation_get,
3565- unit_get
3566+ unit_get,
3567+ service_name
3568 )
3569
3570 from charmhelpers.contrib.openstack.context import (
3571@@ -19,9 +20,14 @@
3572 determine_apache_port,
3573 )
3574
3575+from charmhelpers.contrib.network.ip import (
3576+ get_ipv6_addr
3577+)
3578+
3579 from charmhelpers.contrib.openstack.utils import get_host_ip
3580 import subprocess
3581 import os
3582+import uuid
3583
3584
3585 from charmhelpers.contrib.hahelpers.apache import (
3586@@ -116,7 +122,11 @@
3587 for relid in relation_ids('swift-storage'):
3588 for unit in related_units(relid):
3589 host = relation_get('private-address', unit, relid)
3590- allowed_hosts.append(get_host_ip(host))
3591+ if config('prefer-ipv6'):
3592+ host_ip = get_ipv6_addr(exc_list=[config('vip')])[0]
3593+ else:
3594+ host_ip = get_host_ip(host)
3595+ allowed_hosts.append(host_ip)
3596
3597 ctxt = {
3598 'www_dir': WWW_DIR,
3599@@ -134,12 +144,21 @@
3600 if workers == '0':
3601 import multiprocessing
3602 workers = multiprocessing.cpu_count()
3603+ if config('prefer-ipv6'):
3604+ proxy_ip = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
3605+ memcached_ip = 'ip6-localhost'
3606+ else:
3607+ proxy_ip = get_host_ip(unit_get('private-address'))
3608+ memcached_ip = get_host_ip(unit_get('private-address'))
3609 ctxt = {
3610- 'proxy_ip': get_host_ip(unit_get('private-address')),
3611+ 'proxy_ip': proxy_ip,
3612+ 'memcached_ip': memcached_ip,
3613 'bind_port': determine_api_port(bind_port),
3614 'workers': workers,
3615 'operator_roles': config('operator-roles'),
3616- 'delay_auth_decision': config('delay-auth-decision')
3617+ 'delay_auth_decision': config('delay-auth-decision'),
3618+ 'node_timeout': config('node-timeout'),
3619+ 'recoverable_node_timeout': config('recoverable-node-timeout'),
3620 }
3621
3622 ctxt['ssl'] = False
3623@@ -194,9 +213,11 @@
3624 class MemcachedContext(OSContextGenerator):
3625
3626 def __call__(self):
3627- ctxt = {
3628- 'proxy_ip': get_host_ip(unit_get('private-address'))
3629- }
3630+ ctxt = {}
3631+ if config('prefer-ipv6'):
3632+ ctxt['memcached_ip'] = 'ip6-localhost'
3633+ else:
3634+ ctxt['memcached_ip'] = get_host_ip(unit_get('private-address'))
3635 return ctxt
3636
3637 SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
3638@@ -211,10 +232,8 @@
3639 with open(SWIFT_HASH_FILE, 'w') as hashfile:
3640 hashfile.write(swift_hash)
3641 else:
3642- cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n']
3643- rand = open('/dev/random', 'r')
3644- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=rand)
3645- swift_hash = p.communicate()[0].strip()
3646+ swift_hash = str(uuid.uuid3(uuid.UUID(os.environ.get("JUJU_ENV_UUID")),
3647+ service_name()))
3648 with open(SWIFT_HASH_FILE, 'w') as hashfile:
3649 hashfile.write(swift_hash)
3650 return swift_hash
3651
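The new get_swift_hash() derivation is the core of this proposal: instead of
reading /dev/random, the hash is computed deterministically from the
environment UUID and the service name, so every unit derives the same value
without exchanging a randomly generated secret. A standalone sketch with
illustrative values:

    import uuid

    env_uuid = 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae'  # JUJU_ENV_UUID (example)
    service = 'swift-proxy'                            # service_name() (example)

    # uuid3 is an MD5-based, name-derived UUID: the same namespace plus
    # name always yields the same result.
    swift_hash = str(uuid.uuid3(uuid.UUID(env_uuid), service))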
3652=== modified file 'hooks/swift_hooks.py'
3653--- hooks/swift_hooks.py 2014-04-10 16:52:10 +0000
3654+++ hooks/swift_hooks.py 2014-10-06 15:38:41 +0000
3655@@ -24,7 +24,8 @@
3656 add_to_ring,
3657 should_balance,
3658 do_openstack_upgrade,
3659- write_rc_script
3660+ write_rc_script,
3661+ setup_ipv6
3662 )
3663 from swift_context import get_swift_hash
3664
3665@@ -48,6 +49,19 @@
3666 )
3667 from charmhelpers.payload.execd import execd_preinstall
3668
3669+from charmhelpers.contrib.openstack.ip import (
3670+ canonical_url,
3671+ PUBLIC, INTERNAL, ADMIN
3672+)
3673+from charmhelpers.contrib.network.ip import (
3674+ get_iface_for_address,
3675+ get_netmask_for_address,
3676+ get_address_in_network,
3677+ get_ipv6_addr,
3678+ format_ipv6_addr,
3679+ is_ipv6
3680+)
3681+
3682 extra_pkgs = [
3683 "haproxy",
3684 "python-jinja2"
3685@@ -71,7 +85,6 @@
3686 pkgs = determine_packages(rel)
3687 apt_install(pkgs, fatal=True)
3688 apt_install(extra_pkgs, fatal=True)
3689-
3690 ensure_swift_dir()
3691 # initialize new storage rings.
3692 for ring in SWIFT_RINGS.iteritems():
3693@@ -92,20 +105,16 @@
3694 def keystone_joined(relid=None):
3695 if not cluster.eligible_leader(SWIFT_HA_RES):
3696 return
3697- if cluster.is_clustered():
3698- hostname = config('vip')
3699- else:
3700- hostname = unit_get('private-address')
3701 port = config('bind-port')
3702- if cluster.https():
3703- proto = 'https'
3704- else:
3705- proto = 'http'
3706- admin_url = '%s://%s:%s' % (proto, hostname, port)
3707- internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url
3708+ admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
3709+ internal_url = '%s:%s/v1/AUTH_$(tenant_id)s' % \
3710+ (canonical_url(CONFIGS, INTERNAL), port)
3711+ public_url = '%s:%s/v1/AUTH_$(tenant_id)s' % \
3712+ (canonical_url(CONFIGS, PUBLIC), port)
3713 relation_set(service='swift',
3714 region=config('region'),
3715- public_url=public_url, internal_url=internal_url,
3716+ public_url=public_url,
3717+ internal_url=internal_url,
3718 admin_url=admin_url,
3719 requested_roles=config('operator-roles'),
3720 relation_id=relid)
3721@@ -142,9 +151,12 @@
3722
3723 if cluster.is_clustered():
3724 hostname = config('vip')
3725+ elif config('prefer-ipv6'):
3726+ hostname = get_ipv6_addr(exc_list=[config('vip')])[0]
3727 else:
3728 hostname = unit_get('private-address')
3729
3730+ hostname = format_ipv6_addr(hostname) or hostname
3731 rings_url = 'http://%s/%s' % (hostname, path)
3732 # notify storage nodes that there is a new ring to fetch.
3733 for relid in relation_ids('swift-storage'):
3734@@ -157,9 +169,14 @@
3735 @hooks.hook('swift-storage-relation-changed')
3736 @restart_on_change(restart_map())
3737 def storage_changed():
3738+ if config('prefer-ipv6'):
3739+ host_ip = '[%s]' % relation_get('private-address')
3740+ else:
3741+ host_ip = openstack.get_host_ip(relation_get('private-address'))
3742+
3743 zone = get_zone(config('zone-assignment'))
3744 node_settings = {
3745- 'ip': openstack.get_host_ip(relation_get('private-address')),
3746+ 'ip': host_ip,
3747 'zone': zone,
3748 'account_port': relation_get('account_port'),
3749 'object_port': relation_get('object_port'),
3750@@ -195,16 +212,33 @@
3751 @hooks.hook('config-changed')
3752 @restart_on_change(restart_map())
3753 def config_changed():
3754+ if config('prefer-ipv6'):
3755+ setup_ipv6()
3756+
3757 configure_https()
3758 open_port(config('bind-port'))
3759 # Determine whether or not we should do an upgrade, based on the
3760 # version offered in keystone-release.
3761 if (openstack.openstack_upgrade_available('python-swift')):
3762 do_openstack_upgrade(CONFIGS)
3763-
3764-
3765-@hooks.hook('cluster-relation-changed',
3766- 'cluster-relation-joined')
3767+ for r_id in relation_ids('identity-service'):
3768+ keystone_joined(relid=r_id)
3769+
3770+
3771+@hooks.hook('cluster-relation-joined')
3772+def cluster_joined(relation_id=None):
3773+ if config('prefer-ipv6'):
3774+ private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
3775+ else:
3776+ private_addr = unit_get('private-address')
3777+
3778+ address = get_address_in_network(config('os-internal-network'),
3779+ private_addr)
3780+ relation_set(relation_id=relation_id,
3781+ relation_settings={'private-address': address})
3782+
3783+
3784+@hooks.hook('cluster-relation-changed')
3785 @restart_on_change(restart_map())
3786 def cluster_changed():
3787 CONFIGS.write_all()
3788@@ -229,8 +263,6 @@
3789 corosync_bindiface = config('ha-bindiface')
3790 corosync_mcastport = config('ha-mcastport')
3791 vip = config('vip')
3792- vip_cidr = config('vip_cidr')
3793- vip_iface = config('vip_iface')
3794 if not vip:
3795 log('Unable to configure hacluster as vip not provided',
3796 level=ERROR)
3797@@ -238,14 +270,37 @@
3798
3799 # Obtain resources
3800 resources = {
3801- 'res_swift_vip': 'ocf:heartbeat:IPaddr2',
3802 'res_swift_haproxy': 'lsb:haproxy'
3803 }
3804 resource_params = {
3805- 'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
3806- (vip, vip_cidr, vip_iface),
3807 'res_swift_haproxy': 'op monitor interval="5s"'
3808 }
3809+
3810+ vip_group = []
3811+ for vip in vip.split():
3812+ if is_ipv6(vip):
3813+ res_swift_vip = 'ocf:heartbeat:IPv6addr'
3814+ vip_params = 'ipv6addr'
3815+ else:
3816+ res_swift_vip = 'ocf:heartbeat:IPaddr2'
3817+ vip_params = 'ip'
3818+
3819+ iface = get_iface_for_address(vip)
3820+ if iface is not None:
3821+ vip_key = 'res_swift_{}_vip'.format(iface)
3822+ resources[vip_key] = res_swift_vip
3823+ resource_params[vip_key] = (
3824+ 'params {ip}="{vip}" cidr_netmask="{netmask}"'
3825+ ' nic="{iface}"'.format(ip=vip_params,
3826+ vip=vip,
3827+ iface=iface,
3828+ netmask=get_netmask_for_address(vip))
3829+ )
3830+ vip_group.append(vip_key)
3831+
3832+ if len(vip_group) >= 1:
3833+ relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})
3834+
3835 init_services = {
3836 'res_swift_haproxy': 'haproxy'
3837 }
3838
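ha_joined() now builds one VIP resource per interface instead of the single
hard-coded res_swift_vip. For illustration only (hypothetical values): a vip
of '10.5.100.1' resolving to eth0 with a /24 netmask would yield

    resources = {
        'res_swift_haproxy': 'lsb:haproxy',
        'res_swift_eth0_vip': 'ocf:heartbeat:IPaddr2',
    }
    resource_params = {
        'res_swift_haproxy': 'op monitor interval="5s"',
        'res_swift_eth0_vip': ('params ip="10.5.100.1" cidr_netmask="24"'
                               ' nic="eth0"'),
    }
    # grouped for the hacluster charm as:
    # groups = {'grp_swift_vips': 'res_swift_eth0_vip'}

An IPv6 VIP follows the same path but uses ocf:heartbeat:IPv6addr and the
'ipv6addr' parameter name.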
3839=== modified file 'hooks/swift_utils.py'
3840--- hooks/swift_utils.py 2014-08-11 08:59:49 +0000
3841+++ hooks/swift_utils.py 2014-10-06 15:38:41 +0000
3842@@ -12,7 +12,13 @@
3843 )
3844 from charmhelpers.fetch import (
3845 apt_update,
3846- apt_upgrade
3847+ apt_upgrade,
3848+ apt_install,
3849+ add_source
3850+)
3851+
3852+from charmhelpers.core.host import (
3853+ lsb_release
3854 )
3855
3856 import charmhelpers.contrib.openstack.context as context
3857@@ -63,7 +69,7 @@
3858 # > Folsom specific packages
3859 FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']
3860
3861-SWIFT_HA_RES = 'res_swift_vip'
3862+SWIFT_HA_RES = 'grp_swift_vips'
3863
3864 TEMPLATES = 'templates/'
3865
3866@@ -75,7 +81,8 @@
3867 'services': ['swift-proxy'],
3868 }),
3869 (SWIFT_PROXY_CONF, {
3870- 'hook_contexts': [swift_context.SwiftIdentityContext()],
3871+ 'hook_contexts': [swift_context.SwiftIdentityContext(),
3872+ context.BindHostContext()],
3873 'services': ['swift-proxy'],
3874 }),
3875 (HAPROXY_CONF, {
3876@@ -368,3 +375,19 @@
3877 apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
3878 configs.set_release(openstack_release=new_os_rel)
3879 configs.write_all()
3880+
3881+
3882+def setup_ipv6():
3883+ ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
3884+ if ubuntu_rel < "trusty":
3885+ raise Exception("IPv6 is not supported in the charms for Ubuntu "
3886+ "versions less than Trusty 14.04")
3887+
3888+ # NOTE(xianghui): haproxy (1.5.3) must be installed from trusty-backports
3889+ # to support IPv6 addresses, so a check is required to avoid breaking
3890+ # other releases; IPv6 is only supported on >= Trusty.
3891+ if ubuntu_rel == 'trusty':
3892+ add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports'
3893+ ' main')
3894+ apt_update()
3895+ apt_install('haproxy/trusty-backports', fatal=True)
3896
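Note that setup_ipv6() compares Ubuntu codenames as plain strings. This
happens to sort correctly for the releases in play at the time of writing, as
the sketch below checks, but it is a coincidence of the names rather than a
guarantee for future series:

    # Lexicographic order happens to match release order here:
    assert 'precise' < 'trusty'   # 12.04 sorts before 14.04
    assert 'saucy' < 'trusty'     # 13.10 sorts before 14.04
    assert 'utopic' > 'trusty'    # 14.10 sorts after 14.04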
3897=== modified file 'revision'
3898--- revision 2013-09-27 12:02:37 +0000
3899+++ revision 2014-10-06 15:38:41 +0000
3900@@ -1,1 +1,1 @@
3901-146
3902+147
3903
3904=== modified file 'templates/essex/proxy-server.conf'
3905--- templates/essex/proxy-server.conf 2014-02-27 12:17:53 +0000
3906+++ templates/essex/proxy-server.conf 2014-10-06 15:38:41 +0000
3907@@ -19,6 +19,8 @@
3908 use = egg:swift#proxy
3909 allow_account_management = true
3910 {% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
3911+node_timeout = {{ node_timeout }}
3912+recoverable_node_timeout = {{ recoverable_node_timeout }}
3913
3914 [filter:tempauth]
3915 use = egg:swift#tempauth
3916
3917=== modified file 'templates/grizzly/proxy-server.conf'
3918--- templates/grizzly/proxy-server.conf 2014-03-27 11:23:24 +0000
3919+++ templates/grizzly/proxy-server.conf 2014-10-06 15:38:41 +0000
3920@@ -19,6 +19,8 @@
3921 use = egg:swift#proxy
3922 allow_account_management = true
3923 {% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
3924+node_timeout = {{ node_timeout }}
3925+recoverable_node_timeout = {{ recoverable_node_timeout }}
3926
3927 [filter:tempauth]
3928 use = egg:swift#tempauth
3929
3930=== modified file 'templates/havana/proxy-server.conf'
3931--- templates/havana/proxy-server.conf 2014-03-27 11:23:24 +0000
3932+++ templates/havana/proxy-server.conf 2014-10-06 15:38:41 +0000
3933@@ -19,6 +19,8 @@
3934 use = egg:swift#proxy
3935 allow_account_management = true
3936 {% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
3937+node_timeout = {{ node_timeout }}
3938+recoverable_node_timeout = {{ recoverable_node_timeout }}
3939
3940 [filter:tempauth]
3941 use = egg:swift#tempauth
3942
3943=== modified file 'templates/icehouse/proxy-server.conf'
3944--- templates/icehouse/proxy-server.conf 2014-04-07 14:44:39 +0000
3945+++ templates/icehouse/proxy-server.conf 2014-10-06 15:38:41 +0000
3946@@ -2,6 +2,7 @@
3947 bind_port = {{ bind_port }}
3948 workers = {{ workers }}
3949 user = swift
3950+bind_ip = {{ bind_host }}
3951 {% if ssl %}
3952 cert_file = {{ ssl_cert }}
3953 key_file = {{ ssl_key }}
3954@@ -19,6 +20,8 @@
3955 use = egg:swift#proxy
3956 allow_account_management = true
3957 {% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
3958+node_timeout = {{ node_timeout }}
3959+recoverable_node_timeout = {{ recoverable_node_timeout }}
3960
3961 [filter:tempauth]
3962 use = egg:swift#tempauth
3963@@ -29,7 +32,7 @@
3964
3965 [filter:cache]
3966 use = egg:swift#memcache
3967-memcache_servers = {{ proxy_ip }}:11211
3968+memcache_servers = {{ memcached_ip }}:11211
3969
3970 [filter:account-quotas]
3971 use = egg:swift#account_quotas
3972
3973=== modified file 'templates/memcached.conf'
3974--- templates/memcached.conf 2013-09-27 12:02:37 +0000
3975+++ templates/memcached.conf 2014-10-06 15:38:41 +0000
3976@@ -32,7 +32,7 @@
3977 # Specify which IP address to listen on. The default is to listen on all IP addresses
3978 # This parameter is one of the only security measures that memcached has, so make sure
3979 # it's listening on a firewalled interface.
3980--l {{ proxy_ip }}
3981+-l {{ memcached_ip }}
3982
3983 # Limit the number of simultaneous incoming connections. The daemon default is 1024
3984 # -c 1024
3985
3986=== added directory 'tests'
3987=== added file 'tests/00-setup'
3988--- tests/00-setup 1970-01-01 00:00:00 +0000
3989+++ tests/00-setup 2014-10-06 15:38:41 +0000
3990@@ -0,0 +1,11 @@
3991+#!/bin/bash
3992+
3993+set -ex
3994+
3995+sudo add-apt-repository --yes ppa:juju/stable
3996+sudo apt-get update --yes
3997+sudo apt-get install --yes python-amulet
3998+sudo apt-get install --yes python-swiftclient
3999+sudo apt-get install --yes python-glanceclient
4000+sudo apt-get install --yes python-keystoneclient
4001+sudo apt-get install --yes python-novaclient
4002
4003=== added file 'tests/10-basic-precise-essex'
4004--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
4005+++ tests/10-basic-precise-essex 2014-10-06 15:38:41 +0000
4006@@ -0,0 +1,9 @@
4007+#!/usr/bin/python
4008+
4009+"""Amulet tests on a basic swift-proxy deployment on precise-essex."""
4010+
4011+from basic_deployment import SwiftProxyBasicDeployment
4012+
4013+if __name__ == '__main__':
4014+ deployment = SwiftProxyBasicDeployment(series='precise')
4015+ deployment.run_tests()
4016
4017=== added file 'tests/11-basic-precise-folsom'
4018--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
4019+++ tests/11-basic-precise-folsom 2014-10-06 15:38:41 +0000
4020@@ -0,0 +1,11 @@
4021+#!/usr/bin/python
4022+
4023+"""Amulet tests on a basic swift-proxy deployment on precise-folsom."""
4024+
4025+from basic_deployment import SwiftProxyBasicDeployment
4026+
4027+if __name__ == '__main__':
4028+ deployment = SwiftProxyBasicDeployment(series='precise',
4029+ openstack='cloud:precise-folsom',
4030+ source='cloud:precise-updates/folsom')
4031+ deployment.run_tests()
4032
4033=== added file 'tests/12-basic-precise-grizzly'
4034--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
4035+++ tests/12-basic-precise-grizzly 2014-10-06 15:38:41 +0000
4036@@ -0,0 +1,11 @@
4037+#!/usr/bin/python
4038+
4039+"""Amulet tests on a basic swift-proxy deployment on precise-grizzly."""
4040+
4041+from basic_deployment import SwiftProxyBasicDeployment
4042+
4043+if __name__ == '__main__':
4044+ deployment = SwiftProxyBasicDeployment(series='precise',
4045+ openstack='cloud:precise-grizzly',
4046+ source='cloud:precise-updates/grizzly')
4047+ deployment.run_tests()
4048
4049=== added file 'tests/13-basic-precise-havana'
4050--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
4051+++ tests/13-basic-precise-havana 2014-10-06 15:38:41 +0000
4052@@ -0,0 +1,11 @@
4053+#!/usr/bin/python
4054+
4055+"""Amulet tests on a basic swift-proxy deployment on precise-havana."""
4056+
4057+from basic_deployment import SwiftProxyBasicDeployment
4058+
4059+if __name__ == '__main__':
4060+ deployment = SwiftProxyBasicDeployment(series='precise',
4061+ openstack='cloud:precise-havana',
4062+ source='cloud:precise-updates/havana')
4063+ deployment.run_tests()
4064
4065=== added file 'tests/14-basic-precise-icehouse'
4066--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
4067+++ tests/14-basic-precise-icehouse 2014-10-06 15:38:41 +0000
4068@@ -0,0 +1,11 @@
4069+#!/usr/bin/python
4070+
4071+"""Amulet tests on a basic swift-proxy deployment on precise-icehouse."""
4072+
4073+from basic_deployment import SwiftProxyBasicDeployment
4074+
4075+if __name__ == '__main__':
4076+ deployment = SwiftProxyBasicDeployment(series='precise',
4077+ openstack='cloud:precise-icehouse',
4078+ source='cloud:precise-updates/icehouse')
4079+ deployment.run_tests()
4080
4081=== added file 'tests/15-basic-trusty-icehouse'
4082--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
4083+++ tests/15-basic-trusty-icehouse 2014-10-06 15:38:41 +0000
4084@@ -0,0 +1,9 @@
4085+#!/usr/bin/python
4086+
4087+"""Amulet tests on a basic swift-proxy deployment on trusty-icehouse."""
4088+
4089+from basic_deployment import SwiftProxyBasicDeployment
4090+
4091+if __name__ == '__main__':
4092+ deployment = SwiftProxyBasicDeployment(series='trusty')
4093+ deployment.run_tests()
4094
4095=== added file 'tests/README'
4096--- tests/README 1970-01-01 00:00:00 +0000
4097+++ tests/README 2014-10-06 15:38:41 +0000
4098@@ -0,0 +1,52 @@
4099+This directory provides Amulet tests that focus on verification of swift-proxy
4100+deployments.
4101+
4102+If you use a web proxy server to access the web, you'll need to set the
4103+AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
4104+
4105+The following examples demonstrate different ways that tests can be executed.
4106+All examples are run from the charm's root directory.
4107+
4108+ * To run all tests (starting with 00-setup):
4109+
4110+ make test
4111+
4112+ * To run a specific test module (or modules):
4113+
4114+ juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
4115+
4116+ * To run a specific test module (or modules), and keep the environment
4117+ deployed after a failure:
4118+
4119+ juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
4120+
4121+ * To re-run a test module against an already deployed environment (one
4122+ that was deployed by a previous call to 'juju test --set-e'):
4123+
4124+ ./tests/15-basic-trusty-icehouse
4125+
4126+For debugging and test development purposes, all code should be idempotent.
4127+In other words, the code should have the ability to be re-run without changing
4128+the results beyond the initial run. This enables editing and re-running of a
4129+test module against an already deployed environment, as described above.
4130+
4131+Manual debugging tips:
4132+
4133+ * Set the following env vars before using the OpenStack CLI as admin:
4134+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
4135+ export OS_TENANT_NAME=admin
4136+ export OS_USERNAME=admin
4137+ export OS_PASSWORD=openstack
4138+ export OS_REGION_NAME=RegionOne
4139+
4140+ * Set the following env vars before using the OpenStack CLI as demoUser:
4141+ export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
4142+ export OS_TENANT_NAME=demoTenant
4143+ export OS_USERNAME=demoUser
4144+ export OS_PASSWORD=password
4145+ export OS_REGION_NAME=RegionOne
4146+
4147+ * Sample swift command:
4148+ swift -A $OS_AUTH_URL --os-tenant-name services --os-username swift \
4149+ --os-password password list
4150+ (where tenant/user names and password are in swift-proxy's nova.conf file)
4151
4152=== added file 'tests/basic_deployment.py'
4153--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
4154+++ tests/basic_deployment.py 2014-10-06 15:38:41 +0000
4155@@ -0,0 +1,827 @@
4156+#!/usr/bin/python
4157+
4158+import amulet
4159+import swiftclient
4160+
4161+from charmhelpers.contrib.openstack.amulet.deployment import (
4162+ OpenStackAmuletDeployment
4163+)
4164+
4165+from charmhelpers.contrib.openstack.amulet.utils import (
4166+ OpenStackAmuletUtils,
4167+ DEBUG, # flake8: noqa
4168+ ERROR
4169+)
4170+
4171+# Use DEBUG to turn on debug logging
4172+u = OpenStackAmuletUtils(ERROR)
4173+
4174+
4175+class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
4176+ """Amulet tests on a basic swift-proxy deployment."""
4177+
4178+ def __init__(self, series, openstack=None, source=None):
4179+ """Deploy the entire test environment."""
4180+ super(SwiftProxyBasicDeployment, self).__init__(series, openstack,
4181+ source)
4182+ self._add_services()
4183+ self._add_relations()
4184+ self._configure_services()
4185+ self._deploy()
4186+ self._initialize_tests()
4187+
4188+ def _add_services(self):
4189+ """Add the service that we're testing, including the number of units,
4190+ where swift-proxy is local, and the other charms are from
4191+ the charm store."""
4192+ this_service = ('swift-proxy', 1)
4193+ other_services = [('mysql', 1),
4194+ ('keystone', 1), ('glance', 1), ('swift-storage', 1)]
4195+ super(SwiftProxyBasicDeployment, self)._add_services(this_service,
4196+ other_services)
4197+
4198+ def _add_relations(self):
4199+ """Add all of the relations for the services."""
4200+ relations = {
4201+ 'keystone:shared-db': 'mysql:shared-db',
4202+ 'swift-proxy:identity-service': 'keystone:identity-service',
4203+ 'swift-storage:swift-storage': 'swift-proxy:swift-storage',
4204+ 'glance:identity-service': 'keystone:identity-service',
4205+ 'glance:shared-db': 'mysql:shared-db',
4206+ 'glance:object-store': 'swift-proxy:object-store'
4207+ }
4208+ super(SwiftProxyBasicDeployment, self)._add_relations(relations)
4209+
4210+ def _configure_services(self):
4211+ """Configure all of the services."""
4212+ keystone_config = {'admin-password': 'openstack',
4213+ 'admin-token': 'ubuntutesting'}
4214+ swift_proxy_config = {'zone-assignment': 'manual',
4215+ 'replicas': '1',
4216+ 'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae',
4217+ 'use-https': 'no'}
4218+ swift_storage_config = {'zone': '1',
4219+ 'block-device': 'vdb',
4220+ 'overwrite': 'true'}
4221+ configs = {'keystone': keystone_config,
4222+ 'swift-proxy': swift_proxy_config,
4223+ 'swift-storage': swift_storage_config}
4224+ super(SwiftProxyBasicDeployment, self)._configure_services(configs)
4225+
4226+ def _initialize_tests(self):
4227+ """Perform final initialization before tests get run."""
4228+ # Access the sentries for inspecting service units
4229+ self.mysql_sentry = self.d.sentry.unit['mysql/0']
4230+ self.keystone_sentry = self.d.sentry.unit['keystone/0']
4231+ self.glance_sentry = self.d.sentry.unit['glance/0']
4232+ self.swift_proxy_sentry = self.d.sentry.unit['swift-proxy/0']
4233+ self.swift_storage_sentry = self.d.sentry.unit['swift-storage/0']
4234+
4235+ # Authenticate admin with keystone
4236+ self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
4237+ user='admin',
4238+ password='openstack',
4239+ tenant='admin')
4240+
4241+ # Authenticate admin with glance endpoint
4242+ self.glance = u.authenticate_glance_admin(self.keystone)
4243+
4244+ # Authenticate swift user
4245+ keystone_relation = self.keystone_sentry.relation('identity-service',
4246+ 'swift-proxy:identity-service')
4247+ ep = self.keystone.service_catalog.url_for(service_type='identity',
4248+ endpoint_type='publicURL')
4249+ self.swift = swiftclient.Connection(authurl=ep,
4250+ user=keystone_relation['service_username'],
4251+ key=keystone_relation['service_password'],
4252+ tenant_name=keystone_relation['service_tenant'],
4253+ auth_version='2.0')
4254+
4255+ # Create a demo tenant/role/user
4256+ self.demo_tenant = 'demoTenant'
4257+ self.demo_role = 'demoRole'
4258+ self.demo_user = 'demoUser'
4259+ if not u.tenant_exists(self.keystone, self.demo_tenant):
4260+ tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
4261+ description='demo tenant',
4262+ enabled=True)
4263+ self.keystone.roles.create(name=self.demo_role)
4264+ self.keystone.users.create(name=self.demo_user,
4265+ password='password',
4266+ tenant_id=tenant.id,
4267+ email='demo@demo.com')
4268+
4269+ # Authenticate demo user with keystone
4270+ self.keystone_demo = \
4271+ u.authenticate_keystone_user(self.keystone, user=self.demo_user,
4272+ password='password',
4273+ tenant=self.demo_tenant)
4274+
4275+ def test_services(self):
4276+ """Verify the expected services are running on the corresponding
4277+ service units."""
4278+ swift_storage_services = ['status swift-account',
4279+ 'status swift-account-auditor',
4280+ 'status swift-account-reaper',
4281+ 'status swift-account-replicator',
4282+ 'status swift-container',
4283+ 'status swift-container-auditor',
4284+ 'status swift-container-replicator',
4285+ 'status swift-container-updater',
4286+ 'status swift-object',
4287+ 'status swift-object-auditor',
4288+ 'status swift-object-replicator',
4289+ 'status swift-object-updater']
4290+ if self._get_openstack_release() >= self.precise_icehouse:
4291+ swift_storage_services.append('status swift-container-sync')
4292+
4293+ commands = {
4294+ self.mysql_sentry: ['status mysql'],
4295+ self.keystone_sentry: ['status keystone'],
4296+ self.glance_sentry: ['status glance-registry', 'status glance-api'],
4297+ self.swift_proxy_sentry: ['status swift-proxy'],
4298+ self.swift_storage_sentry: swift_storage_services
4299+ }
4300+
4301+ ret = u.validate_services(commands)
4302+ if ret:
4303+ amulet.raise_status(amulet.FAIL, msg=ret)
4304+
4305+ def test_users(self):
4306+ """Verify all existing roles."""
4307+ user1 = {'name': 'demoUser',
4308+ 'enabled': True,
4309+ 'tenantId': u.not_null,
4310+ 'id': u.not_null,
4311+ 'email': 'demo@demo.com'}
4312+ user2 = {'name': 'admin',
4313+ 'enabled': True,
4314+ 'tenantId': u.not_null,
4315+ 'id': u.not_null,
4316+ 'email': 'juju@localhost'}
4317+ user3 = {'name': 'glance',
4318+ 'enabled': True,
4319+ 'tenantId': u.not_null,
4320+ 'id': u.not_null,
4321+ 'email': u'juju@localhost'}
4322+ user4 = {'name': 'swift',
4323+ 'enabled': True,
4324+ 'tenantId': u.not_null,
4325+ 'id': u.not_null,
4326+ 'email': u'juju@localhost'}
4327+ expected = [user1, user2, user3, user4]
4328+ actual = self.keystone.users.list()
4329+
4330+ ret = u.validate_user_data(expected, actual)
4331+ if ret:
4332+ amulet.raise_status(amulet.FAIL, msg=ret)
4333+
4334+ def test_service_catalog(self):
4335+ """Verify that the service catalog endpoint data is valid."""
4336+ endpoint_vol = {'adminURL': u.valid_url,
4337+ 'region': 'RegionOne',
4338+ 'publicURL': u.valid_url,
4339+ 'internalURL': u.valid_url}
4340+ endpoint_id = {'adminURL': u.valid_url,
4341+ 'region': 'RegionOne',
4342+ 'publicURL': u.valid_url,
4343+ 'internalURL': u.valid_url}
4344+ if self._get_openstack_release() >= self.precise_folsom:
4345+ endpoint_vol['id'] = u.not_null
4346+ endpoint_id['id'] = u.not_null
4347+ expected = {'image': [endpoint_id], 'object-store': [endpoint_id],
4348+ 'identity': [endpoint_id]}
4349+ actual = self.keystone_demo.service_catalog.get_endpoints()
4350+
4351+ ret = u.validate_svc_catalog_endpoint_data(expected, actual)
4352+ if ret:
4353+ amulet.raise_status(amulet.FAIL, msg=ret)
4354+
4355+ def test_openstack_object_store_endpoint(self):
4356+ """Verify the swift object-store endpoint data."""
4357+ endpoints = self.keystone.endpoints.list()
4358+ admin_port = internal_port = public_port = '8080'
4359+ expected = {'id': u.not_null,
4360+ 'region': 'RegionOne',
4361+ 'adminurl': u.valid_url,
4362+ 'internalurl': u.valid_url,
4363+ 'publicurl': u.valid_url,
4364+ 'service_id': u.not_null}
4365+
4366+ ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
4367+ public_port, expected)
4368+ if ret:
4369+ message = 'object-store endpoint: {}'.format(ret)
4370+ amulet.raise_status(amulet.FAIL, msg=message)
4371+
4372+ def test_swift_proxy_identity_service_relation(self):
4373+ """Verify the swift-proxy to keystone identity-service relation data."""
4374+ unit = self.swift_proxy_sentry
4375+ relation = ['identity-service', 'keystone:identity-service']
4376+ expected = {
4377+ 'service': 'swift',
4378+ 'region': 'RegionOne',
4379+ 'public_url': u.valid_url,
4380+ 'internal_url': u.valid_url,
4381+ 'private-address': u.valid_ip,
4382+ 'requested_roles': 'Member,Admin',
4383+ 'admin_url': u.valid_url
4384+ }
4385+
4386+ ret = u.validate_relation_data(unit, relation, expected)
4387+ if ret:
4388+ message = u.relation_error('swift-proxy identity-service', ret)
4389+ amulet.raise_status(amulet.FAIL, msg=message)
4390+
4391+ def test_keystone_identity_service_relation(self):
4392+ """Verify the keystone to swift-proxy identity-service relation data."""
4393+ unit = self.keystone_sentry
4394+ relation = ['identity-service', 'swift-proxy:identity-service']
4395+ expected = {
4396+ 'service_protocol': 'http',
4397+ 'service_tenant': 'services',
4398+ 'admin_token': 'ubuntutesting',
4399+ 'service_password': u.not_null,
4400+ 'service_port': '5000',
4401+ 'auth_port': '35357',
4402+ 'auth_protocol': 'http',
4403+ 'private-address': u.valid_ip,
4404+ 'https_keystone': 'False',
4405+ 'auth_host': u.valid_ip,
4406+ 'service_username': 'swift',
4407+ 'service_tenant_id': u.not_null,
4408+ 'service_host': u.valid_ip
4409+ }
4410+
4411+ ret = u.validate_relation_data(unit, relation, expected)
4412+ if ret:
4413+ message = u.relation_error('keystone identity-service', ret)
4414+ amulet.raise_status(amulet.FAIL, msg=message)
4415+
4416+ def test_swift_storage_swift_storage_relation(self):
4417+ """Verify the swift-storage to swift-proxy swift-storage relation
4418+ data."""
4419+ unit = self.swift_storage_sentry
4420+ relation = ['swift-storage', 'swift-proxy:swift-storage']
4421+ expected = {
4422+ 'account_port': '6002',
4423+ 'zone': '1',
4424+ 'object_port': '6000',
4425+ 'container_port': '6001',
4426+ 'private-address': u.valid_ip,
4427+ 'device': 'vdb'
4428+ }
4429+
4430+ ret = u.validate_relation_data(unit, relation, expected)
4431+ if ret:
4432+ message = u.relation_error('swift-storage swift-storage', ret)
4433+ amulet.raise_status(amulet.FAIL, msg=message)
4434+
4435+ def test_swift_proxy_swift_storage_relation(self):
4436+ """Verify the swift-proxy to swift-storage swift-storage relation
4437+ data."""
4438+ unit = self.swift_proxy_sentry
4439+ relation = ['swift-storage', 'swift-storage:swift-storage']
4440+ expected = {
4441+ 'private-address': u.valid_ip,
4442+ 'trigger': u.not_null,
4443+ 'rings_url': u.valid_url,
4444+ 'swift_hash': u.not_null
4445+ }
4446+
4447+ ret = u.validate_relation_data(unit, relation, expected)
4448+ if ret:
4449+ message = u.relation_error('swift-proxy swift-storage', ret)
4450+ amulet.raise_status(amulet.FAIL, msg=message)
4451+
4452+ def test_glance_object_store_relation(self):
4453+ """Verify the glance to swift-proxy object-store relation data."""
4454+ unit = self.glance_sentry
4455+ relation = ['object-store', 'swift-proxy:object-store']
4456+ expected = {'private-address': u.valid_ip}
4457+
4458+ ret = u.validate_relation_data(unit, relation, expected)
4459+ if ret:
4460+ message = u.relation_error('glance object-store', ret)
4461+ amulet.raise_status(amulet.FAIL, msg=message)
4462+
4463+ def test_swift_proxy_object_store_relation(self):
4464+ """Verify the swift-proxy to glance object-store relation data."""
4465+ unit = self.swift_proxy_sentry
4466+ relation = ['object-store', 'glance:object-store']
4467+ expected = {'private-address': u.valid_ip}
4468+ ret = u.validate_relation_data(unit, relation, expected)
4469+ if ret:
4470+ message = u.relation_error('swift-proxy object-store', ret)
4471+ amulet.raise_status(amulet.FAIL, msg=message)
4472+
4473+ def test_restart_on_config_change(self):
4474+ """Verify that the specified services are restarted when the config
4475+ is changed."""
4476+ svc = 'swift-proxy'
4477+ self.d.configure('swift-proxy', {'node-timeout': '90'})
4478+
4479+ if not u.service_restarted(self.swift_proxy_sentry, svc,
4480+ '/etc/swift/proxy-server.conf'):
4481+ msg = "service {} didn't restart after config change".format(svc)
4482+ amulet.raise_status(amulet.FAIL, msg=msg)
4483+
4484+ self.d.configure('swift-proxy', {'node-timeout': '60'})
4485+
4486+ def test_swift_config(self):
4487+ """Verify the data in the swift config file."""
4488+ unit = self.swift_proxy_sentry
4489+ conf = '/etc/swift/swift.conf'
4490+ swift_proxy_relation = unit.relation('swift-storage',
4491+ 'swift-storage:swift-storage')
4492+ expected = {
4493+ 'swift_hash_path_suffix': swift_proxy_relation['swift_hash']
4494+ }
4495+
4496+ ret = u.validate_config_data(unit, conf, 'swift-hash', expected)
4497+ if ret:
4498+ message = "swift config error: {}".format(ret)
4499+ amulet.raise_status(amulet.FAIL, msg=message)
4500+
4501+ def test_proxy_server_icehouse_config(self):
4502+ """Verify the data in the proxy-server config file."""
4503+ if self._get_openstack_release() < self.precise_icehouse:
4504+ return
4505+
4506+ unit = self.swift_proxy_sentry
4507+ conf = '/etc/swift/proxy-server.conf'
4508+ keystone_relation = self.keystone_sentry.relation('identity-service',
4509+ 'swift-proxy:identity-service')
4510+ swift_proxy_relation = unit.relation('identity-service',
4511+ 'keystone:identity-service')
4512+ swift_proxy_ip = swift_proxy_relation['private-address']
4513+ auth_host = keystone_relation['auth_host']
4514+ auth_protocol = keystone_relation['auth_protocol']
4515+
4516+ expected = {
4517+ 'DEFAULT': {
4518+ 'bind_port': '8080',
4519+ 'workers': '0',
4520+ 'user': 'swift'
4521+ },
4522+ 'pipeline:main': {
4523+ 'pipeline': 'gatekeeper healthcheck cache swift3 s3token '
4524+ 'container_sync bulk tempurl slo dlo formpost '
4525+ 'authtoken keystoneauth staticweb '
4526+ 'container-quotas account-quotas proxy-server'
4527+ },
4528+ 'app:proxy-server': {
4529+ 'use': 'egg:swift#proxy',
4530+ 'allow_account_management': 'true',
4531+ 'account_autocreate': 'true',
4532+ 'node_timeout': '60',
4533+ 'recoverable_node_timeout': '30'
4534+ },
4535+ 'filter:tempauth': {
4536+ 'use': 'egg:swift#tempauth',
4537+ 'user_system_root': 'testpass .admin https://{}:8080/v1/'
4538+ 'AUTH_system'.format(swift_proxy_ip)
4539+ },
4540+ 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
4541+ 'filter:cache': {
4542+ 'use': 'egg:swift#memcache',
4543+ 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
4544+ },
4545+ 'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
4546+ 'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
4547+ 'filter:staticweb': {'use': 'egg:swift#staticweb'},
4548+ 'filter:bulk': {'use': 'egg:swift#bulk'},
4549+ 'filter:slo': {'use': 'egg:swift#slo'},
4550+ 'filter:dlo': {'use': 'egg:swift#dlo'},
4551+ 'filter:formpost': {'use': 'egg:swift#formpost'},
4552+ 'filter:tempurl': {'use': 'egg:swift#tempurl'},
4553+ 'filter:container_sync': {'use': 'egg:swift#container_sync'},
4554+ 'filter:gatekeeper': {'use': 'egg:swift#gatekeeper'},
4555+ 'filter:keystoneauth': {
4556+ 'use': 'egg:swift#keystoneauth',
4557+ 'operator_roles': 'Member,Admin'
4558+ },
4559+ 'filter:authtoken': {
4560+ 'paste.filter_factory': 'keystoneclient.middleware.'
4561+ 'auth_token:filter_factory',
4562+ 'auth_host': auth_host,
4563+ 'auth_port': keystone_relation['auth_port'],
4564+ 'auth_protocol': auth_protocol,
4565+ 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
4566+ keystone_relation['service_port']),
4567+ 'admin_tenant_name': keystone_relation['service_tenant'],
4568+ 'admin_user': keystone_relation['service_username'],
4569+ 'admin_password': keystone_relation['service_password'],
4570+ 'delay_auth_decision': 'true',
4571+ 'signing_dir': '/etc/swift',
4572+ 'cache': 'swift.cache'
4573+ },
4574+ 'filter:s3token': {
4575+ 'paste.filter_factory': 'keystoneclient.middleware.'
4576+ 's3_token:filter_factory',
4577+ 'service_host': keystone_relation['service_host'],
4578+ 'service_port': keystone_relation['service_port'],
4579+ 'auth_port': keystone_relation['auth_port'],
4580+ 'auth_host': keystone_relation['auth_host'],
4581+ 'auth_protocol': keystone_relation['auth_protocol'],
4582+ 'auth_token': keystone_relation['admin_token'],
4583+ 'admin_token': keystone_relation['admin_token']
4584+ },
4585+ 'filter:swift3': {'use': 'egg:swift3#swift3'}
4586+ }
4587+
4588+ for section, pairs in expected.iteritems():
4589+ ret = u.validate_config_data(unit, conf, section, pairs)
4590+ if ret:
4591+ message = "proxy-server config error: {}".format(ret)
4592+ amulet.raise_status(amulet.FAIL, msg=message)
4593+
4594+ def test_proxy_server_havana_config(self):
4595+ """Verify the data in the proxy-server config file."""
4596+ if self._get_openstack_release() != self.precise_havana:
4597+ return
4598+
4599+ unit = self.swift_proxy_sentry
4600+ conf = '/etc/swift/proxy-server.conf'
4601+ keystone_relation = self.keystone_sentry.relation('identity-service',
4602+ 'swift-proxy:identity-service')
4603+ swift_proxy_relation = unit.relation('identity-service',
4604+ 'keystone:identity-service')
4605+ swift_proxy_ip = swift_proxy_relation['private-address']
4606+ auth_host = keystone_relation['auth_host']
4607+ auth_protocol = keystone_relation['auth_protocol']
4608+
4609+ expected = {
4610+ 'DEFAULT': {
4611+ 'bind_port': '8080',
4612+ 'workers': '0',
4613+ 'user': 'swift'
4614+ },
4615+ 'pipeline:main': {
4616+ 'pipeline': 'healthcheck cache swift3 authtoken '
4617+ 'keystoneauth container-quotas account-quotas '
4618+ 'proxy-server'
4619+ },
4620+ 'app:proxy-server': {
4621+ 'use': 'egg:swift#proxy',
4622+ 'allow_account_management': 'true',
4623+ 'account_autocreate': 'true',
4624+ 'node_timeout': '60',
4625+ 'recoverable_node_timeout': '30'
4626+ },
4627+ 'filter:tempauth': {
4628+ 'use': 'egg:swift#tempauth',
4629+ 'user_system_root': 'testpass .admin https://{}:8080/v1/'
4630+ 'AUTH_system'.format(swift_proxy_ip)
4631+ },
4632+ 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
4633+ 'filter:cache': {
4634+ 'use': 'egg:swift#memcache',
4635+ 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
4636+ },
4637+ 'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
4638+ 'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
4639+ 'filter:keystoneauth': {
4640+ 'use': 'egg:swift#keystoneauth',
4641+ 'operator_roles': 'Member,Admin'
4642+ },
4643+ 'filter:authtoken': {
4644+ 'paste.filter_factory': 'keystoneclient.middleware.'
4645+ 'auth_token:filter_factory',
4646+ 'auth_host': auth_host,
4647+ 'auth_port': keystone_relation['auth_port'],
4648+ 'auth_protocol': auth_protocol,
4649+ 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
4650+ keystone_relation['service_port']),
4651+ 'admin_tenant_name': keystone_relation['service_tenant'],
4652+ 'admin_user': keystone_relation['service_username'],
4653+ 'admin_password': keystone_relation['service_password'],
4654+ 'delay_auth_decision': 'true',
4655+ 'signing_dir': '/etc/swift',
4656+ 'cache': 'swift.cache'
4657+ },
4658+ 'filter:s3token': {
4659+ 'paste.filter_factory': 'keystone.middleware.s3_token:'
4660+ 'filter_factory',
4661+ 'service_host': keystone_relation['service_host'],
4662+ 'service_port': keystone_relation['service_port'],
4663+ 'auth_port': keystone_relation['auth_port'],
4664+ 'auth_host': keystone_relation['auth_host'],
4665+ 'auth_protocol': keystone_relation['auth_protocol'],
4666+ 'auth_token': keystone_relation['admin_token'],
4667+ 'admin_token': keystone_relation['admin_token'],
4668+ 'service_protocol': keystone_relation['service_protocol']
4669+ },
4670+ 'filter:swift3': {'use': 'egg:swift3#swift3'}
4671+ }
4672+
4673+ for section, pairs in expected.iteritems():
4674+ ret = u.validate_config_data(unit, conf, section, pairs)
4675+ if ret:
4676+ message = "proxy-server config error: {}".format(ret)
4677+ amulet.raise_status(amulet.FAIL, msg=message)
4678+
4679+ def test_proxy_server_grizzly_config(self):
4680+ """Verify the data in the proxy-server config file."""
4681+ if self._get_openstack_release() != self.precise_grizzly:
4682+ return
4683+
4684+ unit = self.swift_proxy_sentry
4685+ conf = '/etc/swift/proxy-server.conf'
4686+ keystone_relation = self.keystone_sentry.relation('identity-service',
4687+ 'swift-proxy:identity-service')
4688+ swift_proxy_relation = unit.relation('identity-service',
4689+ 'keystone:identity-service')
4690+ swift_proxy_ip = swift_proxy_relation['private-address']
4691+ auth_host = keystone_relation['auth_host']
4692+ auth_protocol = keystone_relation['auth_protocol']
4693+
4694+ expected = {
4695+ 'DEFAULT': {
4696+ 'bind_port': '8080',
4697+ 'workers': '0',
4698+ 'user': 'swift'
4699+ },
4700+ 'pipeline:main': {
4701+ 'pipeline': 'healthcheck cache swift3 s3token authtoken '
4702+ 'keystone container-quotas account-quotas '
4703+ 'proxy-server'
4704+ },
4705+ 'app:proxy-server': {
4706+ 'use': 'egg:swift#proxy',
4707+ 'allow_account_management': 'true',
4708+ 'account_autocreate': 'true',
4709+ 'node_timeout': '60',
4710+ 'recoverable_node_timeout': '30'
4711+ },
4712+ 'filter:tempauth': {
4713+ 'use': 'egg:swift#tempauth',
4714+ 'user_system_root': 'testpass .admin https://{}:8080/v1/'
4715+ 'AUTH_system'.format(swift_proxy_ip)
4716+ },
4717+ 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
4718+ 'filter:cache': {
4719+ 'use': 'egg:swift#memcache',
4720+ 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
4721+ },
4722+ 'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
4723+ 'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
4724+ 'filter:keystone': {
4725+ 'paste.filter_factory': 'swift.common.middleware.'
4726+ 'keystoneauth:filter_factory',
4727+ 'operator_roles': 'Member,Admin'
4728+ },
4729+ 'filter:authtoken': {
4730+ 'paste.filter_factory': 'keystone.middleware.auth_token:'
4731+ 'filter_factory',
4732+ 'auth_host': auth_host,
4733+ 'auth_port': keystone_relation['auth_port'],
4734+ 'auth_protocol': auth_protocol,
4735+ 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
4736+ keystone_relation['service_port']),
4737+ 'admin_tenant_name': keystone_relation['service_tenant'],
4738+ 'admin_user': keystone_relation['service_username'],
4739+ 'admin_password': keystone_relation['service_password'],
4740+ 'delay_auth_decision': 'true',
4741+ 'signing_dir': '/etc/swift'
4742+ },
4743+ 'filter:s3token': {
4744+ 'paste.filter_factory': 'keystone.middleware.s3_token:'
4745+ 'filter_factory',
4746+ 'service_host': keystone_relation['service_host'],
4747+ 'service_port': keystone_relation['service_port'],
4748+ 'auth_port': keystone_relation['auth_port'],
4749+ 'auth_host': keystone_relation['auth_host'],
4750+ 'auth_protocol': keystone_relation['auth_protocol'],
4751+ 'auth_token': keystone_relation['admin_token'],
4752+ 'admin_token': keystone_relation['admin_token'],
4753+ 'service_protocol': keystone_relation['service_protocol']
4754+ },
4755+ 'filter:swift3': {'use': 'egg:swift3#swift3'}
4756+ }
4757+
4758+ for section, pairs in expected.iteritems():
4759+ ret = u.validate_config_data(unit, conf, section, pairs)
4760+ if ret:
4761+ message = "proxy-server config error: {}".format(ret)
4762+ amulet.raise_status(amulet.FAIL, msg=message)
4763+
4764+ def test_proxy_server_folsom_config(self):
4765+ """Verify the data in the proxy-server config file."""
4766+ if self._get_openstack_release() != self.precise_folsom:
4767+ return
4768+
4769+ unit = self.swift_proxy_sentry
4770+ conf = '/etc/swift/proxy-server.conf'
4771+ keystone_relation = self.keystone_sentry.relation('identity-service',
4772+ 'swift-proxy:identity-service')
4773+ swift_proxy_relation = unit.relation('identity-service',
4774+ 'keystone:identity-service')
4775+ swift_proxy_ip = swift_proxy_relation['private-address']
4776+ auth_host = keystone_relation['auth_host']
4777+ auth_protocol = keystone_relation['auth_protocol']
4778+
4779+ expected = {
4780+ 'DEFAULT': {
4781+ 'bind_port': '8080',
4782+ 'workers': '0',
4783+ 'user': 'swift'
4784+ },
4785+ 'pipeline:main': {
4786+ 'pipeline': 'healthcheck cache swift3 s3token authtoken '
4787+ 'keystone proxy-server'
4788+ },
4789+ 'app:proxy-server': {
4790+ 'use': 'egg:swift#proxy',
4791+ 'allow_account_management': 'true',
4792+ 'account_autocreate': 'true',
4793+ 'node_timeout': '60',
4794+ 'recoverable_node_timeout': '30'
4795+ },
4796+ 'filter:tempauth': {
4797+ 'use': 'egg:swift#tempauth',
4798+ 'user_system_root': 'testpass .admin https://{}:8080/v1/'
4799+ 'AUTH_system'.format(swift_proxy_ip)
4800+ },
4801+ 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
4802+ 'filter:cache': {
4803+ 'use': 'egg:swift#memcache',
4804+ 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
4805+ },
4806+ 'filter:keystone': {
4807+ 'paste.filter_factory': 'keystone.middleware.swift_auth:'
4808+ 'filter_factory',
4809+ 'operator_roles': 'Member,Admin'
4810+ },
4811+ 'filter:authtoken': {
4812+ 'paste.filter_factory': 'keystone.middleware.auth_token:'
4813+ 'filter_factory',
4814+ 'auth_host': auth_host,
4815+ 'auth_port': keystone_relation['auth_port'],
4816+ 'auth_protocol': auth_protocol,
4817+ 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
4818+ keystone_relation['service_port']),
4819+ 'admin_tenant_name': keystone_relation['service_tenant'],
4820+ 'admin_user': keystone_relation['service_username'],
4821+ 'admin_password': keystone_relation['service_password'],
4822+ 'delay_auth_decision': '1'
4823+ },
4824+ 'filter:s3token': {
4825+ 'paste.filter_factory': 'keystone.middleware.s3_token:'
4826+ 'filter_factory',
4827+ 'service_host': keystone_relation['service_host'],
4828+ 'service_port': keystone_relation['service_port'],
4829+ 'auth_port': keystone_relation['auth_port'],
4830+ 'auth_host': keystone_relation['auth_host'],
4831+ 'auth_protocol': keystone_relation['auth_protocol'],
4832+ 'auth_token': keystone_relation['admin_token'],
4833+ 'admin_token': keystone_relation['admin_token'],
4834+ 'service_protocol': keystone_relation['service_protocol']
4835+ },
4836+ 'filter:swift3': {'use': 'egg:swift#swift3'}
4837+ }
4838+
4839+ for section, pairs in expected.iteritems():
4840+ ret = u.validate_config_data(unit, conf, section, pairs)
4841+ if ret:
4842+ message = "proxy-server config error: {}".format(ret)
4843+ amulet.raise_status(amulet.FAIL, msg=message)
4844+
4845+ def test_proxy_server_essex_config(self):
4846+ """Verify the data in the proxy-server config file."""
4847+ if self._get_openstack_release() != self.precise_essex:
4848+ return
4849+
4850+ unit = self.swift_proxy_sentry
4851+ conf = '/etc/swift/proxy-server.conf'
4852+ keystone_relation = self.keystone_sentry.relation('identity-service',
4853+ 'swift-proxy:identity-service')
4854+ swift_proxy_relation = unit.relation('identity-service',
4855+ 'keystone:identity-service')
4856+ swift_proxy_ip = swift_proxy_relation['private-address']
4857+ auth_host = keystone_relation['auth_host']
4858+ auth_protocol = keystone_relation['auth_protocol']
4859+
4860+ expected = {
4861+ 'DEFAULT': {
4862+ 'bind_port': '8080',
4863+ 'workers': '0',
4864+ 'user': 'swift'
4865+ },
4866+ 'pipeline:main': {
4867+ 'pipeline': 'healthcheck cache swift3 s3token authtoken '
4868+ 'keystone proxy-server'
4869+ },
4870+ 'app:proxy-server': {
4871+ 'use': 'egg:swift#proxy',
4872+ 'allow_account_management': 'true',
4873+ 'account_autocreate': 'true',
4874+ 'node_timeout': '60',
4875+ 'recoverable_node_timeout': '30'
4876+ },
4877+ 'filter:tempauth': {
4878+ 'use': 'egg:swift#tempauth',
4879+ 'user_system_root': 'testpass .admin https://{}:8080/v1/'
4880+ 'AUTH_system'.format(swift_proxy_ip)
4881+ },
4882+ 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
4883+ 'filter:cache': {
4884+ 'use': 'egg:swift#memcache',
4885+ 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
4886+ },
4887+ 'filter:keystone': {
4888+ 'paste.filter_factory': 'keystone.middleware.swift_auth:'
4889+ 'filter_factory',
4890+ 'operator_roles': 'Member,Admin'
4891+ },
4892+ 'filter:authtoken': {
4893+ 'paste.filter_factory': 'keystone.middleware.auth_token:'
4894+ 'filter_factory',
4895+ 'auth_host': auth_host,
4896+ 'auth_port': keystone_relation['auth_port'],
4897+ 'auth_protocol': auth_protocol,
4898+ 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
4899+ keystone_relation['service_port']),
4900+ 'admin_tenant_name': keystone_relation['service_tenant'],
4901+ 'admin_user': keystone_relation['service_username'],
4902+ 'admin_password': keystone_relation['service_password'],
4903+ 'delay_auth_decision': '1'
4904+ },
4905+ 'filter:s3token': {
4906+ 'paste.filter_factory': 'keystone.middleware.s3_token:'
4907+ 'filter_factory',
4908+ 'service_host': keystone_relation['service_host'],
4909+ 'service_port': keystone_relation['service_port'],
4910+ 'auth_port': keystone_relation['auth_port'],
4911+ 'auth_host': keystone_relation['auth_host'],
4912+ 'auth_protocol': keystone_relation['auth_protocol'],
4913+ 'auth_token': keystone_relation['admin_token'],
4914+ 'admin_token': keystone_relation['admin_token'],
4915+ 'service_protocol': keystone_relation['service_protocol']
4916+ },
4917+ 'filter:swift3': {'use': 'egg:swift#swift3'}
4918+ }
4919+
4920+ for section, pairs in expected.iteritems():
4921+ ret = u.validate_config_data(unit, conf, section, pairs)
4922+ if ret:
4923+ message = "proxy-server config error: {}".format(ret)
4924+ amulet.raise_status(amulet.FAIL, msg=message)
4925+
4926+ def test_image_create(self):
4927+ """Create an instance in glance, which is backed by swift, and validate
4928+ that some of the metadata for the image match in glance and swift."""
4929+ # NOTE(coreycb): Skipping failing test on folsom until resolved. On
4930+ # folsom only, uploading an image to glance gets 400 Bad
4931+ # Request - Error uploading image: (error): [Errno 111]
4932+ # ECONNREFUSED (HTTP 400)
4933+ if self._get_openstack_release() == self.precise_folsom:
4934+ u.log.error("Skipping failing test until resolved")
4935+ return
4936+
4937+ # Create glance image
4938+ image = u.create_cirros_image(self.glance, "cirros-image")
4939+ if not image:
4940+ amulet.raise_status(amulet.FAIL, msg="Image create failed")
4941+
4942+ # Validate that the cirros image exists in glance and get its checksum/size
4943+ images = list(self.glance.images.list())
4944+ if len(images) != 1:
4945+ msg = "Expected 1 glance image, found {}".format(len(images))
4946+ amulet.raise_status(amulet.FAIL, msg=msg)
4947+
4948+ if images[0].name != 'cirros-image':
4949+ message = "cirros image does not exist"
4950+ amulet.raise_status(amulet.FAIL, msg=message)
4951+
4952+ glance_image_md5 = image.checksum
4953+ glance_image_size = image.size
4954+
4955+ # Validate that the swift object's checksum/size match those from glance
4956+ headers, containers = self.swift.get_account()
4957+ if len(containers) != 1:
4958+ msg = "Expected 1 swift container, found {}".format(len(containers))
4959+ amulet.raise_status(amulet.FAIL, msg=msg)
4960+
4961+ container_name = containers[0].get('name')
4962+
4963+ headers, objects = self.swift.get_container(container_name)
4964+ if len(objects) != 1:
4965+ msg = "Expected 1 swift object, found {}".format(len(objects))
4966+ amulet.raise_status(amulet.FAIL, msg=msg)
4967+
4968+ swift_object_size = objects[0].get('bytes')
4969+ swift_object_md5 = objects[0].get('hash')
4970+
4971+ if glance_image_size != swift_object_size:
4972+ msg = "Glance image size {} != swift object size {}".format( \
4973+ glance_image_size, swift_object_size)
4974+ amulet.raise_status(amulet.FAIL, msg=msg)
4975+
4976+ if glance_image_md5 != swift_object_md5:
4977+ msg = "Glance image hash {} != swift object hash {}".format( \
4978+ glance_image_md5, swift_object_md5)
4979+ amulet.raise_status(amulet.FAIL, msg=msg)
4980+
4981+ # Cleanup
4982+ u.delete_image(self.glance, image)
4983
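
Each of the config tests above funnels its expectations through u.validate_config_data, which this branch adds in tests/charmhelpers/contrib/amulet/utils.py (listed in the file summary but outside this hunk). A minimal sketch of the check it presumably performs, assuming a ConfigParser-based reader over the unit's file contents and a None return on success — names and details here are illustrative, not the charm-helpers code verbatim:

    import ConfigParser
    import StringIO

    def validate_config_data(sentry_unit, config_file, section, expected):
        """Return None if every expected option matches, else an error string."""
        # Pull the remote file from the sentry unit and parse it as INI.
        config = ConfigParser.ConfigParser()
        config.readfp(StringIO.StringIO(sentry_unit.file_contents(config_file)))

        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for option, want in expected.iteritems():
            if not config.has_option(section, option):
                return "section [{}] is missing option {}".format(section, option)
            got = config.get(section, option)
            if got != want:
                return "section [{}] {}:{} != expected {}".format(
                    section, option, got, want)
        return None

With this contract, the per-release test methods only assert that the return value is falsy, keeping the expected-value dicts as the single source of truth for each OpenStack release.
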
4984=== added directory 'tests/charmhelpers'
4985=== added file 'tests/charmhelpers/__init__.py'
4986=== added directory 'tests/charmhelpers/contrib'
4987=== added file 'tests/charmhelpers/contrib/__init__.py'
4988=== added directory 'tests/charmhelpers/contrib/amulet'
4989=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
4990=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
4991--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
4992+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-10-06 15:38:41 +0000
4993@@ -0,0 +1,72 @@
4994+import amulet
4995+
4996+import os
4997+
4998+
4999+class AmuletDeployment(object):
5000+ """Amulet deployment.
The diff has been truncated for viewing.
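
The rest of the AmuletDeployment base class falls inside the truncated portion of the diff. As a loose, assumption-laden sketch of the shape such a wrapper around amulet.Deployment usually takes (method names, arguments, and timeouts here are guesses, not content recovered from the truncated diff):

    import amulet

    class AmuletDeployment(object):
        """Generic add/relate/configure/deploy plumbing for charm tests.

        Illustrative sketch only; not the truncated charm-helpers source.
        """

        def __init__(self, series=None):
            self.series = series
            if series:
                self.d = amulet.Deployment(series=series)
            else:
                self.d = amulet.Deployment()

        def _add_services(self, this_service, other_services):
            # Deploy the charm under test alongside its supporting charms.
            # Each entry is assumed to be a (service_name, unit_count) pair.
            for svc, units in [this_service] + other_services:
                self.d.add(svc, units=units)

        def _add_relations(self, relations):
            # relations maps 'service:interface' -> 'service:interface'.
            for src, dst in relations.iteritems():
                self.d.relate(src, dst)

        def _configure_services(self, configs):
            # configs maps service name -> dict of charm config options.
            for service, config in configs.iteritems():
                self.d.configure(service, config)

        def _deploy(self):
            # Bring the environment up and wait for hooks to settle.
            try:
                self.d.setup(timeout=900)
                self.d.sentry.wait(timeout=900)
            except amulet.helpers.TimeoutError:
                amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

The tests/basic_deployment.py suite above would then subclass this (via the OpenStack-specific deployment helper) and drive these steps before exercising the proxy-server assertions.
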
