Merge lp:~jjo/charms/trusty/swift-proxy/swift_hash-from-JUJU_ENV_UUID-and-service_name into lp:~openstack-charmers-archive/charms/trusty/swift-proxy/trunk

Proposed by JuanJo Ciarlante
Status: Superseded
Proposed branch: lp:~jjo/charms/trusty/swift-proxy/swift_hash-from-JUJU_ENV_UUID-and-service_name
Merge into: lp:~openstack-charmers-archive/charms/trusty/swift-proxy/trunk
Diff against target: 5652 lines (+4171/-249) (has conflicts)
52 files modified
.bzrignore (+2/-0)
Makefile (+24/-1)
charm-helpers-hooks.yaml (+13/-0)
charm-helpers-tests.yaml (+5/-0)
config.yaml (+53/-9)
hooks/charmhelpers/contrib/hahelpers/apache.py (+10/-3)
hooks/charmhelpers/contrib/hahelpers/cluster.py (+59/-17)
hooks/charmhelpers/contrib/network/ip.py (+343/-0)
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
hooks/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
hooks/charmhelpers/contrib/openstack/context.py (+237/-59)
hooks/charmhelpers/contrib/openstack/ip.py (+79/-0)
hooks/charmhelpers/contrib/openstack/neutron.py (+14/-0)
hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+18/-4)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+9/-8)
hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+9/-8)
hooks/charmhelpers/contrib/openstack/templating.py (+22/-23)
hooks/charmhelpers/contrib/openstack/utils.py (+45/-6)
hooks/charmhelpers/contrib/peerstorage/__init__.py (+131/-0)
hooks/charmhelpers/contrib/storage/linux/ceph.py (+1/-1)
hooks/charmhelpers/core/fstab.py (+116/-0)
hooks/charmhelpers/core/hookenv.py (+49/-21)
hooks/charmhelpers/core/host.py (+75/-11)
hooks/charmhelpers/core/services/__init__.py (+2/-0)
hooks/charmhelpers/core/services/base.py (+313/-0)
hooks/charmhelpers/core/services/helpers.py (+239/-0)
hooks/charmhelpers/core/templating.py (+51/-0)
hooks/charmhelpers/fetch/__init__.py (+115/-32)
hooks/charmhelpers/fetch/archiveurl.py (+49/-4)
hooks/swift_context.py (+30/-11)
hooks/swift_hooks.py (+78/-23)
hooks/swift_utils.py (+26/-3)
revision (+1/-1)
templates/essex/proxy-server.conf (+2/-0)
templates/grizzly/proxy-server.conf (+2/-0)
templates/havana/proxy-server.conf (+2/-0)
templates/icehouse/proxy-server.conf (+4/-1)
templates/memcached.conf (+1/-1)
tests/00-setup (+11/-0)
tests/10-basic-precise-essex (+9/-0)
tests/11-basic-precise-folsom (+11/-0)
tests/12-basic-precise-grizzly (+11/-0)
tests/13-basic-precise-havana (+11/-0)
tests/14-basic-precise-icehouse (+11/-0)
tests/15-basic-trusty-icehouse (+9/-0)
tests/README (+52/-0)
tests/basic_deployment.py (+827/-0)
tests/charmhelpers/contrib/amulet/deployment.py (+72/-0)
tests/charmhelpers/contrib/amulet/utils.py (+176/-0)
tests/charmhelpers/contrib/openstack/amulet/deployment.py (+94/-0)
tests/charmhelpers/contrib/openstack/amulet/utils.py (+276/-0)
unit_tests/test_templates.py (+2/-2)
Conflict adding file .bzrignore.  Moved existing file to .bzrignore.moved.
Text conflict in Makefile
Contents conflict in charm-helpers.yaml
Text conflict in hooks/charmhelpers/contrib/openstack/context.py
Text conflict in hooks/charmhelpers/contrib/openstack/utils.py
Conflict adding file hooks/charmhelpers/core/fstab.py.  Moved existing file to hooks/charmhelpers/core/fstab.py.moved.
Text conflict in hooks/charmhelpers/core/host.py
Text conflict in hooks/charmhelpers/fetch/__init__.py
To merge this branch: bzr merge lp:~jjo/charms/trusty/swift-proxy/swift_hash-from-JUJU_ENV_UUID-and-service_name
Reviewer Review Type Date Requested Status
OpenStack Charmers Pending
Review via email: mp+237290@code.launchpad.net

This proposal supersedes a proposal from 2014-10-06.

This proposal has been superseded by a proposal from 2014-10-06.

To post a comment you must log in.

Unmerged revisions

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
=== added file '.bzrignore'
--- .bzrignore 1970-01-01 00:00:00 +0000
+++ .bzrignore 2014-10-06 15:38:41 +0000
@@ -0,0 +1,2 @@
1.coverage
2bin
03
=== renamed file '.bzrignore' => '.bzrignore.moved'
=== modified file 'Makefile'
--- Makefile 2014-08-13 15:57:07 +0000
+++ Makefile 2014-10-06 15:38:41 +0000
@@ -3,10 +3,15 @@
33
4lint:4lint:
5 @flake8 --exclude hooks/charmhelpers --ignore=E125 hooks5 @flake8 --exclude hooks/charmhelpers --ignore=E125 hooks
6 @flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests6 @flake8 --exclude hooks/charmhelpers --ignore=E125 unit_tests tests
7 @charm proof7 @charm proof
88
9unit_test:
10 @echo Starting unit tests...
11 @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
12
9test:13test:
14<<<<<<< TREE
10 @echo Starting tests...15 @echo Starting tests...
11 @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests16 @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests
1217
@@ -19,5 +24,23 @@
19 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml24 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml
2025
21publish: lint test26publish: lint test
27=======
28 @echo Starting Amulet tests...
29 # coreycb note: The -v should only be temporary until Amulet sends
30 # raise_status() messages to stderr:
31 # https://bugs.launchpad.net/amulet/+bug/1320357
32 @juju test -v -p AMULET_HTTP_PROXY
33
34bin/charm_helpers_sync.py:
35 @mkdir -p bin
36 @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \
37 > bin/charm_helpers_sync.py
38
39sync: bin/charm_helpers_sync.py
40 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
41 @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
42
43publish: lint unit_test
44>>>>>>> MERGE-SOURCE
22 bzr push lp:charms/swift-proxy45 bzr push lp:charms/swift-proxy
23 bzr push lp:charms/trusty/swift-proxy46 bzr push lp:charms/trusty/swift-proxy
2447
=== added file 'charm-helpers-hooks.yaml'
--- charm-helpers-hooks.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-hooks.yaml 2014-10-06 15:38:41 +0000
@@ -0,0 +1,13 @@
1branch: lp:charm-helpers
2destination: hooks/charmhelpers
3include:
4 - core
5 - fetch
6 - contrib.openstack|inc=*
7 - contrib.storage.linux
8 - contrib.hahelpers:
9 - apache
10 - cluster
11 - payload.execd
12 - contrib.network.ip
13 - contrib.peerstorage
014
=== added file 'charm-helpers-tests.yaml'
--- charm-helpers-tests.yaml 1970-01-01 00:00:00 +0000
+++ charm-helpers-tests.yaml 2014-10-06 15:38:41 +0000
@@ -0,0 +1,5 @@
1branch: lp:charm-helpers
2destination: tests/charmhelpers
3include:
4 - contrib.amulet
5 - contrib.openstack.amulet
06
=== renamed file 'charm-helpers.yaml' => 'charm-helpers.yaml.THIS'
=== modified file 'config.yaml'
--- config.yaml 2013-04-15 19:41:45 +0000
+++ config.yaml 2014-10-06 15:38:41 +0000
@@ -106,6 +106,19 @@
106 default: true106 default: true
107 type: boolean107 type: boolean
108 description: Delay authentication to downstream WSGI services.108 description: Delay authentication to downstream WSGI services.
109 node-timeout:
110 default: 60
111 type: int
112 description: How long the proxy server will wait on responses from the a/c/o servers.
113 recoverable-node-timeout:
114 default: 30
115 type: int
116 description: |
117 How long the proxy server will wait for an initial response and to read a
118 chunk of data from the object servers while serving GET / HEAD requests.
119 Timeouts from these requests can be recovered from so setting this to
120 something lower than node-timeout would provide quicker error recovery
121 while allowing for a longer timeout for non-recoverable requests (PUTs).
109 # Manual Keystone configuration.122 # Manual Keystone configuration.
110 keystone-auth-host:123 keystone-auth-host:
111 type: string124 type: string
@@ -134,15 +147,11 @@
134 description: Hash to use across all swift-proxy servers - don't loose147 description: Hash to use across all swift-proxy servers - don't loose
135 vip:148 vip:
136 type: string149 type: string
137 description: "Virtual IP to use to front swift-proxy in ha configuration"150 description: |
138 vip_iface:151 Virtual IP(s) to use to front API services in HA configuration.
139 type: string152 .
140 default: eth0153 If multiple networks are being used, a VIP should be provided for each
141 description: "Network Interface where to place the Virtual IP"154 network, separated by spaces.
142 vip_cidr:
143 type: int
144 default: 24
145 description: "Netmask that will be used for the Virtual IP"
146 ha-bindiface:155 ha-bindiface:
147 type: string156 type: string
148 default: eth0157 default: eth0
@@ -155,3 +164,38 @@
155 description: |164 description: |
156 Default multicast port number that will be used to communicate between165 Default multicast port number that will be used to communicate between
157 HA Cluster nodes.166 HA Cluster nodes.
167 # Network configuration options
168 # by default all access is over 'private-address'
169 os-admin-network:
170 type: string
171 description: |
172 The IP address and netmask of the OpenStack Admin network (e.g.,
173 192.168.0.0/24)
174 .
175 This network will be used for admin endpoints.
176 os-internal-network:
177 type: string
178 description: |
179 The IP address and netmask of the OpenStack Internal network (e.g.,
180 192.168.0.0/24)
181 .
182 This network will be used for internal endpoints.
183 os-public-network:
184 type: string
185 description: |
186 The IP address and netmask of the OpenStack Public network (e.g.,
187 192.168.0.0/24)
188 .
189 This network will be used for public endpoints.
190 prefer-ipv6:
191 type: boolean
192 default: False
193 description: |
194 If True enables IPv6 support. The charm will expect network interfaces
195 to be configured with an IPv6 address. If set to False (default) IPv4
196 is expected.
197 .
198 NOTE: these charms do not currently support IPv6 privacy extension. In
199 order for this charm to function correctly, the privacy extension must be
200 disabled and a non-temporary address must be configured/available on
201 your network interface.
158202
=== modified file 'hooks/charmhelpers/contrib/hahelpers/apache.py'
--- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-03-27 11:23:24 +0000
+++ hooks/charmhelpers/contrib/hahelpers/apache.py 2014-10-06 15:38:41 +0000
@@ -20,20 +20,27 @@
20)20)
2121
2222
23def get_cert():23def get_cert(cn=None):
24 # TODO: deal with multiple https endpoints via charm config
24 cert = config_get('ssl_cert')25 cert = config_get('ssl_cert')
25 key = config_get('ssl_key')26 key = config_get('ssl_key')
26 if not (cert and key):27 if not (cert and key):
27 log("Inspecting identity-service relations for SSL certificate.",28 log("Inspecting identity-service relations for SSL certificate.",
28 level=INFO)29 level=INFO)
29 cert = key = None30 cert = key = None
31 if cn:
32 ssl_cert_attr = 'ssl_cert_{}'.format(cn)
33 ssl_key_attr = 'ssl_key_{}'.format(cn)
34 else:
35 ssl_cert_attr = 'ssl_cert'
36 ssl_key_attr = 'ssl_key'
30 for r_id in relation_ids('identity-service'):37 for r_id in relation_ids('identity-service'):
31 for unit in relation_list(r_id):38 for unit in relation_list(r_id):
32 if not cert:39 if not cert:
33 cert = relation_get('ssl_cert',40 cert = relation_get(ssl_cert_attr,
34 rid=r_id, unit=unit)41 rid=r_id, unit=unit)
35 if not key:42 if not key:
36 key = relation_get('ssl_key',43 key = relation_get(ssl_key_attr,
37 rid=r_id, unit=unit)44 rid=r_id, unit=unit)
38 return (cert, key)45 return (cert, key)
3946
4047
=== modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py'
--- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-03-27 11:23:24 +0000
+++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-06 15:38:41 +0000
@@ -6,6 +6,11 @@
6# Adam Gandelman <adamg@ubuntu.com>6# Adam Gandelman <adamg@ubuntu.com>
7#7#
88
9"""
10Helpers for clustering and determining "cluster leadership" and other
11clustering-related helpers.
12"""
13
9import subprocess14import subprocess
10import os15import os
1116
@@ -19,6 +24,7 @@
19 config as config_get,24 config as config_get,
20 INFO,25 INFO,
21 ERROR,26 ERROR,
27 WARNING,
22 unit_get,28 unit_get,
23)29)
2430
@@ -27,6 +33,29 @@
27 pass33 pass
2834
2935
36def is_elected_leader(resource):
37 """
38 Returns True if the charm executing this is the elected cluster leader.
39
40 It relies on two mechanisms to determine leadership:
41 1. If the charm is part of a corosync cluster, call corosync to
42 determine leadership.
43 2. If the charm is not part of a corosync cluster, the leader is
44 determined as being "the alive unit with the lowest unit number". In
45 other words, the oldest surviving unit.
46 """
47 if is_clustered():
48 if not is_crm_leader(resource):
49 log('Deferring action to CRM leader.', level=INFO)
50 return False
51 else:
52 peers = peer_units()
53 if peers and not oldest_peer(peers):
54 log('Deferring action to oldest service unit.', level=INFO)
55 return False
56 return True
57
58
30def is_clustered():59def is_clustered():
31 for r_id in (relation_ids('ha') or []):60 for r_id in (relation_ids('ha') or []):
32 for unit in (relation_list(r_id) or []):61 for unit in (relation_list(r_id) or []):
@@ -38,7 +67,11 @@
38 return False67 return False
3968
4069
41def is_leader(resource):70def is_crm_leader(resource):
71 """
72 Returns True if the charm calling this is the elected corosync leader,
73 as returned by calling the external "crm" command.
74 """
42 cmd = [75 cmd = [
43 "crm", "resource",76 "crm", "resource",
44 "show", resource77 "show", resource
@@ -54,15 +87,31 @@
54 return False87 return False
5588
5689
57def peer_units():90def is_leader(resource):
91 log("is_leader is deprecated. Please consider using is_crm_leader "
92 "instead.", level=WARNING)
93 return is_crm_leader(resource)
94
95
96def peer_units(peer_relation="cluster"):
58 peers = []97 peers = []
59 for r_id in (relation_ids('cluster') or []):98 for r_id in (relation_ids(peer_relation) or []):
60 for unit in (relation_list(r_id) or []):99 for unit in (relation_list(r_id) or []):
61 peers.append(unit)100 peers.append(unit)
62 return peers101 return peers
63102
64103
104def peer_ips(peer_relation='cluster', addr_key='private-address'):
105 '''Return a dict of peers and their private-address'''
106 peers = {}
107 for r_id in relation_ids(peer_relation):
108 for unit in relation_list(r_id):
109 peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
110 return peers
111
112
65def oldest_peer(peers):113def oldest_peer(peers):
114 """Determines who the oldest peer is by comparing unit numbers."""
66 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])115 local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
67 for peer in peers:116 for peer in peers:
68 remote_unit_no = int(peer.split('/')[1])117 remote_unit_no = int(peer.split('/')[1])
@@ -72,16 +121,9 @@
72121
73122
74def eligible_leader(resource):123def eligible_leader(resource):
75 if is_clustered():124 log("eligible_leader is deprecated. Please consider using "
76 if not is_leader(resource):125 "is_elected_leader instead.", level=WARNING)
77 log('Deferring action to CRM leader.', level=INFO)126 return is_elected_leader(resource)
78 return False
79 else:
80 peers = peer_units()
81 if peers and not oldest_peer(peers):
82 log('Deferring action to oldest service unit.', level=INFO)
83 return False
84 return True
85127
86128
87def https():129def https():
@@ -97,10 +139,9 @@
97 return True139 return True
98 for r_id in relation_ids('identity-service'):140 for r_id in relation_ids('identity-service'):
99 for unit in relation_list(r_id):141 for unit in relation_list(r_id):
142 # TODO - needs fixing for new helper as ssl_cert/key are suffixed with CN
100 rel_state = [143 rel_state = [
101 relation_get('https_keystone', rid=r_id, unit=unit),144 relation_get('https_keystone', rid=r_id, unit=unit),
102 relation_get('ssl_cert', rid=r_id, unit=unit),
103 relation_get('ssl_key', rid=r_id, unit=unit),
104 relation_get('ca_cert', rid=r_id, unit=unit),145 relation_get('ca_cert', rid=r_id, unit=unit),
105 ]146 ]
106 # NOTE: works around (LP: #1203241)147 # NOTE: works around (LP: #1203241)
@@ -146,12 +187,12 @@
146 Obtains all relevant configuration from charm configuration required187 Obtains all relevant configuration from charm configuration required
147 for initiating a relation to hacluster:188 for initiating a relation to hacluster:
148189
149 ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr190 ha-bindiface, ha-mcastport, vip
150191
151 returns: dict: A dict containing settings keyed by setting name.192 returns: dict: A dict containing settings keyed by setting name.
152 raises: HAIncompleteConfig if settings are missing.193 raises: HAIncompleteConfig if settings are missing.
153 '''194 '''
154 settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']195 settings = ['ha-bindiface', 'ha-mcastport', 'vip']
155 conf = {}196 conf = {}
156 for setting in settings:197 for setting in settings:
157 conf[setting] = config_get(setting)198 conf[setting] = config_get(setting)
@@ -170,6 +211,7 @@
170211
171 :configs : OSTemplateRenderer: A config tempating object to inspect for212 :configs : OSTemplateRenderer: A config tempating object to inspect for
172 a complete https context.213 a complete https context.
214
173 :vip_setting: str: Setting in charm config that specifies215 :vip_setting: str: Setting in charm config that specifies
174 VIP address.216 VIP address.
175 '''217 '''
176218
=== added directory 'hooks/charmhelpers/contrib/network'
=== added file 'hooks/charmhelpers/contrib/network/__init__.py'
=== added file 'hooks/charmhelpers/contrib/network/ip.py'
--- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/network/ip.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,343 @@
1import glob
2import re
3import subprocess
4import sys
5
6from functools import partial
7
8from charmhelpers.core.hookenv import unit_get
9from charmhelpers.fetch import apt_install
10from charmhelpers.core.hookenv import (
11 WARNING,
12 ERROR,
13 log
14)
15
16try:
17 import netifaces
18except ImportError:
19 apt_install('python-netifaces')
20 import netifaces
21
22try:
23 import netaddr
24except ImportError:
25 apt_install('python-netaddr')
26 import netaddr
27
28
29def _validate_cidr(network):
30 try:
31 netaddr.IPNetwork(network)
32 except (netaddr.core.AddrFormatError, ValueError):
33 raise ValueError("Network (%s) is not in CIDR presentation format" %
34 network)
35
36
37def get_address_in_network(network, fallback=None, fatal=False):
38 """
39 Get an IPv4 or IPv6 address within the network from the host.
40
41 :param network (str): CIDR presentation format. For example,
42 '192.168.1.0/24'.
43 :param fallback (str): If no address is found, return fallback.
44 :param fatal (boolean): If no address is found, fallback is not
45 set and fatal is True then exit(1).
46
47 """
48
49 def not_found_error_out():
50 log("No IP address found in network: %s" % network,
51 level=ERROR)
52 sys.exit(1)
53
54 if network is None:
55 if fallback is not None:
56 return fallback
57 else:
58 if fatal:
59 not_found_error_out()
60
61 _validate_cidr(network)
62 network = netaddr.IPNetwork(network)
63 for iface in netifaces.interfaces():
64 addresses = netifaces.ifaddresses(iface)
65 if network.version == 4 and netifaces.AF_INET in addresses:
66 addr = addresses[netifaces.AF_INET][0]['addr']
67 netmask = addresses[netifaces.AF_INET][0]['netmask']
68 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
69 if cidr in network:
70 return str(cidr.ip)
71 if network.version == 6 and netifaces.AF_INET6 in addresses:
72 for addr in addresses[netifaces.AF_INET6]:
73 if not addr['addr'].startswith('fe80'):
74 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
75 addr['netmask']))
76 if cidr in network:
77 return str(cidr.ip)
78
79 if fallback is not None:
80 return fallback
81
82 if fatal:
83 not_found_error_out()
84
85 return None
86
87
88def is_ipv6(address):
89 '''Determine whether provided address is IPv6 or not'''
90 try:
91 address = netaddr.IPAddress(address)
92 except netaddr.AddrFormatError:
93 # probably a hostname - so not an address at all!
94 return False
95 else:
96 return address.version == 6
97
98
99def is_address_in_network(network, address):
100 """
101 Determine whether the provided address is within a network range.
102
103 :param network (str): CIDR presentation format. For example,
104 '192.168.1.0/24'.
105 :param address: An individual IPv4 or IPv6 address without a net
106 mask or subnet prefix. For example, '192.168.1.1'.
107 :returns boolean: Flag indicating whether address is in network.
108 """
109 try:
110 network = netaddr.IPNetwork(network)
111 except (netaddr.core.AddrFormatError, ValueError):
112 raise ValueError("Network (%s) is not in CIDR presentation format" %
113 network)
114 try:
115 address = netaddr.IPAddress(address)
116 except (netaddr.core.AddrFormatError, ValueError):
117 raise ValueError("Address (%s) is not in correct presentation format" %
118 address)
119 if address in network:
120 return True
121 else:
122 return False
123
124
125def _get_for_address(address, key):
126 """Retrieve an attribute of or the physical interface that
127 the IP address provided could be bound to.
128
129 :param address (str): An individual IPv4 or IPv6 address without a net
130 mask or subnet prefix. For example, '192.168.1.1'.
131 :param key: 'iface' for the physical interface name or an attribute
132 of the configured interface, for example 'netmask'.
133 :returns str: Requested attribute or None if address is not bindable.
134 """
135 address = netaddr.IPAddress(address)
136 for iface in netifaces.interfaces():
137 addresses = netifaces.ifaddresses(iface)
138 if address.version == 4 and netifaces.AF_INET in addresses:
139 addr = addresses[netifaces.AF_INET][0]['addr']
140 netmask = addresses[netifaces.AF_INET][0]['netmask']
141 cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
142 if address in cidr:
143 if key == 'iface':
144 return iface
145 else:
146 return addresses[netifaces.AF_INET][0][key]
147 if address.version == 6 and netifaces.AF_INET6 in addresses:
148 for addr in addresses[netifaces.AF_INET6]:
149 if not addr['addr'].startswith('fe80'):
150 cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
151 addr['netmask']))
152 if address in cidr:
153 if key == 'iface':
154 return iface
155 else:
156 return addr[key]
157 return None
158
159
160get_iface_for_address = partial(_get_for_address, key='iface')
161
162get_netmask_for_address = partial(_get_for_address, key='netmask')
163
164
165def format_ipv6_addr(address):
166 """
167 IPv6 needs to be wrapped with [] in url link to parse correctly.
168 """
169 if is_ipv6(address):
170 address = "[%s]" % address
171 else:
172 log("Not a valid ipv6 address: %s" % address, level=WARNING)
173 address = None
174
175 return address
176
177
178def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
179 fatal=True, exc_list=None):
180 """
181 Return the assigned IP address for a given interface, if any, or [].
182 """
183 # Extract nic if passed /dev/ethX
184 if '/' in iface:
185 iface = iface.split('/')[-1]
186 if not exc_list:
187 exc_list = []
188 try:
189 inet_num = getattr(netifaces, inet_type)
190 except AttributeError:
191 raise Exception('Unknown inet type ' + str(inet_type))
192
193 interfaces = netifaces.interfaces()
194 if inc_aliases:
195 ifaces = []
196 for _iface in interfaces:
197 if iface == _iface or _iface.split(':')[0] == iface:
198 ifaces.append(_iface)
199 if fatal and not ifaces:
200 raise Exception("Invalid interface '%s'" % iface)
201 ifaces.sort()
202 else:
203 if iface not in interfaces:
204 if fatal:
205 raise Exception("%s not found " % (iface))
206 else:
207 return []
208 else:
209 ifaces = [iface]
210
211 addresses = []
212 for netiface in ifaces:
213 net_info = netifaces.ifaddresses(netiface)
214 if inet_num in net_info:
215 for entry in net_info[inet_num]:
216 if 'addr' in entry and entry['addr'] not in exc_list:
217 addresses.append(entry['addr'])
218 if fatal and not addresses:
219 raise Exception("Interface '%s' doesn't have any %s addresses." %
220 (iface, inet_type))
221 return addresses
222
223get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
224
225
226def get_iface_from_addr(addr):
227 """Work out on which interface the provided address is configured."""
228 for iface in netifaces.interfaces():
229 addresses = netifaces.ifaddresses(iface)
230 for inet_type in addresses:
231 for _addr in addresses[inet_type]:
232 _addr = _addr['addr']
233 # link local
234 ll_key = re.compile("(.+)%.*")
235 raw = re.match(ll_key, _addr)
236 if raw:
237 _addr = raw.group(1)
238 if _addr == addr:
239 log("Address '%s' is configured on iface '%s'" %
240 (addr, iface))
241 return iface
242
243 msg = "Unable to infer net iface on which '%s' is configured" % (addr)
244 raise Exception(msg)
245
246
247def sniff_iface(f):
248 """If no iface provided, inject net iface inferred from unit private
249 address.
250 """
251 def iface_sniffer(*args, **kwargs):
252 if not kwargs.get('iface', None):
253 kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
254
255 return f(*args, **kwargs)
256
257 return iface_sniffer
258
259
260@sniff_iface
261def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
262 dynamic_only=True):
263 """Get assigned IPv6 address for a given interface.
264
265 Returns list of addresses found. If no address found, returns empty list.
266
267 If iface is None, we infer the current primary interface by doing a reverse
268 lookup on the unit private-address.
269
270 We currently only support scope global IPv6 addresses i.e. non-temporary
271 addresses. If no global IPv6 address is found, return the first one found
272 in the ipv6 address list.
273 """
274 addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
275 inc_aliases=inc_aliases, fatal=fatal,
276 exc_list=exc_list)
277
278 if addresses:
279 global_addrs = []
280 for addr in addresses:
281 key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
282 m = re.match(key_scope_link_local, addr)
283 if m:
284 eui_64_mac = m.group(1)
285 iface = m.group(2)
286 else:
287 global_addrs.append(addr)
288
289 if global_addrs:
290 # Make sure any found global addresses are not temporary
291 cmd = ['ip', 'addr', 'show', iface]
292 out = subprocess.check_output(cmd)
293 if dynamic_only:
294 key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
295 else:
296 key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
297
298 addrs = []
299 for line in out.split('\n'):
300 line = line.strip()
301 m = re.match(key, line)
302 if m and 'temporary' not in line:
303 # Return the first valid address we find
304 for addr in global_addrs:
305 if m.group(1) == addr:
306 if not dynamic_only or \
307 m.group(1).endswith(eui_64_mac):
308 addrs.append(addr)
309
310 if addrs:
311 return addrs
312
313 if fatal:
314 raise Exception("Interface '%s' doesn't have a scope global "
315 "non-temporary ipv6 address." % iface)
316
317 return []
318
319
320def get_bridges(vnic_dir='/sys/devices/virtual/net'):
321 """
322 Return a list of bridges on the system or []
323 """
324 b_rgex = vnic_dir + '/*/bridge'
325 return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]
326
327
328def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
329 """
330 Return a list of nics comprising a given bridge on the system or []
331 """
332 brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
333 return [x.split('/')[-1] for x in glob.glob(brif_rgex)]
334
335
336def is_bridge_member(nic):
337 """
338 Check if a given nic is a member of a bridge
339 """
340 for bridge in get_bridges():
341 if nic in get_bridge_nics(bridge):
342 return True
343 return False
0344
=== added directory 'hooks/charmhelpers/contrib/openstack/amulet'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/__init__.py'
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py'
--- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,94 @@
1from bzrlib.branch import Branch
2import os
3import re
4from charmhelpers.contrib.amulet.deployment import (
5 AmuletDeployment
6)
7
8
9class OpenStackAmuletDeployment(AmuletDeployment):
10 """OpenStack amulet deployment.
11
12 This class inherits from AmuletDeployment and has additional support
13 that is specifically for use by OpenStack charms.
14 """
15
16 def __init__(self, series=None, openstack=None, source=None):
17 """Initialize the deployment environment."""
18 super(OpenStackAmuletDeployment, self).__init__(series)
19 self.openstack = openstack
20 self.source = source
21
22 def _is_dev_branch(self):
23 """Determine if branch being tested is a dev (i.e. next) branch."""
24 branch = Branch.open(os.getcwd())
25 parent = branch.get_parent()
26 pattern = re.compile("^.*/next/$")
27 if (pattern.match(parent)):
28 return True
29 else:
30 return False
31
32 def _determine_branch_locations(self, other_services):
33 """Determine the branch locations for the other services.
34
35 If the branch being tested is a dev branch, then determine the
36 development branch locations for the other services. Otherwise,
37 the default charm store branches will be used."""
38 name = 0
39 if self._is_dev_branch():
40 updated_services = []
41 for svc in other_services:
42 if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']:
43 location = 'lp:charms/{}'.format(svc[name])
44 else:
45 temp = 'lp:~openstack-charmers/charms/trusty/{}/next'
46 location = temp.format(svc[name])
47 updated_services.append(svc + (location,))
48 other_services = updated_services
49 return other_services
50
51 def _add_services(self, this_service, other_services):
52 """Add services to the deployment and set openstack-origin/source."""
53 name = 0
54 other_services = self._determine_branch_locations(other_services)
55 super(OpenStackAmuletDeployment, self)._add_services(this_service,
56 other_services)
57 services = other_services
58 services.append(this_service)
59 use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
60
61 if self.openstack:
62 for svc in services:
63 if svc[name] not in use_source:
64 config = {'openstack-origin': self.openstack}
65 self.d.configure(svc[name], config)
66
67 if self.source:
68 for svc in services:
69 if svc[name] in use_source:
70 config = {'source': self.source}
71 self.d.configure(svc[name], config)
72
73 def _configure_services(self, configs):
74 """Configure all of the services."""
75 for service, config in configs.iteritems():
76 self.d.configure(service, config)
77
78 def _get_openstack_release(self):
79 """Get openstack release.
80
81 Return an integer representing the enum value of the openstack
82 release.
83 """
84 (self.precise_essex, self.precise_folsom, self.precise_grizzly,
85 self.precise_havana, self.precise_icehouse,
86 self.trusty_icehouse) = range(6)
87 releases = {
88 ('precise', None): self.precise_essex,
89 ('precise', 'cloud:precise-folsom'): self.precise_folsom,
90 ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
91 ('precise', 'cloud:precise-havana'): self.precise_havana,
92 ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
93 ('trusty', None): self.trusty_icehouse}
94 return releases[(self.series, self.openstack)]
095
=== added file 'hooks/charmhelpers/contrib/openstack/amulet/utils.py'
--- hooks/charmhelpers/contrib/openstack/amulet/utils.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/amulet/utils.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,276 @@
1import logging
2import os
3import time
4import urllib
5
6import glanceclient.v1.client as glance_client
7import keystoneclient.v2_0 as keystone_client
8import novaclient.v1_1.client as nova_client
9
10from charmhelpers.contrib.amulet.utils import (
11 AmuletUtils
12)
13
14DEBUG = logging.DEBUG
15ERROR = logging.ERROR
16
17
class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.

    This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms.

    Validation methods follow a common convention: they return ``None``
    (or a falsy value) on success and a descriptive error string on
    failure, so callers can do ``msg = validate_...(); if msg: fail(msg)``.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate endpoint data.

        Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.

        NOTE: the ports are matched as substrings of the endpoint URLs
        (``in`` test below), so pass them as strings.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if (admin_port in ep.adminurl and
                    internal_port in ep.internalurl and
                    public_port in ep.publicurl):
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in expected.iteritems():
            if k in actual:
                # Only the first endpoint of each service is compared.
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        # NOTE(review): 'ret' is unbound (NameError) if 'expected' is empty;
        # callers are expected to pass at least one endpoint.  On success
        # this returns the falsy result of the last comparison.
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate tenant data.

        Validate a list of actual tenant data vs list of expected tenant
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                # Tenants are matched by name; the remaining fields are
                # then compared in full.
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        # NOTE(review): 'ret' is unbound if 'expected' is empty -- callers
        # must supply at least one expected tenant.
        return ret

    def validate_role_data(self, expected, actual):
        """Validate role data.

        Validate a list of actual role data vs a list of expected role
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                # Roles are matched by name.
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        # NOTE(review): 'ret' is unbound if 'expected' is empty -- callers
        # must supply at least one expected role.
        return ret

    def validate_user_data(self, expected, actual):
        """Validate user data.

        Validate a list of actual user data vs a list of expected user
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                # Users are matched by name.
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        # NOTE(review): 'ret' is unbound if 'expected' is empty -- callers
        # must supply at least one expected user.
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate flavor data.

        Validate a list of actual flavors vs a list of expected flavors.
        Only flavor names are compared.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        unit = keystone_sentry
        # Use the address keystone advertises on its shared-db relation to
        # reach the admin endpoint (port 35357).
        service_ip = unit.relation('shared-db',
                                   'mysql:shared-db')['private-address']
        # NOTE(review): .decode('utf-8') on a Python 2 str assumes the
        # address is ASCII-safe -- confirm if non-ASCII hosts are possible.
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        # Re-use the token from the already-authenticated keystone client.
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance.

        Returns the glance image object once it reaches 'active' status,
        or None if it does not become active within ~30 seconds.
        """
        # NOTE: urllib.FancyURLopener is the Python 2 API (moved to
        # urllib.request in Python 3).
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        # cirros publishes its latest released version number at this URL.
        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        # Download only if not already cached under tests/.
        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        # Poll up to ~30 seconds (10 x 3s) for the image to become active.
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image.

        Returns True when the image count drops by one within ~30 seconds,
        False on timeout.
        """
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        # Poll up to ~30 seconds (10 x 3s) for the deletion to register.
        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance.

        Returns the nova server object once it reaches ACTIVE status, or
        None if it does not become active within ~3 minutes.
        """
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        # Poll up to ~3 minutes (60 x 3s); instance boot is slower than
        # image upload, hence the longer limit here.
        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        if status != 'ACTIVE':
            self.log.error('instance creation timed out')
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance.

        Returns True when the server count drops by one within ~30 seconds,
        False on timeout.
        """
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        # Poll up to ~30 seconds (10 x 3s) for the deletion to register.
        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
0277
=== modified file 'hooks/charmhelpers/contrib/openstack/context.py'
--- hooks/charmhelpers/contrib/openstack/context.py 2014-08-13 15:57:07 +0000
+++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-06 15:38:41 +0000
@@ -8,7 +8,6 @@
8 check_call8 check_call
9)9)
1010
11
12from charmhelpers.fetch import (11from charmhelpers.fetch import (
13 apt_install,12 apt_install,
14 filter_installed_packages,13 filter_installed_packages,
@@ -21,10 +20,20 @@
21 relation_get,20 relation_get,
22 relation_ids,21 relation_ids,
23 related_units,22 related_units,
23 relation_set,
24 unit_get,24 unit_get,
25 unit_private_ip,25 unit_private_ip,
26 ERROR,26 ERROR,
27 INFO27<<<<<<< TREE
28 INFO
29=======
30 INFO
31)
32
33from charmhelpers.core.host import (
34 mkdir,
35 write_file
36>>>>>>> MERGE-SOURCE
28)37)
2938
30from charmhelpers.contrib.hahelpers.cluster import (39from charmhelpers.contrib.hahelpers.cluster import (
@@ -37,12 +46,20 @@
37from charmhelpers.contrib.hahelpers.apache import (46from charmhelpers.contrib.hahelpers.apache import (
38 get_cert,47 get_cert,
39 get_ca_cert,48 get_ca_cert,
49 install_ca_cert,
40)50)
4151
42from charmhelpers.contrib.openstack.neutron import (52from charmhelpers.contrib.openstack.neutron import (
43 neutron_plugin_attribute,53 neutron_plugin_attribute,
44)54)
4555
56from charmhelpers.contrib.network.ip import (
57 get_address_in_network,
58 get_ipv6_addr,
59 format_ipv6_addr,
60 is_address_in_network
61)
62
46CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'63CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
4764
4865
@@ -135,8 +152,26 @@
135 'Missing required charm config options. '152 'Missing required charm config options. '
136 '(database name and user)')153 '(database name and user)')
137 raise OSContextError154 raise OSContextError
155
138 ctxt = {}156 ctxt = {}
139157
158 # NOTE(jamespage) if mysql charm provides a network upon which
159 # access to the database should be made, reconfigure relation
160 # with the service units local address and defer execution
161 access_network = relation_get('access-network')
162 if access_network is not None:
163 if self.relation_prefix is not None:
164 hostname_key = "{}_hostname".format(self.relation_prefix)
165 else:
166 hostname_key = "hostname"
167 access_hostname = get_address_in_network(access_network,
168 unit_get('private-address'))
169 set_hostname = relation_get(attribute=hostname_key,
170 unit=local_unit())
171 if set_hostname != access_hostname:
172 relation_set(relation_settings={hostname_key: access_hostname})
173 return ctxt # Defer any further hook execution for now....
174
140 password_setting = 'password'175 password_setting = 'password'
141 if self.relation_prefix:176 if self.relation_prefix:
142 password_setting = self.relation_prefix + '_password'177 password_setting = self.relation_prefix + '_password'
@@ -144,8 +179,10 @@
144 for rid in relation_ids('shared-db'):179 for rid in relation_ids('shared-db'):
145 for unit in related_units(rid):180 for unit in related_units(rid):
146 rdata = relation_get(rid=rid, unit=unit)181 rdata = relation_get(rid=rid, unit=unit)
182 host = rdata.get('db_host')
183 host = format_ipv6_addr(host) or host
147 ctxt = {184 ctxt = {
148 'database_host': rdata.get('db_host'),185 'database_host': host,
149 'database': self.database,186 'database': self.database,
150 'database_user': self.user,187 'database_user': self.user,
151 'database_password': rdata.get(password_setting),188 'database_password': rdata.get(password_setting),
@@ -221,10 +258,15 @@
221 for rid in relation_ids('identity-service'):258 for rid in relation_ids('identity-service'):
222 for unit in related_units(rid):259 for unit in related_units(rid):
223 rdata = relation_get(rid=rid, unit=unit)260 rdata = relation_get(rid=rid, unit=unit)
261 serv_host = rdata.get('service_host')
262 serv_host = format_ipv6_addr(serv_host) or serv_host
263 auth_host = rdata.get('auth_host')
264 auth_host = format_ipv6_addr(auth_host) or auth_host
265
224 ctxt = {266 ctxt = {
225 'service_port': rdata.get('service_port'),267 'service_port': rdata.get('service_port'),
226 'service_host': rdata.get('service_host'),268 'service_host': serv_host,
227 'auth_host': rdata.get('auth_host'),269 'auth_host': auth_host,
228 'auth_port': rdata.get('auth_port'),270 'auth_port': rdata.get('auth_port'),
229 'admin_tenant_name': rdata.get('service_tenant'),271 'admin_tenant_name': rdata.get('service_tenant'),
230 'admin_user': rdata.get('service_username'),272 'admin_user': rdata.get('service_username'),
@@ -244,32 +286,42 @@
244286
245287
246class AMQPContext(OSContextGenerator):288class AMQPContext(OSContextGenerator):
247 interfaces = ['amqp']
248289
249 def __init__(self, ssl_dir=None):290 def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
250 self.ssl_dir = ssl_dir291 self.ssl_dir = ssl_dir
292 self.rel_name = rel_name
293 self.relation_prefix = relation_prefix
294 self.interfaces = [rel_name]
251295
252 def __call__(self):296 def __call__(self):
253 log('Generating template context for amqp')297 log('Generating template context for amqp')
254 conf = config()298 conf = config()
299 user_setting = 'rabbit-user'
300 vhost_setting = 'rabbit-vhost'
301 if self.relation_prefix:
302 user_setting = self.relation_prefix + '-rabbit-user'
303 vhost_setting = self.relation_prefix + '-rabbit-vhost'
304
255 try:305 try:
256 username = conf['rabbit-user']306 username = conf[user_setting]
257 vhost = conf['rabbit-vhost']307 vhost = conf[vhost_setting]
258 except KeyError as e:308 except KeyError as e:
259 log('Could not generate shared_db context. '309 log('Could not generate shared_db context. '
260 'Missing required charm config options: %s.' % e)310 'Missing required charm config options: %s.' % e)
261 raise OSContextError311 raise OSContextError
262 ctxt = {}312 ctxt = {}
263 for rid in relation_ids('amqp'):313 for rid in relation_ids(self.rel_name):
264 ha_vip_only = False314 ha_vip_only = False
265 for unit in related_units(rid):315 for unit in related_units(rid):
266 if relation_get('clustered', rid=rid, unit=unit):316 if relation_get('clustered', rid=rid, unit=unit):
267 ctxt['clustered'] = True317 ctxt['clustered'] = True
268 ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,318 vip = relation_get('vip', rid=rid, unit=unit)
269 unit=unit)319 vip = format_ipv6_addr(vip) or vip
320 ctxt['rabbitmq_host'] = vip
270 else:321 else:
271 ctxt['rabbitmq_host'] = relation_get('private-address',322 host = relation_get('private-address', rid=rid, unit=unit)
272 rid=rid, unit=unit)323 host = format_ipv6_addr(host) or host
324 ctxt['rabbitmq_host'] = host
273 ctxt.update({325 ctxt.update({
274 'rabbitmq_user': username,326 'rabbitmq_user': username,
275 'rabbitmq_password': relation_get('password', rid=rid,327 'rabbitmq_password': relation_get('password', rid=rid,
@@ -308,8 +360,9 @@
308 and len(related_units(rid)) > 1:360 and len(related_units(rid)) > 1:
309 rabbitmq_hosts = []361 rabbitmq_hosts = []
310 for unit in related_units(rid):362 for unit in related_units(rid):
311 rabbitmq_hosts.append(relation_get('private-address',363 host = relation_get('private-address', rid=rid, unit=unit)
312 rid=rid, unit=unit))364 host = format_ipv6_addr(host) or host
365 rabbitmq_hosts.append(host)
313 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)366 ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
314 if not context_complete(ctxt):367 if not context_complete(ctxt):
315 return {}368 return {}
@@ -333,10 +386,13 @@
333 use_syslog = str(config('use-syslog')).lower()386 use_syslog = str(config('use-syslog')).lower()
334 for rid in relation_ids('ceph'):387 for rid in relation_ids('ceph'):
335 for unit in related_units(rid):388 for unit in related_units(rid):
336 mon_hosts.append(relation_get('private-address', rid=rid,
337 unit=unit))
338 auth = relation_get('auth', rid=rid, unit=unit)389 auth = relation_get('auth', rid=rid, unit=unit)
339 key = relation_get('key', rid=rid, unit=unit)390 key = relation_get('key', rid=rid, unit=unit)
391 ceph_addr = \
392 relation_get('ceph-public-address', rid=rid, unit=unit) or \
393 relation_get('private-address', rid=rid, unit=unit)
394 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
395 mon_hosts.append(ceph_addr)
340396
341 ctxt = {397 ctxt = {
342 'mon_hosts': ' '.join(mon_hosts),398 'mon_hosts': ' '.join(mon_hosts),
@@ -370,7 +426,14 @@
370426
371 cluster_hosts = {}427 cluster_hosts = {}
372 l_unit = local_unit().replace('/', '-')428 l_unit = local_unit().replace('/', '-')
373 cluster_hosts[l_unit] = unit_get('private-address')429
430 if config('prefer-ipv6'):
431 addr = get_ipv6_addr(exc_list=[config('vip')])[0]
432 else:
433 addr = unit_get('private-address')
434
435 cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
436 addr)
374437
375 for rid in relation_ids('cluster'):438 for rid in relation_ids('cluster'):
376 for unit in related_units(rid):439 for unit in related_units(rid):
@@ -381,6 +444,21 @@
381 ctxt = {444 ctxt = {
382 'units': cluster_hosts,445 'units': cluster_hosts,
383 }446 }
447
448 if config('haproxy-server-timeout'):
449 ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
450 if config('haproxy-client-timeout'):
451 ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
452
453 if config('prefer-ipv6'):
454 ctxt['local_host'] = 'ip6-localhost'
455 ctxt['haproxy_host'] = '::'
456 ctxt['stat_port'] = ':::8888'
457 else:
458 ctxt['local_host'] = '127.0.0.1'
459 ctxt['haproxy_host'] = '0.0.0.0'
460 ctxt['stat_port'] = ':8888'
461
384 if len(cluster_hosts.keys()) > 1:462 if len(cluster_hosts.keys()) > 1:
385 # Enable haproxy when we have enough peers.463 # Enable haproxy when we have enough peers.
386 log('Ensuring haproxy enabled in /etc/default/haproxy.')464 log('Ensuring haproxy enabled in /etc/default/haproxy.')
@@ -419,12 +497,13 @@
419 """497 """
420 Generates a context for an apache vhost configuration that configures498 Generates a context for an apache vhost configuration that configures
421 HTTPS reverse proxying for one or many endpoints. Generated context499 HTTPS reverse proxying for one or many endpoints. Generated context
422 looks something like:500 looks something like::
423 {501
424 'namespace': 'cinder',502 {
425 'private_address': 'iscsi.mycinderhost.com',503 'namespace': 'cinder',
426 'endpoints': [(8776, 8766), (8777, 8767)]504 'private_address': 'iscsi.mycinderhost.com',
427 }505 'endpoints': [(8776, 8766), (8777, 8767)]
506 }
428507
429 The endpoints list consists of a tuples mapping external ports508 The endpoints list consists of a tuples mapping external ports
430 to internal ports.509 to internal ports.
@@ -440,22 +519,36 @@
440 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']519 cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
441 check_call(cmd)520 check_call(cmd)
442521
443 def configure_cert(self):522 def configure_cert(self, cn=None):
444 if not os.path.isdir('/etc/apache2/ssl'):
445 os.mkdir('/etc/apache2/ssl')
446 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)523 ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
447 if not os.path.isdir(ssl_dir):524 mkdir(path=ssl_dir)
448 os.mkdir(ssl_dir)525 cert, key = get_cert(cn)
449 cert, key = get_cert()526 if cn:
450 with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:527 cert_filename = 'cert_{}'.format(cn)
451 cert_out.write(b64decode(cert))528 key_filename = 'key_{}'.format(cn)
452 with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:529 else:
453 key_out.write(b64decode(key))530 cert_filename = 'cert'
531 key_filename = 'key'
532 write_file(path=os.path.join(ssl_dir, cert_filename),
533 content=b64decode(cert))
534 write_file(path=os.path.join(ssl_dir, key_filename),
535 content=b64decode(key))
536
537 def configure_ca(self):
454 ca_cert = get_ca_cert()538 ca_cert = get_ca_cert()
455 if ca_cert:539 if ca_cert:
456 with open(CA_CERT_PATH, 'w') as ca_out:540 install_ca_cert(b64decode(ca_cert))
457 ca_out.write(b64decode(ca_cert))541
458 check_call(['update-ca-certificates'])542 def canonical_names(self):
543 '''Figure out which canonical names clients will access this service'''
544 cns = []
545 for r_id in relation_ids('identity-service'):
546 for unit in related_units(r_id):
547 rdata = relation_get(rid=r_id, unit=unit)
548 for k in rdata:
549 if k.startswith('ssl_key_'):
550 cns.append(k.lstrip('ssl_key_'))
551 return list(set(cns))
459552
460 def __call__(self):553 def __call__(self):
461 if isinstance(self.external_ports, basestring):554 if isinstance(self.external_ports, basestring):
@@ -463,21 +556,47 @@
463 if (not self.external_ports or not https()):556 if (not self.external_ports or not https()):
464 return {}557 return {}
465558
466 self.configure_cert()559 self.configure_ca()
467 self.enable_modules()560 self.enable_modules()
468561
469 ctxt = {562 ctxt = {
470 'namespace': self.service_namespace,563 'namespace': self.service_namespace,
471 'private_address': unit_get('private-address'),564 'endpoints': [],
472 'endpoints': []565 'ext_ports': []
473 }566 }
474 if is_clustered():567
475 ctxt['private_address'] = config('vip')568 for cn in self.canonical_names():
476 for api_port in self.external_ports:569 self.configure_cert(cn)
477 ext_port = determine_apache_port(api_port)570
478 int_port = determine_api_port(api_port)571 addresses = []
479 portmap = (int(ext_port), int(int_port))572 vips = []
480 ctxt['endpoints'].append(portmap)573 if config('vip'):
574 vips = config('vip').split()
575
576 for network_type in ['os-internal-network',
577 'os-admin-network',
578 'os-public-network']:
579 address = get_address_in_network(config(network_type),
580 unit_get('private-address'))
581 if len(vips) > 0 and is_clustered():
582 for vip in vips:
583 if is_address_in_network(config(network_type),
584 vip):
585 addresses.append((address, vip))
586 break
587 elif is_clustered():
588 addresses.append((address, config('vip')))
589 else:
590 addresses.append((address, address))
591
592 for address, endpoint in set(addresses):
593 for api_port in self.external_ports:
594 ext_port = determine_apache_port(api_port)
595 int_port = determine_api_port(api_port)
596 portmap = (address, endpoint, int(ext_port), int(int_port))
597 ctxt['endpoints'].append(portmap)
598 ctxt['ext_ports'].append(int(ext_port))
599 ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
481 return ctxt600 return ctxt
482601
483602
@@ -542,6 +661,26 @@
542661
543 return nvp_ctxt662 return nvp_ctxt
544663
664 def n1kv_ctxt(self):
665 driver = neutron_plugin_attribute(self.plugin, 'driver',
666 self.network_manager)
667 n1kv_config = neutron_plugin_attribute(self.plugin, 'config',
668 self.network_manager)
669 n1kv_ctxt = {
670 'core_plugin': driver,
671 'neutron_plugin': 'n1kv',
672 'neutron_security_groups': self.neutron_security_groups,
673 'local_ip': unit_private_ip(),
674 'config': n1kv_config,
675 'vsm_ip': config('n1kv-vsm-ip'),
676 'vsm_username': config('n1kv-vsm-username'),
677 'vsm_password': config('n1kv-vsm-password'),
678 'restrict_policy_profiles': config(
679 'n1kv_restrict_policy_profiles'),
680 }
681
682 return n1kv_ctxt
683
545 def neutron_ctxt(self):684 def neutron_ctxt(self):
546 if https():685 if https():
547 proto = 'https'686 proto = 'https'
@@ -573,6 +712,8 @@
573 ctxt.update(self.ovs_ctxt())712 ctxt.update(self.ovs_ctxt())
574 elif self.plugin in ['nvp', 'nsx']:713 elif self.plugin in ['nvp', 'nsx']:
575 ctxt.update(self.nvp_ctxt())714 ctxt.update(self.nvp_ctxt())
715 elif self.plugin == 'n1kv':
716 ctxt.update(self.n1kv_ctxt())
576717
577 alchemy_flags = config('neutron-alchemy-flags')718 alchemy_flags = config('neutron-alchemy-flags')
578 if alchemy_flags:719 if alchemy_flags:
@@ -612,7 +753,7 @@
612 The subordinate interface allows subordinates to export their753 The subordinate interface allows subordinates to export their
613 configuration requirements to the principle for multiple config754 configuration requirements to the principle for multiple config
614 files and multiple serivces. Ie, a subordinate that has interfaces755 files and multiple serivces. Ie, a subordinate that has interfaces
615 to both glance and nova may export to following yaml blob as json:756 to both glance and nova may export to following yaml blob as json::
616757
617 glance:758 glance:
618 /etc/glance/glance-api.conf:759 /etc/glance/glance-api.conf:
@@ -631,7 +772,8 @@
631772
632 It is then up to the principle charms to subscribe this context to773 It is then up to the principle charms to subscribe this context to
633 the service+config file it is interestd in. Configuration data will774 the service+config file it is interestd in. Configuration data will
634 be available in the template context, in glance's case, as:775 be available in the template context, in glance's case, as::
776
635 ctxt = {777 ctxt = {
636 ... other context ...778 ... other context ...
637 'subordinate_config': {779 'subordinate_config': {
@@ -684,15 +826,38 @@
684826
685 sub_config = sub_config[self.config_file]827 sub_config = sub_config[self.config_file]
686 for k, v in sub_config.iteritems():828 for k, v in sub_config.iteritems():
687 if k == 'sections':829<<<<<<< TREE
688 for section, config_dict in v.iteritems():830 if k == 'sections':
689 log("adding section '%s'" % (section))831 for section, config_dict in v.iteritems():
690 ctxt[k][section] = config_dict832 log("adding section '%s'" % (section))
691 else:833 ctxt[k][section] = config_dict
692 ctxt[k] = v834 else:
693835 ctxt[k] = v
694 log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)836
695837 log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
838
839=======
840 if k == 'sections':
841 for section, config_dict in v.iteritems():
842 log("adding section '%s'" % (section))
843 ctxt[k][section] = config_dict
844 else:
845 ctxt[k] = v
846
847 log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
848
849 return ctxt
850
851
852class LogLevelContext(OSContextGenerator):
853
854 def __call__(self):
855 ctxt = {}
856 ctxt['debug'] = \
857 False if config('debug') is None else config('debug')
858 ctxt['verbose'] = \
859 False if config('verbose') is None else config('verbose')
860>>>>>>> MERGE-SOURCE
696 return ctxt861 return ctxt
697862
698863
@@ -703,3 +868,16 @@
703 'use_syslog': config('use-syslog')868 'use_syslog': config('use-syslog')
704 }869 }
705 return ctxt870 return ctxt
871
872
873class BindHostContext(OSContextGenerator):
874
875 def __call__(self):
876 if config('prefer-ipv6'):
877 return {
878 'bind_host': '::'
879 }
880 else:
881 return {
882 'bind_host': '0.0.0.0'
883 }
706884
=== added file 'hooks/charmhelpers/contrib/openstack/ip.py'
--- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,79 @@
1from charmhelpers.core.hookenv import (
2 config,
3 unit_get,
4)
5
6from charmhelpers.contrib.network.ip import (
7 get_address_in_network,
8 is_address_in_network,
9 is_ipv6,
10 get_ipv6_addr,
11)
12
13from charmhelpers.contrib.hahelpers.cluster import is_clustered
14
# Endpoint type identifiers; used as keys into _address_map and passed as
# the endpoint_type argument to canonical_url()/resolve_address().
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

# For each endpoint type: the charm config option naming its network, and
# the juju unit-get address to fall back on when that option is unset.
_address_map = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address'
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address'
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address'
    }
}
33
34
def canonical_url(configs, endpoint_type=PUBLIC):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :configs OSTemplateRenderer: A config tempating object to inspect for
                                 a complete https context.
    :endpoint_type str: The endpoint type to resolve.

    :returns str: Base URL for services on the current service unit.
    '''
    # https only once every context needed for the https templates is ready.
    https_ready = 'https' in configs.complete_contexts()
    scheme = 'https' if https_ready else 'http'
    address = resolve_address(endpoint_type)
    # IPv6 literals must be bracketed inside a URL.
    if is_ipv6(address):
        address = "[{}]".format(address)
    return '%s://%s' % (scheme, address)
53
54
def resolve_address(endpoint_type=PUBLIC):
    """Resolve the local address to advertise for the given endpoint type.

    Clustered units advertise the VIP (when multiple VIPs are configured,
    the one on the endpoint type's configured network); standalone units
    advertise their own address on that network, falling back to the
    unit's juju-reported address.

    :param endpoint_type: one of PUBLIC, INTERNAL or ADMIN.
    :returns: the resolved address as a string.
    :raises ValueError: if no suitable address can be determined.
    """
    resolved_address = None
    if is_clustered():
        if config(_address_map[endpoint_type]['config']) is None:
            # Assume vip is simple and pass back directly
            resolved_address = config('vip')
        else:
            for vip in config('vip').split():
                if is_address_in_network(
                        config(_address_map[endpoint_type]['config']),
                        vip):
                    resolved_address = vip
                    # First matching VIP wins; without this break a later
                    # VIP in the same network would silently override it.
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
        resolved_address = get_address_in_network(
            config(_address_map[endpoint_type]['config']), fallback_addr)

    if resolved_address is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
    else:
        return resolved_address
080
=== modified file 'hooks/charmhelpers/contrib/openstack/neutron.py'
--- hooks/charmhelpers/contrib/openstack/neutron.py 2014-05-19 11:39:11 +0000
+++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-10-06 15:38:41 +0000
@@ -128,6 +128,20 @@
128 'server_packages': ['neutron-server',128 'server_packages': ['neutron-server',
129 'neutron-plugin-vmware'],129 'neutron-plugin-vmware'],
130 'server_services': ['neutron-server']130 'server_services': ['neutron-server']
131 },
132 'n1kv': {
133 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
134 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
135 'contexts': [
136 context.SharedDBContext(user=config('neutron-database-user'),
137 database=config('neutron-database'),
138 relation_prefix='neutron',
139 ssl_dir=NEUTRON_CONF_DIR)],
140 'services': [],
141 'packages': [['neutron-plugin-cisco']],
142 'server_packages': ['neutron-server',
143 'neutron-plugin-cisco'],
144 'server_services': ['neutron-server']
131 }145 }
132 }146 }
133 if release >= 'icehouse':147 if release >= 'icehouse':
134148
=== modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg'
--- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-03-27 11:23:24 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-10-06 15:38:41 +0000
@@ -1,6 +1,6 @@
1global1global
2 log 127.0.0.1 local02 log {{ local_host }} local0
3 log 127.0.0.1 local1 notice3 log {{ local_host }} local1 notice
4 maxconn 200004 maxconn 20000
5 user haproxy5 user haproxy
6 group haproxy6 group haproxy
@@ -14,10 +14,19 @@
14 retries 314 retries 3
15 timeout queue 100015 timeout queue 1000
16 timeout connect 100016 timeout connect 1000
17{% if haproxy_client_timeout -%}
18 timeout client {{ haproxy_client_timeout }}
19{% else -%}
17 timeout client 3000020 timeout client 30000
21{% endif -%}
22
23{% if haproxy_server_timeout -%}
24 timeout server {{ haproxy_server_timeout }}
25{% else -%}
18 timeout server 3000026 timeout server 30000
27{% endif -%}
1928
20listen stats :888829listen stats {{ stat_port }}
21 mode http30 mode http
22 stats enable31 stats enable
23 stats hide-version32 stats hide-version
@@ -27,7 +36,12 @@
2736
28{% if units -%}37{% if units -%}
29{% for service, ports in service_ports.iteritems() -%}38{% for service, ports in service_ports.iteritems() -%}
30listen {{ service }} 0.0.0.0:{{ ports[0] }}39listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }}
40 balance roundrobin
41 {% for unit, address in units.iteritems() -%}
42 server {{ unit }} {{ address }}:{{ ports[1] }} check
43 {% endfor %}
44listen {{ service }}_ipv6 :::{{ ports[0] }}
31 balance roundrobin45 balance roundrobin
32 {% for unit, address in units.iteritems() -%}46 {% for unit, address in units.iteritems() -%}
33 server {{ unit }} {{ address }}:{{ ports[1] }} check47 server {{ unit }} {{ address }}:{{ ports[1] }} check
3448
=== modified file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend'
--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2013-09-27 12:02:37 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2014-10-06 15:38:41 +0000
@@ -1,16 +1,18 @@
1{% if endpoints -%}1{% if endpoints -%}
2{% for ext, int in endpoints -%}2{% for ext_port in ext_ports -%}
3Listen {{ ext }}3Listen {{ ext_port }}
4NameVirtualHost *:{{ ext }}4{% endfor -%}
5<VirtualHost *:{{ ext }}>5{% for address, endpoint, ext, int in endpoints -%}
6 ServerName {{ private_address }}6<VirtualHost {{ address }}:{{ ext }}>
7 ServerName {{ endpoint }}
7 SSLEngine on8 SSLEngine on
8 SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert9 SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
9 SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key10 SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
10 ProxyPass / http://localhost:{{ int }}/11 ProxyPass / http://localhost:{{ int }}/
11 ProxyPassReverse / http://localhost:{{ int }}/12 ProxyPassReverse / http://localhost:{{ int }}/
12 ProxyPreserveHost on13 ProxyPreserveHost on
13</VirtualHost>14</VirtualHost>
15{% endfor -%}
14<Proxy *>16<Proxy *>
15 Order deny,allow17 Order deny,allow
16 Allow from all18 Allow from all
@@ -19,5 +21,4 @@
19 Order allow,deny21 Order allow,deny
20 Allow from all22 Allow from all
21</Location>23</Location>
22{% endfor -%}
23{% endif -%}24{% endif -%}
2425
=== modified file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf'
--- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2013-09-27 12:02:37 +0000
+++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2014-10-06 15:38:41 +0000
@@ -1,16 +1,18 @@
1{% if endpoints -%}1{% if endpoints -%}
2{% for ext, int in endpoints -%}2{% for ext_port in ext_ports -%}
3Listen {{ ext }}3Listen {{ ext_port }}
4NameVirtualHost *:{{ ext }}4{% endfor -%}
5<VirtualHost *:{{ ext }}>5{% for address, endpoint, ext, int in endpoints -%}
6 ServerName {{ private_address }}6<VirtualHost {{ address }}:{{ ext }}>
7 ServerName {{ endpoint }}
7 SSLEngine on8 SSLEngine on
8 SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert9 SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
9 SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key10 SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
10 ProxyPass / http://localhost:{{ int }}/11 ProxyPass / http://localhost:{{ int }}/
11 ProxyPassReverse / http://localhost:{{ int }}/12 ProxyPassReverse / http://localhost:{{ int }}/
12 ProxyPreserveHost on13 ProxyPreserveHost on
13</VirtualHost>14</VirtualHost>
15{% endfor -%}
14<Proxy *>16<Proxy *>
15 Order deny,allow17 Order deny,allow
16 Allow from all18 Allow from all
@@ -19,5 +21,4 @@
19 Order allow,deny21 Order allow,deny
20 Allow from all22 Allow from all
21</Location>23</Location>
22{% endfor -%}
23{% endif -%}24{% endif -%}
2425
=== modified file 'hooks/charmhelpers/contrib/openstack/templating.py'
--- hooks/charmhelpers/contrib/openstack/templating.py 2013-09-27 12:02:37 +0000
+++ hooks/charmhelpers/contrib/openstack/templating.py 2014-10-06 15:38:41 +0000
@@ -30,17 +30,17 @@
30 loading dir.30 loading dir.
3131
32 A charm may also ship a templates dir with this module32 A charm may also ship a templates dir with this module
33 and it will be appended to the bottom of the search list, eg:33 and it will be appended to the bottom of the search list, eg::
34 hooks/charmhelpers/contrib/openstack/templates.34
3535 hooks/charmhelpers/contrib/openstack/templates
36 :param templates_dir: str: Base template directory containing release36
37 sub-directories.37 :param templates_dir (str): Base template directory containing release
38 :param os_release : str: OpenStack release codename to construct template38 sub-directories.
39 loader.39 :param os_release (str): OpenStack release codename to construct template
4040 loader.
41 :returns : jinja2.ChoiceLoader constructed with a list of41 :returns: jinja2.ChoiceLoader constructed with a list of
42 jinja2.FilesystemLoaders, ordered in descending42 jinja2.FilesystemLoaders, ordered in descending
43 order by OpenStack release.43 order by OpenStack release.
44 """44 """
45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))45 tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
46 for rel in OPENSTACK_CODENAMES.itervalues()]46 for rel in OPENSTACK_CODENAMES.itervalues()]
@@ -111,7 +111,8 @@
111 and ease the burden of managing config templates across multiple OpenStack111 and ease the burden of managing config templates across multiple OpenStack
112 releases.112 releases.
113113
114 Basic usage:114 Basic usage::
115
115 # import some common context generates from charmhelpers116 # import some common context generates from charmhelpers
116 from charmhelpers.contrib.openstack import context117 from charmhelpers.contrib.openstack import context
117118
@@ -131,21 +132,19 @@
131 # write out all registered configs132 # write out all registered configs
132 configs.write_all()133 configs.write_all()
133134
134 Details:135 **OpenStack Releases and template loading**
135136
136 OpenStack Releases and template loading
137 ---------------------------------------
138 When the object is instantiated, it is associated with a specific OS137 When the object is instantiated, it is associated with a specific OS
139 release. This dictates how the template loader will be constructed.138 release. This dictates how the template loader will be constructed.
140139
141 The constructed loader attempts to load the template from several places140 The constructed loader attempts to load the template from several places
142 in the following order:141 in the following order:
143 - from the most recent OS release-specific template dir (if one exists)142 - from the most recent OS release-specific template dir (if one exists)
144 - the base templates_dir143 - the base templates_dir
145 - a template directory shipped in the charm with this helper file.144 - a template directory shipped in the charm with this helper file.
146145
147146 For the example above, '/tmp/templates' contains the following structure::
148 For the example above, '/tmp/templates' contains the following structure:147
149 /tmp/templates/nova.conf148 /tmp/templates/nova.conf
150 /tmp/templates/api-paste.ini149 /tmp/templates/api-paste.ini
151 /tmp/templates/grizzly/api-paste.ini150 /tmp/templates/grizzly/api-paste.ini
@@ -169,8 +168,8 @@
169 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows168 $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
170 us to ship common templates (haproxy, apache) with the helpers.169 us to ship common templates (haproxy, apache) with the helpers.
171170
172 Context generators171 **Context generators**
173 ---------------------------------------172
174 Context generators are used to generate template contexts during hook173 Context generators are used to generate template contexts during hook
175 execution. Doing so may require inspecting service relations, charm174 execution. Doing so may require inspecting service relations, charm
176 config, etc. When registered, a config file is associated with a list175 config, etc. When registered, a config file is associated with a list
177176
=== modified file 'hooks/charmhelpers/contrib/openstack/utils.py'
--- hooks/charmhelpers/contrib/openstack/utils.py 2014-08-27 07:17:33 +0000
+++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-06 15:38:41 +0000
@@ -3,8 +3,8 @@
3# Common python helper functions used for OpenStack charms.3# Common python helper functions used for OpenStack charms.
4from collections import OrderedDict4from collections import OrderedDict
55
6import apt_pkg as apt
7import subprocess6import subprocess
7import json
8import os8import os
9import socket9import socket
10import sys10import sys
@@ -14,7 +14,9 @@
14 log as juju_log,14 log as juju_log,
15 charm_dir,15 charm_dir,
16 ERROR,16 ERROR,
17 INFO17 INFO,
18 relation_ids,
19 relation_set
18)20)
1921
20from charmhelpers.contrib.storage.linux.lvm import (22from charmhelpers.contrib.storage.linux.lvm import (
@@ -23,6 +25,10 @@
23 remove_lvm_physical_volume,25 remove_lvm_physical_volume,
24)26)
2527
28from charmhelpers.contrib.network.ip import (
29 get_ipv6_addr
30)
31
26from charmhelpers.core.host import lsb_release, mounts, umount32from charmhelpers.core.host import lsb_release, mounts, umount
27from charmhelpers.fetch import apt_install, apt_cache33from charmhelpers.fetch import apt_install, apt_cache
28from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk34from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@@ -41,7 +47,8 @@
41 ('quantal', 'folsom'),47 ('quantal', 'folsom'),
42 ('raring', 'grizzly'),48 ('raring', 'grizzly'),
43 ('saucy', 'havana'),49 ('saucy', 'havana'),
44 ('trusty', 'icehouse')50 ('trusty', 'icehouse'),
51 ('utopic', 'juno'),
45])52])
4653
4754
@@ -52,6 +59,7 @@
52 ('2013.1', 'grizzly'),59 ('2013.1', 'grizzly'),
53 ('2013.2', 'havana'),60 ('2013.2', 'havana'),
54 ('2014.1', 'icehouse'),61 ('2014.1', 'icehouse'),
62 ('2014.2', 'juno'),
55])63])
5664
57# The ugly duckling65# The ugly duckling
@@ -69,6 +77,7 @@
69 ('1.13.0', 'icehouse'),77 ('1.13.0', 'icehouse'),
70 ('1.12.0', 'icehouse'),78 ('1.12.0', 'icehouse'),
71 ('1.11.0', 'icehouse'),79 ('1.11.0', 'icehouse'),
80 ('2.0.0', 'juno'),
72])81])
7382
74DEFAULT_LOOPBACK_SIZE = '5G'83DEFAULT_LOOPBACK_SIZE = '5G'
@@ -83,6 +92,8 @@
83 '''Derive OpenStack release codename from a given installation source.'''92 '''Derive OpenStack release codename from a given installation source.'''
84 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']93 ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
85 rel = ''94 rel = ''
95 if src is None:
96 return rel
86 if src in ['distro', 'distro-proposed']:97 if src in ['distro', 'distro-proposed']:
87 try:98 try:
88 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]99 rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
@@ -130,8 +141,14 @@
130141
131def get_os_codename_package(package, fatal=True):142def get_os_codename_package(package, fatal=True):
132 '''Derive OpenStack release codename from an installed package.'''143 '''Derive OpenStack release codename from an installed package.'''
133144 import apt_pkg as apt
134 cache = apt_cache()145
146 cache = apt_cache()
135152
136 try:153 try:
137 pkg = cache[package]154 pkg = cache[package]
@@ -182,7 +199,7 @@
182 for version, cname in vers_map.iteritems():199 for version, cname in vers_map.iteritems():
183 if cname == codename:200 if cname == codename:
184 return version201 return version
185 #e = "Could not determine OpenStack version for package: %s" % pkg202 # e = "Could not determine OpenStack version for package: %s" % pkg
186 # error_out(e)203 # error_out(e)
187204
188205
@@ -268,6 +285,9 @@
268 'icehouse': 'precise-updates/icehouse',285 'icehouse': 'precise-updates/icehouse',
269 'icehouse/updates': 'precise-updates/icehouse',286 'icehouse/updates': 'precise-updates/icehouse',
270 'icehouse/proposed': 'precise-proposed/icehouse',287 'icehouse/proposed': 'precise-proposed/icehouse',
288 'juno': 'trusty-updates/juno',
289 'juno/updates': 'trusty-updates/juno',
290 'juno/proposed': 'trusty-proposed/juno',
271 }291 }
272292
273 try:293 try:
@@ -315,6 +335,7 @@
315335
316 """336 """
317337
338 import apt_pkg as apt
318 src = config('openstack-origin')339 src = config('openstack-origin')
319 cur_vers = get_os_version_package(package)340 cur_vers = get_os_version_package(package)
320 available_vers = get_os_version_install_source(src)341 available_vers = get_os_version_install_source(src)
@@ -448,3 +469,21 @@
448 return result469 return result
449 else:470 else:
450 return result.split('.')[0]471 return result.split('.')[0]
472
473
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    """Send database credentials plus every local IPv6 address over all
    shared-db relations.

    :param database: name of the database to request.
    :param database_user: username to request for the database.
    :param relation_prefix: optional prefix applied to every relation key
        (e.g. 'neutron' -> 'neutron_database').
    """
    hosts = get_ipv6_addr(dynamic_only=False)

    kwargs = {'database': database,
              'username': database_user,
              # Multiple addresses are serialised as a JSON list.
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        # Build a new dict rather than deleting keys from the dict being
        # iterated: the original relied on Python 2 keys() returning a
        # list copy, which is fragile.
        kwargs = dict(("%s_%s" % (relation_prefix, key), value)
                      for key, value in kwargs.items())

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)
451490
=== added directory 'hooks/charmhelpers/contrib/peerstorage'
=== added file 'hooks/charmhelpers/contrib/peerstorage/__init__.py'
--- hooks/charmhelpers/contrib/peerstorage/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/contrib/peerstorage/__init__.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,131 @@
1from charmhelpers.core.hookenv import relation_id as current_relation_id
2from charmhelpers.core.hookenv import (
3 is_relation_made,
4 relation_ids,
5 relation_get,
6 local_unit,
7 relation_set,
8)
9
10
11"""
12This helper provides functions to support use of a peer relation
13for basic key/value storage, with the added benefit that all storage
14can be replicated across peer units.
15
16Requirement to use:
17
18To use this, the "peer_echo()" method has to be called from the peer
19relation's relation-changed hook:
20
21@hooks.hook("cluster-relation-changed") # Adapt this to your peer relation name
22def cluster_relation_changed():
23 peer_echo()
24
25Once this is done, you can use peer storage from anywhere:
26
27@hooks.hook("some-hook")
28def some_hook():
29 # You can store and retrieve key/values this way:
30 if is_relation_made("cluster"): # from charmhelpers.core.hookenv
31 # There are peers available so we can work with peer storage
32 peer_store("mykey", "myvalue")
33 value = peer_retrieve("mykey")
34 print value
35 else:
36        print "No peers joined the relation, cannot share key/values :("
37"""
38
39
def peer_retrieve(key, relation_name='cluster'):
    """Retrieve a named key from peer relation `relation_name`.

    :param key: attribute name to read from the peer relation.
    :param relation_name: name of the peer relation to read from.
    :returns: the stored value, as returned by relation_get().
    :raises ValueError: if no relation named `relation_name` exists.
    """
    cluster_rels = relation_ids(relation_name)
    if not cluster_rels:
        # Fixed: the original concatenated 'Unable to detect''peer ...'
        # with no separating space, producing "detectpeer".
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    # Peer storage assumes a single peer relation per name; use the first id.
    return relation_get(attribute=key, rid=cluster_rels[0],
                        unit=local_unit())
51
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
                            inc_list=None, exc_list=None):
    """Retrieve all k/v pairs stored under `prefix`, optionally filtered.

    :param prefix: key prefix (typically a relation id) to match.
    :param relation_name: name of the peer relation to read from.
    :param delimiter: separator between the prefix and the stored key name.
    :param inc_list: if non-empty, only these (unprefixed) keys are returned.
    :param exc_list: these (unprefixed) keys are always excluded.
    :returns: dict of matching settings with the prefix stripped.
    """
    inc_list = inc_list if inc_list else []
    exc_list = exc_list if exc_list else []
    peerdb_settings = peer_retrieve('-', relation_name=relation_name)
    matched = {}
    if not peerdb_settings:
        # relation_get() can return None before any data has been set;
        # the original crashed on None.items() in that case.
        return matched
    full_prefix = prefix + delimiter  # loop invariant, hoisted
    for key, value in peerdb_settings.items():
        if not key.startswith(full_prefix):
            continue
        # Fixed: str.replace() removed *every* occurrence of the prefix
        # anywhere in the key; slicing strips only the leading prefix.
        new_key = key[len(full_prefix):]
        if new_key in exc_list:
            continue
        if new_key in inc_list or not inc_list:
            matched[new_key] = value
    return matched
68
69
def peer_store(key, value, relation_name='cluster'):
    """Store the key/value pair on the named peer relation `relation_name`."""
    rids = relation_ids(relation_name)
    if not rids:
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    # Peer storage uses the first relation id for the named relation.
    relation_set(relation_id=rids[0],
                 relation_settings={key: value})
80
81
def peer_echo(includes=None):
    """Echo filtered attributes back onto the same relation for storage.

    This is a requirement to use the peerstorage module - it needs to be
    called from the peer relation's changed hook.
    """
    settings = relation_get()
    if includes is None:
        # Echo everything except the address attributes Juju manages.
        echoed = settings.copy()
        for excluded in ('private-address', 'public-address'):
            if excluded in echoed:
                echoed.pop(excluded)
    else:
        # Echo only attributes containing one of the requested substrings.
        echoed = {}
        for attribute, value in settings.iteritems():
            if any(include in attribute for include in includes):
                echoed[attribute] = value
    if echoed:
        relation_set(relation_settings=echoed)
102
103
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
                       peer_store_fatal=False, relation_settings=None,
                       delimiter='_', **kwargs):
    """Store passed-in arguments both in argument relation and in peer storage.

    It functions like doing relation_set() and peer_store() at the same time,
    with the same data. Keys mirrored into peer storage are namespaced as
    '<relation_id><delimiter><key>'.

    @param relation_id: the id of the relation to store the data on. Defaults
    to the current relation.
    @param peer_relation_name: name of the peer relation used for storage.
    @param peer_store_fatal: Set to True, the function will raise an exception
    should the peer storage not be available.
    @param relation_settings: dict of settings to set on the relation.
    @param delimiter: separator placed between the relation id and each key.
    """

    relation_settings = relation_settings if relation_settings else {}
    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings,
                 **kwargs)
    if is_relation_made(peer_relation_name):
        # Mirror every setting into peer storage, prefixed with the
        # relation id so settings from different relations cannot collide.
        for key, value in dict(kwargs.items() +
                               relation_settings.items()).iteritems():
            key_prefix = relation_id or current_relation_id()
            peer_store(key_prefix + delimiter + key,
                       value,
                       relation_name=peer_relation_name)
    else:
        if peer_store_fatal:
            raise ValueError('Unable to detect '
                             'peer relation {}'.format(peer_relation_name))
0132
=== modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py'
--- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-02-24 17:51:34 +0000
+++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-10-06 15:38:41 +0000
@@ -303,7 +303,7 @@
303 blk_device, fstype, system_services=[]):303 blk_device, fstype, system_services=[]):
304 """304 """
305 NOTE: This function must only be called from a single service unit for305 NOTE: This function must only be called from a single service unit for
306 the same rbd_img otherwise data loss will occur.306 the same rbd_img otherwise data loss will occur.
307307
308 Ensures given pool and RBD image exists, is mapped to a block device,308 Ensures given pool and RBD image exists, is mapped to a block device,
309 and the device is formatted and mounted at the given mount_point.309 and the device is formatted and mounted at the given mount_point.
310310
=== added file 'hooks/charmhelpers/core/fstab.py'
--- hooks/charmhelpers/core/fstab.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/fstab.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,116 @@
1#!/usr/bin/env python
2# -*- coding: utf-8 -*-
3
4__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
5
6import os
7
8
class Fstab(file):
    """Reader/writer for ``/etc/fstab``.

    Implemented as a subclass of the (Python 2) ``file`` builtin opened in
    'r+' mode, so an instance *is* the open fstab file handle.
    NOTE(review): Python 2 only as written (``file`` does not exist in Py3).
    """

    class Entry(object):
        """A single non-comment line of ``/etc/fstab``."""

        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            # An empty options field is equivalent to mount's "defaults".
            if not options:
                options = "defaults"

            self.options = options
            self.d = d  # dump flag (5th fstab field) - see fstab(5)
            self.p = p  # fsck pass number (6th fstab field)

        def __eq__(self, o):
            # Entries are equal when their rendered fstab lines match.
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        """Open `path` (default /etc/fstab) for read and in-place update."""
        self._path = path if path else self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        """Parse one fstab line into an Entry.

        :raises ValueError: if the line has the wrong number of fields.
        """
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield an Entry for every parseable non-comment line."""
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Skip malformed lines rather than aborting the scan.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first Entry whose `attr` equals `value`, else None."""
        for entry in self.entries:
            if getattr(entry, attr) == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append `entry` unless an entry for its device already exists.

        :returns: the entry on success, False if the device is present.
        """
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching `entry` from the file.

        :returns: True if a matching line was removed, False otherwise.
        """
        self.seek(0)
        lines = self.readlines()

        for index, line in enumerate(lines):
            if not line.startswith("#") and self._hydrate_entry(line) == entry:
                break
        else:
            return False

        # Delete by index instead of lines.remove(line): avoids a second
        # O(n) scan and is unambiguous about which line is dropped.
        del lines[index]

        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at `mountpoint`; True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Add an entry for `device`; see add_entry() for the return value."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
0117
=== renamed file 'hooks/charmhelpers/core/fstab.py' => 'hooks/charmhelpers/core/fstab.py.moved'
=== modified file 'hooks/charmhelpers/core/hookenv.py'
--- hooks/charmhelpers/core/hookenv.py 2014-05-19 11:39:11 +0000
+++ hooks/charmhelpers/core/hookenv.py 2014-10-06 15:38:41 +0000
@@ -25,7 +25,7 @@
25def cached(func):25def cached(func):
26 """Cache return values for multiple executions of func + args26 """Cache return values for multiple executions of func + args
2727
28 For example:28 For example::
2929
30 @cached30 @cached
31 def unit_get(attribute):31 def unit_get(attribute):
@@ -156,12 +156,15 @@
156156
157157
158class Config(dict):158class Config(dict):
159 """A Juju charm config dictionary that can write itself to159 """A dictionary representation of the charm's config.yaml, with some
160 disk (as json) and track which values have changed since160 extra features:
161 the previous hook invocation.161
162162 - See which values in the dictionary have changed since the previous hook.
163 Do not instantiate this object directly - instead call163 - For values that have changed, see what the previous value was.
164 ``hookenv.config()``164 - Store arbitrary data for use in a later hook.
165
166 NOTE: Do not instantiate this object directly - instead call
167 ``hookenv.config()``, which will return an instance of :class:`Config`.
165168
166 Example usage::169 Example usage::
167170
@@ -170,8 +173,8 @@
170 >>> config = hookenv.config()173 >>> config = hookenv.config()
171 >>> config['foo']174 >>> config['foo']
172 'bar'175 'bar'
176 >>> # store a new key/value for later use
173 >>> config['mykey'] = 'myval'177 >>> config['mykey'] = 'myval'
174 >>> config.save()
175178
176179
177 >>> # user runs `juju set mycharm foo=baz`180 >>> # user runs `juju set mycharm foo=baz`
@@ -188,22 +191,34 @@
188 >>> # keys/values that we add are preserved across hooks191 >>> # keys/values that we add are preserved across hooks
189 >>> config['mykey']192 >>> config['mykey']
190 'myval'193 'myval'
191 >>> # don't forget to save at the end of hook!
192 >>> config.save()
193194
194 """195 """
195 CONFIG_FILE_NAME = '.juju-persistent-config'196 CONFIG_FILE_NAME = '.juju-persistent-config'
196197
197 def __init__(self, *args, **kw):198 def __init__(self, *args, **kw):
198 super(Config, self).__init__(*args, **kw)199 super(Config, self).__init__(*args, **kw)
200 self.implicit_save = True
199 self._prev_dict = None201 self._prev_dict = None
200 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)202 self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
201 if os.path.exists(self.path):203 if os.path.exists(self.path):
202 self.load_previous()204 self.load_previous()
203205
206 def __getitem__(self, key):
207 """For regular dict lookups, check the current juju config first,
208 then the previous (saved) copy. This ensures that user-saved values
209 will be returned by a dict lookup.
210
211 """
212 try:
213 return dict.__getitem__(self, key)
214 except KeyError:
215 return (self._prev_dict or {})[key]
216
204 def load_previous(self, path=None):217 def load_previous(self, path=None):
205 """Load previous copy of config from disk so that current values218 """Load previous copy of config from disk.
206 can be compared to previous values.219
220 In normal usage you don't need to call this method directly - it
221 is called automatically at object initialization.
207222
208 :param path:223 :param path:
209224
@@ -218,8 +233,8 @@
218 self._prev_dict = json.load(f)233 self._prev_dict = json.load(f)
219234
220 def changed(self, key):235 def changed(self, key):
221 """Return true if the value for this key has changed since236 """Return True if the current value for this key is different from
222 the last save.237 the previous value.
223238
224 """239 """
225 if self._prev_dict is None:240 if self._prev_dict is None:
@@ -228,7 +243,7 @@
228243
229 def previous(self, key):244 def previous(self, key):
230 """Return previous value for this key, or None if there245 """Return previous value for this key, or None if there
231 is no "previous" value.246 is no previous value.
232247
233 """248 """
234 if self._prev_dict:249 if self._prev_dict:
@@ -238,7 +253,13 @@
238 def save(self):253 def save(self):
239 """Save this config to disk.254 """Save this config to disk.
240255
241 Preserves items in _prev_dict that do not exist in self.256 If the charm is using the :mod:`Services Framework <services.base>`
257 or :meth:'@hook <Hooks.hook>' decorator, this
258 is called automatically at the end of successful hook execution.
259 Otherwise, it should be called directly by user code.
260
261 To disable automatic saves, set ``implicit_save=False`` on this
262 instance.
242263
243 """264 """
244 if self._prev_dict:265 if self._prev_dict:
@@ -285,8 +306,9 @@
285 raise306 raise
286307
287308
288def relation_set(relation_id=None, relation_settings={}, **kwargs):309def relation_set(relation_id=None, relation_settings=None, **kwargs):
289 """Set relation information for the current unit"""310 """Set relation information for the current unit"""
311 relation_settings = relation_settings if relation_settings else {}
290 relation_cmd_line = ['relation-set']312 relation_cmd_line = ['relation-set']
291 if relation_id is not None:313 if relation_id is not None:
292 relation_cmd_line.extend(('-r', relation_id))314 relation_cmd_line.extend(('-r', relation_id))
@@ -445,27 +467,29 @@
445class Hooks(object):467class Hooks(object):
446 """A convenient handler for hook functions.468 """A convenient handler for hook functions.
447469
448 Example:470 Example::
471
449 hooks = Hooks()472 hooks = Hooks()
450473
451 # register a hook, taking its name from the function name474 # register a hook, taking its name from the function name
452 @hooks.hook()475 @hooks.hook()
453 def install():476 def install():
454 ...477 pass # your code here
455478
456 # register a hook, providing a custom hook name479 # register a hook, providing a custom hook name
457 @hooks.hook("config-changed")480 @hooks.hook("config-changed")
458 def config_changed():481 def config_changed():
459 ...482 pass # your code here
460483
461 if __name__ == "__main__":484 if __name__ == "__main__":
462 # execute a hook based on the name the program is called by485 # execute a hook based on the name the program is called by
463 hooks.execute(sys.argv)486 hooks.execute(sys.argv)
464 """487 """
465488
466 def __init__(self):489 def __init__(self, config_save=True):
467 super(Hooks, self).__init__()490 super(Hooks, self).__init__()
468 self._hooks = {}491 self._hooks = {}
492 self._config_save = config_save
469493
470 def register(self, name, function):494 def register(self, name, function):
471 """Register a hook"""495 """Register a hook"""
@@ -476,6 +500,10 @@
476 hook_name = os.path.basename(args[0])500 hook_name = os.path.basename(args[0])
477 if hook_name in self._hooks:501 if hook_name in self._hooks:
478 self._hooks[hook_name]()502 self._hooks[hook_name]()
503 if self._config_save:
504 cfg = config()
505 if cfg.implicit_save:
506 cfg.save()
479 else:507 else:
480 raise UnregisteredHookError(hook_name)508 raise UnregisteredHookError(hook_name)
481509
482510
=== modified file 'hooks/charmhelpers/core/host.py'
--- hooks/charmhelpers/core/host.py 2014-08-27 07:17:33 +0000
+++ hooks/charmhelpers/core/host.py 2014-10-06 15:38:41 +0000
@@ -12,7 +12,8 @@
12import string12import string
13import subprocess13import subprocess
14import hashlib14import hashlib
15import apt_pkg15import shutil
16from contextlib import contextmanager
1617
17from collections import OrderedDict18from collections import OrderedDict
1819
@@ -53,7 +54,7 @@
53def service_running(service):54def service_running(service):
54 """Determine whether a system service is running"""55 """Determine whether a system service is running"""
55 try:56 try:
56 output = subprocess.check_output(['service', service, 'status'])57 output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
57 except subprocess.CalledProcessError:58 except subprocess.CalledProcessError:
58 return False59 return False
59 else:60 else:
@@ -63,6 +64,16 @@
63 return False64 return False
6465
6566
67def service_available(service_name):
68 """Determine whether a system service is available"""
69 try:
70 subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
71 except subprocess.CalledProcessError as e:
72 return 'unrecognized service' not in e.output
73 else:
74 return True
75
76
66def adduser(username, password=None, shell='/bin/bash', system_user=False):77def adduser(username, password=None, shell='/bin/bash', system_user=False):
67 """Add a user to the system"""78 """Add a user to the system"""
68 try:79 try:
@@ -198,10 +209,15 @@
198 return system_mounts209 return system_mounts
199210
200211
201def file_hash(path):212def file_hash(path, hash_type='md5'):
202 """Generate a md5 hash of the contents of 'path' or None if not found """213 """
214 Generate a hash checksum of the contents of 'path' or None if not found.
215
216    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
217 such as md5, sha1, sha256, sha512, etc.
218 """
203 if os.path.exists(path):219 if os.path.exists(path):
204 h = hashlib.md5()220 h = getattr(hashlib, hash_type)()
205 with open(path, 'r') as source:221 with open(path, 'r') as source:
206 h.update(source.read()) # IGNORE:E1101 - it does have update222 h.update(source.read()) # IGNORE:E1101 - it does have update
207 return h.hexdigest()223 return h.hexdigest()
@@ -209,16 +225,36 @@
209 return None225 return None
210226
211227
228def check_hash(path, checksum, hash_type='md5'):
229 """
230 Validate a file using a cryptographic checksum.
231
232 :param str checksum: Value of the checksum used to validate the file.
233 :param str hash_type: Hash algorithm used to generate `checksum`.
234        Can be any hash algorithm supported by :mod:`hashlib`,
235 such as md5, sha1, sha256, sha512, etc.
236 :raises ChecksumError: If the file fails the checksum
237
238 """
239 actual_checksum = file_hash(path, hash_type)
240 if checksum != actual_checksum:
241 raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
242
243
244class ChecksumError(ValueError):
245 pass
246
247
212def restart_on_change(restart_map, stopstart=False):248def restart_on_change(restart_map, stopstart=False):
213 """Restart services based on configuration files changing249 """Restart services based on configuration files changing
214250
215 This function is used a decorator, for example251 This function is used a decorator, for example::
216252
217 @restart_on_change({253 @restart_on_change({
218 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]254 '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
219 })255 })
220 def ceph_client_changed():256 def ceph_client_changed():
221 ...257 pass # your code here
222258
223 In this example, the cinder-api and cinder-volume services259 In this example, the cinder-api and cinder-volume services
224 would be restarted if /etc/ceph/ceph.conf is changed by the260 would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -314,12 +350,40 @@
314350
def cmp_pkgrevno(package, revno, pkgcache=None):
    '''Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    :param str package: Name of the installed package to inspect.
    :param str revno: Version string to compare against.
    :param pkgcache: Optional pre-built apt cache; one is built if omitted.
    '''
    # Resolved merge conflict: 'import apt_pkg' must be kept here because
    # apt_pkg.version_compare is used below and the module-level import was
    # removed from charmhelpers.fetch by this merge.
    import apt_pkg
    from charmhelpers.fetch import apt_cache
    if not pkgcache:
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
369
370
@contextmanager
def chdir(d):
    """Context manager that switches the working directory to ``d`` for the
    duration of the block and restores the previous directory on exit,
    even if the block raises."""
    previous = os.getcwd()
    try:
        yield os.chdir(d)
    finally:
        os.chdir(previous)
378
379
def chownr(path, owner, group):
    """Recursively chown everything under ``path`` to ``owner``/``group``
    (both given by name); dangling symlinks are skipped since chowning
    them would fail."""
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid

    for dirpath, dirnames, filenames in os.walk(path):
        for entry in dirnames + filenames:
            target = os.path.join(dirpath, entry)
            if os.path.lexists(target) and not os.path.exists(target):
                continue  # dangling symlink
            os.chown(target, uid, gid)
326390
=== added directory 'hooks/charmhelpers/core/services'
=== added file 'hooks/charmhelpers/core/services/__init__.py'
--- hooks/charmhelpers/core/services/__init__.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/__init__.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,2 @@
1from .base import *
2from .helpers import *
03
=== added file 'hooks/charmhelpers/core/services/base.py'
--- hooks/charmhelpers/core/services/base.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/base.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,313 @@
1import os
2import re
3import json
4from collections import Iterable
5
6from charmhelpers.core import host
7from charmhelpers.core import hookenv
8
9
10__all__ = ['ServiceManager', 'ManagerCallback',
11 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
12 'service_restart', 'service_stop']
13
14
class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "provided_data": <list of provided data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
        information.

        The 'provided_data' list should contain relation data providers, most likely
        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
        that will indicate a set of data to set on a given relation.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`. Each callback will be called with the service name as the
        only parameter. After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete. Each callback will be called with the service
        name as the only parameter. This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service. If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage. The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        # Persistent record of which services were data_ready on a previous
        # hook run; loaded lazily by _load_ready_file().
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        self._ready = None
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()
        # Persist any config changes unless the charm opted out of
        # implicit saving.
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()

    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.
        """
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                # Only publish data during this provider's own
                # -relation-joined / -relation-changed hooks.
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    # A provider may veto publication via an optional
                    # _is_ready(data) check; otherwise any truthy data is set.
                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
                    if _ready:
                        hookenv.relation_set(None, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                # Only fire data_lost/stop when the service transitions from
                # ready to not-ready, not on every unready hook run.
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.

        :raises KeyError: if the service was never registered.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        # Accept either a single callback or an iterable of callbacks.
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            # ManagerCallback instances get the manager and event name too.
            if isinstance(callback, ManagerCallback):
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        # Lazy, idempotent load of the ready-state cache from disk.
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        # No-op until _load_ready_file() has populated the cache.
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready
248
249
class ManagerCallback(object):
    """
    Base class for callbacks that need the `ServiceManager` instance in
    addition to the service name.

    Subclasses must implement `__call__` with three parameters:

    * `manager` The `ServiceManager` instance
    * `service_name` The name of the service it's being triggered for
    * `event_name` The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        raise NotImplementedError()
263
264
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        # Per-service record of the ports opened last time, so ports removed
        # from the service definition can be closed on the next event.
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                # Guard against empty strings from an empty/trailing-comma file.
                if bool(old_port):
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        # Rewrite the record to reflect the current port list.
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
289
290
def service_stop(service_name):
    """
    Stop the named service, but only if it is currently running.

    Thin wrapper around host.service_stop that prevents spurious
    "unknown service" messages in the logs.
    """
    is_running = host.service_running(service_name)
    if is_running:
        host.service_stop(service_name)
298
299
def service_restart(service_name):
    """
    Restart the named service, starting it if it is not already running.

    Thin wrapper around host.service_restart that prevents spurious
    "unknown service" messages in the logs by skipping services that are
    not available on this host.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
310
311
# Convenience aliases: a single shared PortManagerCallback instance serves
# as both the 'start' (open) and 'stop' (close) port handler.
open_ports = close_ports = manage_ports = PortManagerCallback()
0314
=== added file 'hooks/charmhelpers/core/services/helpers.py'
--- hooks/charmhelpers/core/services/helpers.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/services/helpers.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,239 @@
1import os
2import yaml
3from charmhelpers.core import hookenv
4from charmhelpers.core import templating
5
6from charmhelpers.core.services.base import ManagerCallback
7
8
9__all__ = ['RelationContext', 'TemplateCallback',
10 'render_template', 'template']
11
12
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete. The data for all interfaces matching
    the `name` attribute that are complete will used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = None
    interface = None
    required_keys = []

    def __init__(self, name=None, additional_required_keys=None):
        if name is not None:
            self.name = name
        # Work on an instance-level copy of required_keys: extending the
        # class-level list in place would leak this instance's extra keys
        # into every other instance (and subclass) sharing that list.
        self.required_keys = list(self.required_keys)
        if additional_required_keys is not None:
            self.required_keys.extend(additional_required_keys)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    # Python 2 truthiness hook.
    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`. This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexographically first by the service ID, then by
        the unit ID. Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`. However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                # Only complete sets of data are exposed to templates.
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
110
111
class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    # Considered complete once the remote unit has published connection
    # credentials and the database name.
    name = 'db'
    interface = 'mysql'
    required_keys = ['host', 'user', 'password', 'database']
122
123
class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'
    required_keys = ['host', 'port']

    def provide_data(self):
        # Advertise this unit's private address on the standard HTTP port.
        data = {'port': 80}
        data['host'] = hookenv.unit_get('private-address')
        return data
140
141
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # safe_load: config.yaml is plain data; avoid yaml.load's
            # ability to construct arbitrary Python objects.
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        # Ready only when every required option is present AND has been
        # changed from its declared default; an "empty to empty" change
        # (None/'' both before and after) does not count.
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truthiness hook; delegates to __bool__.
        return self.__bool__()
174
175
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        """Persist `config_data` as YAML; relative paths resolve against the
        charm directory."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            # Mode 0600: the stored context may contain generated secrets.
            os.fchmod(file_stream.fileno(), 0600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        """Load a previously stored context; an empty file raises OSError
        rather than silently yielding no data."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            # NOTE(review): yaml.load without SafeLoader — acceptable for a
            # charm-written file, but consider yaml.safe_load; confirm no
            # untrusted input can reach this path.
            data = yaml.load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data
210
211
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready action.

    :param str source: The template source file, relative to `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target, owner='root', group='root', perms=0444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        # Merge all of the service's required_data contexts into a single
        # template context; later contexts win on key collisions.
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)
236
237
# Convenience aliases for templates: both names construct a TemplateCallback.
render_template = template = TemplateCallback
0240
=== added file 'hooks/charmhelpers/core/templating.py'
--- hooks/charmhelpers/core/templating.py 1970-01-01 00:00:00 +0000
+++ hooks/charmhelpers/core/templating.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,51 @@
1import os
2
3from charmhelpers.core import host
4from charmhelpers.core import hookenv
5
6
def render(source, target, context, owner='root', group='root', perms=0o444, templates_dir=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        # Install jinja2 on the fly, then retry the import.
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    loader = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = loader.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    # Ensure the target's parent directory exists before writing.
    host.mkdir(os.path.dirname(target))
    host.write_file(target, content, owner, group, perms)
052
=== modified file 'hooks/charmhelpers/fetch/__init__.py'
--- hooks/charmhelpers/fetch/__init__.py 2014-08-27 07:17:33 +0000
+++ hooks/charmhelpers/fetch/__init__.py 2014-10-06 15:38:41 +0000
@@ -1,4 +1,5 @@
1import importlib1import importlib
2from tempfile import NamedTemporaryFile
2import time3import time
3from yaml import safe_load4from yaml import safe_load
4from charmhelpers.core.host import (5from charmhelpers.core.host import (
@@ -13,7 +14,6 @@
13 config,14 config,
14 log,15 log,
15)16)
16import apt_pkg
17import os17import os
1818
1919
@@ -56,6 +56,15 @@
56 'icehouse/proposed': 'precise-proposed/icehouse',56 'icehouse/proposed': 'precise-proposed/icehouse',
57 'precise-icehouse/proposed': 'precise-proposed/icehouse',57 'precise-icehouse/proposed': 'precise-proposed/icehouse',
58 'precise-proposed/icehouse': 'precise-proposed/icehouse',58 'precise-proposed/icehouse': 'precise-proposed/icehouse',
59 # Juno
60 'juno': 'trusty-updates/juno',
61 'trusty-juno': 'trusty-updates/juno',
62 'trusty-juno/updates': 'trusty-updates/juno',
63 'trusty-updates/juno': 'trusty-updates/juno',
64 'juno/proposed': 'trusty-proposed/juno',
65 'juno/proposed': 'trusty-proposed/juno',
66 'trusty-juno/proposed': 'trusty-proposed/juno',
67 'trusty-proposed/juno': 'trusty-proposed/juno',
59}68}
6069
61# The order of this list is very important. Handlers should be listed in from70# The order of this list is very important. Handlers should be listed in from
@@ -108,8 +117,12 @@
108117
109def filter_installed_packages(packages):118def filter_installed_packages(packages):
110 """Returns a list of packages that require installation"""119 """Returns a list of packages that require installation"""
120<<<<<<< TREE
111121
112 cache = apt_cache()122 cache = apt_cache()
123=======
124 cache = apt_cache()
125>>>>>>> MERGE-SOURCE
113 _pkgs = []126 _pkgs = []
114 for package in packages:127 for package in packages:
115 try:128 try:
@@ -122,15 +135,28 @@
122 return _pkgs135 return _pkgs
123136
124137
125def apt_cache(in_memory=True):138<<<<<<< TREE
126 """Build and return an apt cache"""139def apt_cache(in_memory=True):
127 apt_pkg.init()140 """Build and return an apt cache"""
128 if in_memory:141 apt_pkg.init()
129 apt_pkg.config.set("Dir::Cache::pkgcache", "")142 if in_memory:
130 apt_pkg.config.set("Dir::Cache::srcpkgcache", "")143 apt_pkg.config.set("Dir::Cache::pkgcache", "")
131 return apt_pkg.Cache()144 apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
132145 return apt_pkg.Cache()
133146
147
148=======
149def apt_cache(in_memory=True):
150 """Build and return an apt cache"""
151 import apt_pkg
152 apt_pkg.init()
153 if in_memory:
154 apt_pkg.config.set("Dir::Cache::pkgcache", "")
155 apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
156 return apt_pkg.Cache()
157
158
159>>>>>>> MERGE-SOURCE
134def apt_install(packages, options=None, fatal=False):160def apt_install(packages, options=None, fatal=False):
135 """Install one or more packages"""161 """Install one or more packages"""
136 if options is None:162 if options is None:
@@ -196,6 +222,28 @@
196222
197223
198def add_source(source, key=None):224def add_source(source, key=None):
225 """Add a package source to this system.
226
227 @param source: a URL or sources.list entry, as supported by
228 add-apt-repository(1). Examples::
229
230 ppa:charmers/example
231 deb https://stub:key@private.example.com/ubuntu trusty main
232
233 In addition:
234 'proposed:' may be used to enable the standard 'proposed'
235 pocket for the release.
236 'cloud:' may be used to activate official cloud archive pockets,
237 such as 'cloud:icehouse'
238
239 @param key: A key to be added to the system's APT keyring and used
240 to verify the signatures on packages. Ideally, this should be an
241 ASCII format GPG public key including the block headers. A GPG key
242 id may also be used, but be aware that only insecure protocols are
243 available to retrieve the actual public key from a public keyserver
244 placing your Juju environment at risk. ppa and cloud archive keys
245 are securely added automatically, so should not be provided.
246 """
199 if source is None:247 if source is None:
200 log('Source is not present. Skipping')248 log('Source is not present. Skipping')
201 return249 return
@@ -220,61 +268,96 @@
220 release = lsb_release()['DISTRIB_CODENAME']268 release = lsb_release()['DISTRIB_CODENAME']
221 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:269 with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
222 apt.write(PROPOSED_POCKET.format(release))270 apt.write(PROPOSED_POCKET.format(release))
271 else:
272 raise SourceConfigError("Unknown source: {!r}".format(source))
273
223 if key:274 if key:
224 subprocess.check_call(['apt-key', 'adv', '--keyserver',275 if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
225 'hkp://keyserver.ubuntu.com:80', '--recv',276 with NamedTemporaryFile() as key_file:
226 key])277 key_file.write(key)
278 key_file.flush()
279 key_file.seek(0)
280 subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
281 else:
282 # Note that hkp: is in no way a secure protocol. Using a
283 # GPG key id is pointless from a security POV unless you
284 # absolutely trust your network and DNS.
285 subprocess.check_call(['apt-key', 'adv', '--keyserver',
286 'hkp://keyserver.ubuntu.com:80', '--recv',
287 key])
227288
228289
229def configure_sources(update=False,290def configure_sources(update=False,
230 sources_var='install_sources',291 sources_var='install_sources',
231 keys_var='install_keys'):292 keys_var='install_keys'):
232 """293 """
233 Configure multiple sources from charm configuration294 Configure multiple sources from charm configuration.
295
296 The lists are encoded as yaml fragments in the configuration.
297 The fragment needs to be included as a string. Sources and their
298 corresponding keys are of the types supported by add_source().
234299
235 Example config:300 Example config:
236 install_sources:301 install_sources: |
237 - "ppa:foo"302 - "ppa:foo"
238 - "http://example.com/repo precise main"303 - "http://example.com/repo precise main"
239 install_keys:304 install_keys: |
240 - null305 - null
241 - "a1b2c3d4"306 - "a1b2c3d4"
242307
243 Note that 'null' (a.k.a. None) should not be quoted.308 Note that 'null' (a.k.a. None) should not be quoted.
244 """309 """
245 sources = safe_load(config(sources_var))310 sources = safe_load((config(sources_var) or '').strip()) or []
246 keys = config(keys_var)311 keys = safe_load((config(keys_var) or '').strip()) or None
247 if keys is not None:312
248 keys = safe_load(keys)313 if isinstance(sources, basestring):
249 if isinstance(sources, basestring) and (314 sources = [sources]
250 keys is None or isinstance(keys, basestring)):315
251 add_source(sources, keys)316 if keys is None:
317 for source in sources:
318 add_source(source, None)
252 else:319 else:
253 if not len(sources) == len(keys):320 if isinstance(keys, basestring):
254 msg = 'Install sources and keys lists are different lengths'321 keys = [keys]
255 raise SourceConfigError(msg)322
256 for src_num in range(len(sources)):323 if len(sources) != len(keys):
257 add_source(sources[src_num], keys[src_num])324 raise SourceConfigError(
325 'Install sources and keys lists are different lengths')
326 for source, key in zip(sources, keys):
327 add_source(source, key)
258 if update:328 if update:
259 apt_update(fatal=True)329 apt_update(fatal=True)
260330
261331
262def install_remote(source):332def install_remote(source, *args, **kwargs):
263 """333 """
264 Install a file tree from a remote source334 Install a file tree from a remote source
265335
266 The specified source should be a url of the form:336 The specified source should be a url of the form:
267 scheme://[host]/path[#[option=value][&...]]337 scheme://[host]/path[#[option=value][&...]]
268338
269 Schemes supported are based on this modules submodules339 Schemes supported are based on this modules submodules.
270 Options supported are submodule-specific"""340 Options supported are submodule-specific.
341 Additional arguments are passed through to the submodule.
342
343 For example::
344
345 dest = install_remote('http://example.com/archive.tgz',
346 checksum='deadbeef',
347 hash_type='sha1')
348
349 This will download `archive.tgz`, validate it using SHA1 and, if
350 the file is ok, extract it and return the directory in which it
351 was extracted. If the checksum fails, it will raise
352 :class:`charmhelpers.core.host.ChecksumError`.
353 """
271 # We ONLY check for True here because can_handle may return a string354 # We ONLY check for True here because can_handle may return a string
272 # explaining why it can't handle a given source.355 # explaining why it can't handle a given source.
273 handlers = [h for h in plugins() if h.can_handle(source) is True]356 handlers = [h for h in plugins() if h.can_handle(source) is True]
274 installed_to = None357 installed_to = None
275 for handler in handlers:358 for handler in handlers:
276 try:359 try:
277 installed_to = handler.install(source)360 installed_to = handler.install(source, *args, **kwargs)
278 except UnhandledSource:361 except UnhandledSource:
279 pass362 pass
280 if not installed_to:363 if not installed_to:
281364
=== modified file 'hooks/charmhelpers/fetch/archiveurl.py'
--- hooks/charmhelpers/fetch/archiveurl.py 2014-03-20 13:47:46 +0000
+++ hooks/charmhelpers/fetch/archiveurl.py 2014-10-06 15:38:41 +0000
@@ -1,6 +1,8 @@
1import os1import os
2import urllib22import urllib2
3from urllib import urlretrieve
3import urlparse4import urlparse
5import hashlib
46
5from charmhelpers.fetch import (7from charmhelpers.fetch import (
6 BaseFetchHandler,8 BaseFetchHandler,
@@ -10,11 +12,19 @@
10 get_archive_handler,12 get_archive_handler,
11 extract,13 extract,
12)14)
13from charmhelpers.core.host import mkdir15from charmhelpers.core.host import mkdir, check_hash
1416
1517
16class ArchiveUrlFetchHandler(BaseFetchHandler):18class ArchiveUrlFetchHandler(BaseFetchHandler):
17 """Handler for archives via generic URLs"""19 """
20 Handler to download archive files from arbitrary URLs.
21
22 Can fetch from http, https, ftp, and file URLs.
23
24 Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
25
26 Installs the contents of the archive in $CHARM_DIR/fetched/.
27 """
18 def can_handle(self, source):28 def can_handle(self, source):
19 url_parts = self.parse_url(source)29 url_parts = self.parse_url(source)
20 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):30 if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@@ -24,6 +34,12 @@
24 return False34 return False
2535
26 def download(self, source, dest):36 def download(self, source, dest):
37 """
38 Download an archive file.
39
40 :param str source: URL pointing to an archive file.
41 :param str dest: Local path location to download archive file to.
42 """
27 # propogate all exceptions43 # propogate all exceptions
28 # URLError, OSError, etc44 # URLError, OSError, etc
29 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)45 proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@@ -48,7 +64,30 @@
48 os.unlink(dest)64 os.unlink(dest)
49 raise e65 raise e
5066
51 def install(self, source):67 # Mandatory file validation via Sha1 or MD5 hashing.
68 def download_and_validate(self, url, hashsum, validate="sha1"):
69 tempfile, headers = urlretrieve(url)
70 check_hash(tempfile, hashsum, validate)
71 return tempfile
72
73 def install(self, source, dest=None, checksum=None, hash_type='sha1'):
74 """
75 Download and install an archive file, with optional checksum validation.
76
77 The checksum can also be given on the `source` URL's fragment.
78 For example::
79
80 handler.install('http://example.com/file.tgz#sha1=deadbeef')
81
82 :param str source: URL pointing to an archive file.
83 :param str dest: Local destination path to install to. If not given,
84 installs to `$CHARM_DIR/archives/archive_file_name`.
85 :param str checksum: If given, validate the archive file after download.
86 :param str hash_type: Algorithm used to generate `checksum`.
87 Can be any hash alrgorithm supported by :mod:`hashlib`,
88 such as md5, sha1, sha256, sha512, etc.
89
90 """
52 url_parts = self.parse_url(source)91 url_parts = self.parse_url(source)
53 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')92 dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
54 if not os.path.exists(dest_dir):93 if not os.path.exists(dest_dir):
@@ -60,4 +99,10 @@
60 raise UnhandledSource(e.reason)99 raise UnhandledSource(e.reason)
61 except OSError as e:100 except OSError as e:
62 raise UnhandledSource(e.strerror)101 raise UnhandledSource(e.strerror)
63 return extract(dld_file)102 options = urlparse.parse_qs(url_parts.fragment)
103 for key, value in options.items():
104 if key in hashlib.algorithms:
105 check_hash(dld_file, value, key)
106 if checksum:
107 check_hash(dld_file, checksum, hash_type)
108 return extract(dld_file, dest)
64109
=== modified file 'hooks/swift_context.py'
--- hooks/swift_context.py 2014-04-10 16:52:10 +0000
+++ hooks/swift_context.py 2014-10-06 15:38:41 +0000
@@ -4,7 +4,8 @@
4 relation_ids,4 relation_ids,
5 related_units,5 related_units,
6 relation_get,6 relation_get,
7 unit_get7 unit_get,
8 service_name
8)9)
910
10from charmhelpers.contrib.openstack.context import (11from charmhelpers.contrib.openstack.context import (
@@ -19,9 +20,14 @@
19 determine_apache_port,20 determine_apache_port,
20)21)
2122
23from charmhelpers.contrib.network.ip import (
24 get_ipv6_addr
25)
26
22from charmhelpers.contrib.openstack.utils import get_host_ip27from charmhelpers.contrib.openstack.utils import get_host_ip
23import subprocess28import subprocess
24import os29import os
30import uuid
2531
2632
27from charmhelpers.contrib.hahelpers.apache import (33from charmhelpers.contrib.hahelpers.apache import (
@@ -116,7 +122,11 @@
116 for relid in relation_ids('swift-storage'):122 for relid in relation_ids('swift-storage'):
117 for unit in related_units(relid):123 for unit in related_units(relid):
118 host = relation_get('private-address', unit, relid)124 host = relation_get('private-address', unit, relid)
119 allowed_hosts.append(get_host_ip(host))125 if config('prefer-ipv6'):
126 host_ip = get_ipv6_addr(exc_list=[config('vip')])[0]
127 else:
128 host_ip = get_host_ip(host)
129 allowed_hosts.append(host_ip)
120130
121 ctxt = {131 ctxt = {
122 'www_dir': WWW_DIR,132 'www_dir': WWW_DIR,
@@ -134,12 +144,21 @@
134 if workers == '0':144 if workers == '0':
135 import multiprocessing145 import multiprocessing
136 workers = multiprocessing.cpu_count()146 workers = multiprocessing.cpu_count()
147 if config('prefer-ipv6'):
148 proxy_ip = '[%s]' % get_ipv6_addr(exc_list=[config('vip')])[0]
149 memcached_ip = 'ip6-localhost'
150 else:
151 proxy_ip = get_host_ip(unit_get('private-address'))
152 memcached_ip = get_host_ip(unit_get('private-address'))
137 ctxt = {153 ctxt = {
138 'proxy_ip': get_host_ip(unit_get('private-address')),154 'proxy_ip': proxy_ip,
155 'memcached_ip': memcached_ip,
139 'bind_port': determine_api_port(bind_port),156 'bind_port': determine_api_port(bind_port),
140 'workers': workers,157 'workers': workers,
141 'operator_roles': config('operator-roles'),158 'operator_roles': config('operator-roles'),
142 'delay_auth_decision': config('delay-auth-decision')159 'delay_auth_decision': config('delay-auth-decision'),
160 'node_timeout': config('node-timeout'),
161 'recoverable_node_timeout': config('recoverable-node-timeout'),
143 }162 }
144163
145 ctxt['ssl'] = False164 ctxt['ssl'] = False
@@ -194,9 +213,11 @@
194class MemcachedContext(OSContextGenerator):213class MemcachedContext(OSContextGenerator):
195214
196 def __call__(self):215 def __call__(self):
197 ctxt = {216 ctxt = {}
198 'proxy_ip': get_host_ip(unit_get('private-address'))217 if config('prefer-ipv6'):
199 }218 ctxt['memcached_ip'] = 'ip6-localhost'
219 else:
220 ctxt['memcached_ip'] = get_host_ip(unit_get('private-address'))
200 return ctxt221 return ctxt
201222
202SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'223SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
@@ -211,10 +232,8 @@
211 with open(SWIFT_HASH_FILE, 'w') as hashfile:232 with open(SWIFT_HASH_FILE, 'w') as hashfile:
212 hashfile.write(swift_hash)233 hashfile.write(swift_hash)
213 else:234 else:
214 cmd = ['od', '-t', 'x8', '-N', '8', '-A', 'n']235 swift_hash = str(uuid.uuid3(uuid.UUID(os.environ.get("JUJU_ENV_UUID")),
215 rand = open('/dev/random', 'r')236 service_name()))
216 p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=rand)
217 swift_hash = p.communicate()[0].strip()
218 with open(SWIFT_HASH_FILE, 'w') as hashfile:237 with open(SWIFT_HASH_FILE, 'w') as hashfile:
219 hashfile.write(swift_hash)238 hashfile.write(swift_hash)
220 return swift_hash239 return swift_hash
221240
=== modified file 'hooks/swift_hooks.py'
--- hooks/swift_hooks.py 2014-04-10 16:52:10 +0000
+++ hooks/swift_hooks.py 2014-10-06 15:38:41 +0000
@@ -24,7 +24,8 @@
24 add_to_ring,24 add_to_ring,
25 should_balance,25 should_balance,
26 do_openstack_upgrade,26 do_openstack_upgrade,
27 write_rc_script27 write_rc_script,
28 setup_ipv6
28)29)
29from swift_context import get_swift_hash30from swift_context import get_swift_hash
3031
@@ -48,6 +49,19 @@
48)49)
49from charmhelpers.payload.execd import execd_preinstall50from charmhelpers.payload.execd import execd_preinstall
5051
52from charmhelpers.contrib.openstack.ip import (
53 canonical_url,
54 PUBLIC, INTERNAL, ADMIN
55)
56from charmhelpers.contrib.network.ip import (
57 get_iface_for_address,
58 get_netmask_for_address,
59 get_address_in_network,
60 get_ipv6_addr,
61 format_ipv6_addr,
62 is_ipv6
63)
64
51extra_pkgs = [65extra_pkgs = [
52 "haproxy",66 "haproxy",
53 "python-jinja2"67 "python-jinja2"
@@ -71,7 +85,6 @@
71 pkgs = determine_packages(rel)85 pkgs = determine_packages(rel)
72 apt_install(pkgs, fatal=True)86 apt_install(pkgs, fatal=True)
73 apt_install(extra_pkgs, fatal=True)87 apt_install(extra_pkgs, fatal=True)
74
75 ensure_swift_dir()88 ensure_swift_dir()
76 # initialize new storage rings.89 # initialize new storage rings.
77 for ring in SWIFT_RINGS.iteritems():90 for ring in SWIFT_RINGS.iteritems():
@@ -92,20 +105,16 @@
92def keystone_joined(relid=None):105def keystone_joined(relid=None):
93 if not cluster.eligible_leader(SWIFT_HA_RES):106 if not cluster.eligible_leader(SWIFT_HA_RES):
94 return107 return
95 if cluster.is_clustered():
96 hostname = config('vip')
97 else:
98 hostname = unit_get('private-address')
99 port = config('bind-port')108 port = config('bind-port')
100 if cluster.https():109 admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
101 proto = 'https'110 internal_url = '%s:%s/v1/AUTH_$(tenant_id)s' % \
102 else:111 (canonical_url(CONFIGS, INTERNAL), port)
103 proto = 'http'112 public_url = '%s:%s/v1/AUTH_$(tenant_id)s' % \
104 admin_url = '%s://%s:%s' % (proto, hostname, port)113 (canonical_url(CONFIGS, PUBLIC), port)
105 internal_url = public_url = '%s/v1/AUTH_$(tenant_id)s' % admin_url
106 relation_set(service='swift',114 relation_set(service='swift',
107 region=config('region'),115 region=config('region'),
108 public_url=public_url, internal_url=internal_url,116 public_url=public_url,
117 internal_url=internal_url,
109 admin_url=admin_url,118 admin_url=admin_url,
110 requested_roles=config('operator-roles'),119 requested_roles=config('operator-roles'),
111 relation_id=relid)120 relation_id=relid)
@@ -142,9 +151,12 @@
142151
143 if cluster.is_clustered():152 if cluster.is_clustered():
144 hostname = config('vip')153 hostname = config('vip')
154 elif config('prefer-ipv6'):
155 hostname = get_ipv6_addr(exc_list=[config('vip')])[0]
145 else:156 else:
146 hostname = unit_get('private-address')157 hostname = unit_get('private-address')
147158
159 hostname = format_ipv6_addr(hostname) or hostname
148 rings_url = 'http://%s/%s' % (hostname, path)160 rings_url = 'http://%s/%s' % (hostname, path)
149 # notify storage nodes that there is a new ring to fetch.161 # notify storage nodes that there is a new ring to fetch.
150 for relid in relation_ids('swift-storage'):162 for relid in relation_ids('swift-storage'):
@@ -157,9 +169,14 @@
157@hooks.hook('swift-storage-relation-changed')169@hooks.hook('swift-storage-relation-changed')
158@restart_on_change(restart_map())170@restart_on_change(restart_map())
159def storage_changed():171def storage_changed():
172 if config('prefer-ipv6'):
173 host_ip = '[%s]' % relation_get('private-address')
174 else:
175 host_ip = openstack.get_host_ip(relation_get('private-address'))
176
160 zone = get_zone(config('zone-assignment'))177 zone = get_zone(config('zone-assignment'))
161 node_settings = {178 node_settings = {
162 'ip': openstack.get_host_ip(relation_get('private-address')),179 'ip': host_ip,
163 'zone': zone,180 'zone': zone,
164 'account_port': relation_get('account_port'),181 'account_port': relation_get('account_port'),
165 'object_port': relation_get('object_port'),182 'object_port': relation_get('object_port'),
@@ -195,16 +212,33 @@
195@hooks.hook('config-changed')212@hooks.hook('config-changed')
196@restart_on_change(restart_map())213@restart_on_change(restart_map())
197def config_changed():214def config_changed():
215 if config('prefer-ipv6'):
216 setup_ipv6()
217
198 configure_https()218 configure_https()
199 open_port(config('bind-port'))219 open_port(config('bind-port'))
200 # Determine whether or not we should do an upgrade, based on the220 # Determine whether or not we should do an upgrade, based on the
201 # the version offered in keyston-release.221 # the version offered in keyston-release.
202 if (openstack.openstack_upgrade_available('python-swift')):222 if (openstack.openstack_upgrade_available('python-swift')):
203 do_openstack_upgrade(CONFIGS)223 do_openstack_upgrade(CONFIGS)
204224 for r_id in relation_ids('identity-service'):
205225 keystone_joined(relid=r_id)
206@hooks.hook('cluster-relation-changed',226
207 'cluster-relation-joined')227
228@hooks.hook('cluster-relation-joined')
229def cluster_joined(relation_id=None):
230 if config('prefer-ipv6'):
231 private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
232 else:
233 private_addr = unit_get('private-address')
234
235 address = get_address_in_network(config('os-internal-network'),
236 private_addr)
237 relation_set(relation_id=relation_id,
238 relation_settings={'private-address': address})
239
240
241@hooks.hook('cluster-relation-changed')
208@restart_on_change(restart_map())242@restart_on_change(restart_map())
209def cluster_changed():243def cluster_changed():
210 CONFIGS.write_all()244 CONFIGS.write_all()
@@ -229,8 +263,6 @@
229 corosync_bindiface = config('ha-bindiface')263 corosync_bindiface = config('ha-bindiface')
230 corosync_mcastport = config('ha-mcastport')264 corosync_mcastport = config('ha-mcastport')
231 vip = config('vip')265 vip = config('vip')
232 vip_cidr = config('vip_cidr')
233 vip_iface = config('vip_iface')
234 if not vip:266 if not vip:
235 log('Unable to configure hacluster as vip not provided',267 log('Unable to configure hacluster as vip not provided',
236 level=ERROR)268 level=ERROR)
@@ -238,14 +270,37 @@
238270
239 # Obtain resources271 # Obtain resources
240 resources = {272 resources = {
241 'res_swift_vip': 'ocf:heartbeat:IPaddr2',
242 'res_swift_haproxy': 'lsb:haproxy'273 'res_swift_haproxy': 'lsb:haproxy'
243 }274 }
244 resource_params = {275 resource_params = {
245 'res_swift_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' %
246 (vip, vip_cidr, vip_iface),
247 'res_swift_haproxy': 'op monitor interval="5s"'276 'res_swift_haproxy': 'op monitor interval="5s"'
248 }277 }
278
279 vip_group = []
280 for vip in vip.split():
281 if is_ipv6(vip):
282 res_swift_vip = 'ocf:heartbeat:IPv6addr'
283 vip_params = 'ipv6addr'
284 else:
285 res_swift_vip = 'ocf:heartbeat:IPaddr2'
286 vip_params = 'ip'
287
288 iface = get_iface_for_address(vip)
289 if iface is not None:
290 vip_key = 'res_swift_{}_vip'.format(iface)
291 resources[vip_key] = res_swift_vip
292 resource_params[vip_key] = (
293 'params {ip}="{vip}" cidr_netmask="{netmask}"'
294 ' nic="{iface}"'.format(ip=vip_params,
295 vip=vip,
296 iface=iface,
297 netmask=get_netmask_for_address(vip))
298 )
299 vip_group.append(vip_key)
300
301 if len(vip_group) >= 1:
302 relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})
303
249 init_services = {304 init_services = {
250 'res_swift_haproxy': 'haproxy'305 'res_swift_haproxy': 'haproxy'
251 }306 }
252307
=== modified file 'hooks/swift_utils.py'
--- hooks/swift_utils.py 2014-08-11 08:59:49 +0000
+++ hooks/swift_utils.py 2014-10-06 15:38:41 +0000
@@ -12,7 +12,13 @@
12)12)
13from charmhelpers.fetch import (13from charmhelpers.fetch import (
14 apt_update,14 apt_update,
15 apt_upgrade15 apt_upgrade,
16 apt_install,
17 add_source
18)
19
20from charmhelpers.core.host import (
21 lsb_release
16)22)
1723
18import charmhelpers.contrib.openstack.context as context24import charmhelpers.contrib.openstack.context as context
@@ -63,7 +69,7 @@
63# > Folsom specific packages69# > Folsom specific packages
64FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']70FOLSOM_PACKAGES = BASE_PACKAGES + ['swift-plugin-s3']
6571
66SWIFT_HA_RES = 'res_swift_vip'72SWIFT_HA_RES = 'grp_swift_vips'
6773
68TEMPLATES = 'templates/'74TEMPLATES = 'templates/'
6975
@@ -75,7 +81,8 @@
75 'services': ['swift-proxy'],81 'services': ['swift-proxy'],
76 }),82 }),
77 (SWIFT_PROXY_CONF, {83 (SWIFT_PROXY_CONF, {
78 'hook_contexts': [swift_context.SwiftIdentityContext()],84 'hook_contexts': [swift_context.SwiftIdentityContext(),
85 context.BindHostContext()],
79 'services': ['swift-proxy'],86 'services': ['swift-proxy'],
80 }),87 }),
81 (HAPROXY_CONF, {88 (HAPROXY_CONF, {
@@ -368,3 +375,19 @@
368 apt_upgrade(options=dpkg_opts, fatal=True, dist=True)375 apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
369 configs.set_release(openstack_release=new_os_rel)376 configs.set_release(openstack_release=new_os_rel)
370 configs.write_all()377 configs.write_all()
378
379
380def setup_ipv6():
381 ubuntu_rel = lsb_release()['DISTRIB_CODENAME'].lower()
382 if ubuntu_rel < "trusty":
383 raise Exception("IPv6 is not supported in the charms for Ubuntu "
384 "versions less than Trusty 14.04")
385
386 # NOTE(xianghui): Need to install haproxy(1.5.3) from trusty-backports
387 # to support ipv6 address, so check is required to make sure not
388 # breaking other versions, IPv6 only support for >= Trusty
389 if ubuntu_rel == 'trusty':
390 add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports'
391 ' main')
392 apt_update()
393 apt_install('haproxy/trusty-backports', fatal=True)
371394
=== modified file 'revision'
--- revision 2013-09-27 12:02:37 +0000
+++ revision 2014-10-06 15:38:41 +0000
@@ -1,1 +1,1 @@
11461147
22
=== modified file 'templates/essex/proxy-server.conf'
--- templates/essex/proxy-server.conf 2014-02-27 12:17:53 +0000
+++ templates/essex/proxy-server.conf 2014-10-06 15:38:41 +0000
@@ -19,6 +19,8 @@
19use = egg:swift#proxy19use = egg:swift#proxy
20allow_account_management = true20allow_account_management = true
21{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}21{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
22node_timeout = {{ node_timeout }}
23recoverable_node_timeout = {{ recoverable_node_timeout }}
2224
23[filter:tempauth]25[filter:tempauth]
24use = egg:swift#tempauth26use = egg:swift#tempauth
2527
=== modified file 'templates/grizzly/proxy-server.conf'
--- templates/grizzly/proxy-server.conf 2014-03-27 11:23:24 +0000
+++ templates/grizzly/proxy-server.conf 2014-10-06 15:38:41 +0000
@@ -19,6 +19,8 @@
19use = egg:swift#proxy19use = egg:swift#proxy
20allow_account_management = true20allow_account_management = true
21{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}21{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
22node_timeout = {{ node_timeout }}
23recoverable_node_timeout = {{ recoverable_node_timeout }}
2224
23[filter:tempauth]25[filter:tempauth]
24use = egg:swift#tempauth26use = egg:swift#tempauth
2527
=== modified file 'templates/havana/proxy-server.conf'
--- templates/havana/proxy-server.conf 2014-03-27 11:23:24 +0000
+++ templates/havana/proxy-server.conf 2014-10-06 15:38:41 +0000
@@ -19,6 +19,8 @@
19use = egg:swift#proxy19use = egg:swift#proxy
20allow_account_management = true20allow_account_management = true
21{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}21{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
22node_timeout = {{ node_timeout }}
23recoverable_node_timeout = {{ recoverable_node_timeout }}
2224
23[filter:tempauth]25[filter:tempauth]
24use = egg:swift#tempauth26use = egg:swift#tempauth
2527
=== modified file 'templates/icehouse/proxy-server.conf'
--- templates/icehouse/proxy-server.conf 2014-04-07 14:44:39 +0000
+++ templates/icehouse/proxy-server.conf 2014-10-06 15:38:41 +0000
@@ -2,6 +2,7 @@
2bind_port = {{ bind_port }}2bind_port = {{ bind_port }}
3workers = {{ workers }}3workers = {{ workers }}
4user = swift4user = swift
5bind_ip = {{ bind_host }}
5{% if ssl %}6{% if ssl %}
6cert_file = {{ ssl_cert }}7cert_file = {{ ssl_cert }}
7key_file = {{ ssl_key }}8key_file = {{ ssl_key }}
@@ -19,6 +20,8 @@
19use = egg:swift#proxy20use = egg:swift#proxy
20allow_account_management = true21allow_account_management = true
21{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}22{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
23node_timeout = {{ node_timeout }}
24recoverable_node_timeout = {{ recoverable_node_timeout }}
2225
23[filter:tempauth]26[filter:tempauth]
24use = egg:swift#tempauth27use = egg:swift#tempauth
@@ -29,7 +32,7 @@
2932
30[filter:cache]33[filter:cache]
31use = egg:swift#memcache34use = egg:swift#memcache
32memcache_servers = {{ proxy_ip }}:1121135memcache_servers = {{ memcached_ip }}:11211
3336
34[filter:account-quotas]37[filter:account-quotas]
35use = egg:swift#account_quotas38use = egg:swift#account_quotas
3639
=== modified file 'templates/memcached.conf'
--- templates/memcached.conf 2013-09-27 12:02:37 +0000
+++ templates/memcached.conf 2014-10-06 15:38:41 +0000
@@ -32,7 +32,7 @@
32# Specify which IP address to listen on. The default is to listen on all IP addresses32# Specify which IP address to listen on. The default is to listen on all IP addresses
33# This parameter is one of the only security measures that memcached has, so make sure33# This parameter is one of the only security measures that memcached has, so make sure
34# it's listening on a firewalled interface.34# it's listening on a firewalled interface.
35-l {{ proxy_ip }} 35-l {{ memcached_ip }}
3636
37# Limit the number of simultaneous incoming connections. The daemon default is 102437# Limit the number of simultaneous incoming connections. The daemon default is 1024
38# -c 102438# -c 1024
3939
=== added directory 'tests'
=== added file 'tests/00-setup'
--- tests/00-setup 1970-01-01 00:00:00 +0000
+++ tests/00-setup 2014-10-06 15:38:41 +0000
@@ -0,0 +1,11 @@
1#!/bin/bash
2
3set -ex
4
5sudo add-apt-repository --yes ppa:juju/stable
6sudo apt-get update --yes
7sudo apt-get install --yes python-amulet
8sudo apt-get install --yes python-swiftclient
9sudo apt-get install --yes python-glanceclient
10sudo apt-get install --yes python-keystoneclient
11sudo apt-get install --yes python-novaclient
012
=== added file 'tests/10-basic-precise-essex'
--- tests/10-basic-precise-essex 1970-01-01 00:00:00 +0000
+++ tests/10-basic-precise-essex 2014-10-06 15:38:41 +0000
@@ -0,0 +1,9 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic swift-proxy deployment on precise-essex."""
4
5from basic_deployment import SwiftProxyBasicDeployment
6
7if __name__ == '__main__':
8 deployment = SwiftProxyBasicDeployment(series='precise')
9 deployment.run_tests()
010
=== added file 'tests/11-basic-precise-folsom'
--- tests/11-basic-precise-folsom 1970-01-01 00:00:00 +0000
+++ tests/11-basic-precise-folsom 2014-10-06 15:38:41 +0000
@@ -0,0 +1,11 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic swift-proxy deployment on precise-folsom."""
4
5from basic_deployment import SwiftProxyBasicDeployment
6
7if __name__ == '__main__':
8 deployment = SwiftProxyBasicDeployment(series='precise',
9 openstack='cloud:precise-folsom',
10 source='cloud:precise-updates/folsom')
11 deployment.run_tests()
012
=== added file 'tests/12-basic-precise-grizzly'
--- tests/12-basic-precise-grizzly 1970-01-01 00:00:00 +0000
+++ tests/12-basic-precise-grizzly 2014-10-06 15:38:41 +0000
@@ -0,0 +1,11 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic swift-proxy deployment on precise-grizzly."""
4
5from basic_deployment import SwiftProxyBasicDeployment
6
7if __name__ == '__main__':
8 deployment = SwiftProxyBasicDeployment(series='precise',
9 openstack='cloud:precise-grizzly',
10 source='cloud:precise-updates/grizzly')
11 deployment.run_tests()
012
=== added file 'tests/13-basic-precise-havana'
--- tests/13-basic-precise-havana 1970-01-01 00:00:00 +0000
+++ tests/13-basic-precise-havana 2014-10-06 15:38:41 +0000
@@ -0,0 +1,11 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic swift-proxy deployment on precise-havana."""
4
5from basic_deployment import SwiftProxyBasicDeployment
6
7if __name__ == '__main__':
8 deployment = SwiftProxyBasicDeployment(series='precise',
9 openstack='cloud:precise-havana',
10 source='cloud:precise-updates/havana')
11 deployment.run_tests()
012
=== added file 'tests/14-basic-precise-icehouse'
--- tests/14-basic-precise-icehouse 1970-01-01 00:00:00 +0000
+++ tests/14-basic-precise-icehouse 2014-10-06 15:38:41 +0000
@@ -0,0 +1,11 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic swift-proxy deployment on precise-icehouse."""
4
5from basic_deployment import SwiftProxyBasicDeployment
6
7if __name__ == '__main__':
8 deployment = SwiftProxyBasicDeployment(series='precise',
9 openstack='cloud:precise-icehouse',
10 source='cloud:precise-updates/icehouse')
11 deployment.run_tests()
012
=== added file 'tests/15-basic-trusty-icehouse'
--- tests/15-basic-trusty-icehouse 1970-01-01 00:00:00 +0000
+++ tests/15-basic-trusty-icehouse 2014-10-06 15:38:41 +0000
@@ -0,0 +1,9 @@
1#!/usr/bin/python
2
3"""Amulet tests on a basic swift-proxy deployment on trusty-icehouse."""
4
5from basic_deployment import SwiftProxyBasicDeployment
6
7if __name__ == '__main__':
8 deployment = SwiftProxyBasicDeployment(series='trusty')
9 deployment.run_tests()
010
=== added file 'tests/README'
--- tests/README 1970-01-01 00:00:00 +0000
+++ tests/README 2014-10-06 15:38:41 +0000
@@ -0,0 +1,52 @@
1This directory provides Amulet tests that focus on verification of swift-proxy
2deployments.
3
4If you use a web proxy server to access the web, you'll need to set the
5AMULET_HTTP_PROXY environment variable to the http URL of the proxy server.
6
7The following examples demonstrate different ways that tests can be executed.
8All examples are run from the charm's root directory.
9
10 * To run all tests (starting with 00-setup):
11
12 make test
13
14 * To run a specific test module (or modules):
15
16 juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
17
18 * To run a specific test module (or modules), and keep the environment
19 deployed after a failure:
20
21 juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse
22
23 * To re-run a test module against an already deployed environment (one
24 that was deployed by a previous call to 'juju test --set-e'):
25
26 ./tests/15-basic-trusty-icehouse
27
28For debugging and test development purposes, all code should be idempotent.
29In other words, the code should have the ability to be re-run without changing
30the results beyond the initial run. This enables editing and re-running of a
31test module against an already deployed environment, as described above.
32
33Manual debugging tips:
34
35 * Set the following env vars before using the OpenStack CLI as admin:
36 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
37 export OS_TENANT_NAME=admin
38 export OS_USERNAME=admin
39 export OS_PASSWORD=openstack
40 export OS_REGION_NAME=RegionOne
41
42 * Set the following env vars before using the OpenStack CLI as demoUser:
43 export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0
44 export OS_TENANT_NAME=demoTenant
45 export OS_USERNAME=demoUser
46 export OS_PASSWORD=password
47 export OS_REGION_NAME=RegionOne
48
49 * Sample swift command:
50 swift -A $OS_AUTH_URL --os-tenant-name services --os-username swift \
51 --os-password password list
52 (where tenant/user names and password are in swift-proxy's nova.conf file)
053
=== added file 'tests/basic_deployment.py'
--- tests/basic_deployment.py 1970-01-01 00:00:00 +0000
+++ tests/basic_deployment.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,827 @@
1#!/usr/bin/python
2
3import amulet
4import swiftclient
5
6from charmhelpers.contrib.openstack.amulet.deployment import (
7 OpenStackAmuletDeployment
8)
9
10from charmhelpers.contrib.openstack.amulet.utils import (
11 OpenStackAmuletUtils,
12 DEBUG, # flake8: noqa
13 ERROR
14)
15
16# Use DEBUG to turn on debug logging
17u = OpenStackAmuletUtils(ERROR)
18
19
20class SwiftProxyBasicDeployment(OpenStackAmuletDeployment):
21 """Amulet tests on a basic swift-proxy deployment."""
22
23 def __init__(self, series, openstack=None, source=None):
24 """Deploy the entire test environment."""
25 super(SwiftProxyBasicDeployment, self).__init__(series, openstack,
26 source)
27 self._add_services()
28 self._add_relations()
29 self._configure_services()
30 self._deploy()
31 self._initialize_tests()
32
33 def _add_services(self):
34 """Add the service that we're testing, including the number of units,
35 where swift-proxy is local, and the other charms are from
36 the charm store."""
37 this_service = ('swift-proxy', 1)
38 other_services = [('mysql', 1),
39 ('keystone', 1), ('glance', 1), ('swift-storage', 1)]
40 super(SwiftProxyBasicDeployment, self)._add_services(this_service,
41 other_services)
42
43 def _add_relations(self):
44 """Add all of the relations for the services."""
45 relations = {
46 'keystone:shared-db': 'mysql:shared-db',
47 'swift-proxy:identity-service': 'keystone:identity-service',
48 'swift-storage:swift-storage': 'swift-proxy:swift-storage',
49 'glance:identity-service': 'keystone:identity-service',
50 'glance:shared-db': 'mysql:shared-db',
51 'glance:object-store': 'swift-proxy:object-store'
52 }
53 super(SwiftProxyBasicDeployment, self)._add_relations(relations)
54
55 def _configure_services(self):
56 """Configure all of the services."""
57 keystone_config = {'admin-password': 'openstack',
58 'admin-token': 'ubuntutesting'}
59 swift_proxy_config = {'zone-assignment': 'manual',
60 'replicas': '1',
61 'swift-hash': 'fdfef9d4-8b06-11e2-8ac0-531c923c8fae',
62 'use-https': 'no'}
63 swift_storage_config = {'zone': '1',
64 'block-device': 'vdb',
65 'overwrite': 'true'}
66 configs = {'keystone': keystone_config,
67 'swift-proxy': swift_proxy_config,
68 'swift-storage': swift_storage_config}
69 super(SwiftProxyBasicDeployment, self)._configure_services(configs)
70
71 def _initialize_tests(self):
72 """Perform final initialization before tests get run."""
73 # Access the sentries for inspecting service units
74 self.mysql_sentry = self.d.sentry.unit['mysql/0']
75 self.keystone_sentry = self.d.sentry.unit['keystone/0']
76 self.glance_sentry = self.d.sentry.unit['glance/0']
77 self.swift_proxy_sentry = self.d.sentry.unit['swift-proxy/0']
78 self.swift_storage_sentry = self.d.sentry.unit['swift-storage/0']
79
80 # Authenticate admin with keystone
81 self.keystone = u.authenticate_keystone_admin(self.keystone_sentry,
82 user='admin',
83 password='openstack',
84 tenant='admin')
85
86 # Authenticate admin with glance endpoint
87 self.glance = u.authenticate_glance_admin(self.keystone)
88
89 # Authenticate swift user
90 keystone_relation = self.keystone_sentry.relation('identity-service',
91 'swift-proxy:identity-service')
92 ep = self.keystone.service_catalog.url_for(service_type='identity',
93 endpoint_type='publicURL')
94 self.swift = swiftclient.Connection(authurl=ep,
95 user=keystone_relation['service_username'],
96 key=keystone_relation['service_password'],
97 tenant_name=keystone_relation['service_tenant'],
98 auth_version='2.0')
99
100 # Create a demo tenant/role/user
101 self.demo_tenant = 'demoTenant'
102 self.demo_role = 'demoRole'
103 self.demo_user = 'demoUser'
104 if not u.tenant_exists(self.keystone, self.demo_tenant):
105 tenant = self.keystone.tenants.create(tenant_name=self.demo_tenant,
106 description='demo tenant',
107 enabled=True)
108 self.keystone.roles.create(name=self.demo_role)
109 self.keystone.users.create(name=self.demo_user,
110 password='password',
111 tenant_id=tenant.id,
112 email='demo@demo.com')
113
114 # Authenticate demo user with keystone
115 self.keystone_demo = \
116 u.authenticate_keystone_user(self.keystone, user=self.demo_user,
117 password='password',
118 tenant=self.demo_tenant)
119
120 def test_services(self):
121 """Verify the expected services are running on the corresponding
122 service units."""
123 swift_storage_services = ['status swift-account',
124 'status swift-account-auditor',
125 'status swift-account-reaper',
126 'status swift-account-replicator',
127 'status swift-container',
128 'status swift-container-auditor',
129 'status swift-container-replicator',
130 'status swift-container-updater',
131 'status swift-object',
132 'status swift-object-auditor',
133 'status swift-object-replicator',
134 'status swift-object-updater']
135 if self._get_openstack_release() >= self.precise_icehouse:
136 swift_storage_services.append('status swift-container-sync')
137
138 commands = {
139 self.mysql_sentry: ['status mysql'],
140 self.keystone_sentry: ['status keystone'],
141 self.glance_sentry: ['status glance-registry', 'status glance-api'],
142 self.swift_proxy_sentry: ['status swift-proxy'],
143 self.swift_storage_sentry: swift_storage_services
144 }
145
146 ret = u.validate_services(commands)
147 if ret:
148 amulet.raise_status(amulet.FAIL, msg=ret)
149
150 def test_users(self):
151 """Verify all existing roles."""
152 user1 = {'name': 'demoUser',
153 'enabled': True,
154 'tenantId': u.not_null,
155 'id': u.not_null,
156 'email': 'demo@demo.com'}
157 user2 = {'name': 'admin',
158 'enabled': True,
159 'tenantId': u.not_null,
160 'id': u.not_null,
161 'email': 'juju@localhost'}
162 user3 = {'name': 'glance',
163 'enabled': True,
164 'tenantId': u.not_null,
165 'id': u.not_null,
166 'email': u'juju@localhost'}
167 user4 = {'name': 'swift',
168 'enabled': True,
169 'tenantId': u.not_null,
170 'id': u.not_null,
171 'email': u'juju@localhost'}
172 expected = [user1, user2, user3, user4]
173 actual = self.keystone.users.list()
174
175 ret = u.validate_user_data(expected, actual)
176 if ret:
177 amulet.raise_status(amulet.FAIL, msg=ret)
178
179 def test_service_catalog(self):
180 """Verify that the service catalog endpoint data is valid."""
181 endpoint_vol = {'adminURL': u.valid_url,
182 'region': 'RegionOne',
183 'publicURL': u.valid_url,
184 'internalURL': u.valid_url}
185 endpoint_id = {'adminURL': u.valid_url,
186 'region': 'RegionOne',
187 'publicURL': u.valid_url,
188 'internalURL': u.valid_url}
189 if self._get_openstack_release() >= self.precise_folsom:
190 endpoint_vol['id'] = u.not_null
191 endpoint_id['id'] = u.not_null
192 expected = {'image': [endpoint_id], 'object-store': [endpoint_id],
193 'identity': [endpoint_id]}
194 actual = self.keystone_demo.service_catalog.get_endpoints()
195
196 ret = u.validate_svc_catalog_endpoint_data(expected, actual)
197 if ret:
198 amulet.raise_status(amulet.FAIL, msg=ret)
199
200 def test_openstack_object_store_endpoint(self):
201 """Verify the swift object-store endpoint data."""
202 endpoints = self.keystone.endpoints.list()
203 admin_port = internal_port = public_port = '8080'
204 expected = {'id': u.not_null,
205 'region': 'RegionOne',
206 'adminurl': u.valid_url,
207 'internalurl': u.valid_url,
208 'publicurl': u.valid_url,
209 'service_id': u.not_null}
210
211 ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,
212 public_port, expected)
213 if ret:
214 message = 'object-store endpoint: {}'.format(ret)
215 amulet.raise_status(amulet.FAIL, msg=message)
216
217 def test_swift_proxy_identity_service_relation(self):
218 """Verify the swift-proxy to keystone identity-service relation data."""
219 unit = self.swift_proxy_sentry
220 relation = ['identity-service', 'keystone:identity-service']
221 expected = {
222 'service': 'swift',
223 'region': 'RegionOne',
224 'public_url': u.valid_url,
225 'internal_url': u.valid_url,
226 'private-address': u.valid_ip,
227 'requested_roles': 'Member,Admin',
228 'admin_url': u.valid_url
229 }
230
231 ret = u.validate_relation_data(unit, relation, expected)
232 if ret:
233 message = u.relation_error('swift-proxy identity-service', ret)
234 amulet.raise_status(amulet.FAIL, msg=message)
235
236 def test_keystone_identity_service_relation(self):
237 """Verify the keystone to swift-proxy identity-service relation data."""
238 unit = self.keystone_sentry
239 relation = ['identity-service', 'swift-proxy:identity-service']
240 expected = {
241 'service_protocol': 'http',
242 'service_tenant': 'services',
243 'admin_token': 'ubuntutesting',
244 'service_password': u.not_null,
245 'service_port': '5000',
246 'auth_port': '35357',
247 'auth_protocol': 'http',
248 'private-address': u.valid_ip,
249 'https_keystone': 'False',
250 'auth_host': u.valid_ip,
251 'service_username': 'swift',
252 'service_tenant_id': u.not_null,
253 'service_host': u.valid_ip
254 }
255
256 ret = u.validate_relation_data(unit, relation, expected)
257 if ret:
258 message = u.relation_error('keystone identity-service', ret)
259 amulet.raise_status(amulet.FAIL, msg=message)
260
261 def test_swift_storage_swift_storage_relation(self):
262 """Verify the swift-storage to swift-proxy swift-storage relation
263 data."""
264 unit = self.swift_storage_sentry
265 relation = ['swift-storage', 'swift-proxy:swift-storage']
266 expected = {
267 'account_port': '6002',
268 'zone': '1',
269 'object_port': '6000',
270 'container_port': '6001',
271 'private-address': u.valid_ip,
272 'device': 'vdb'
273 }
274
275 ret = u.validate_relation_data(unit, relation, expected)
276 if ret:
277 message = u.relation_error('swift-storage swift-storage', ret)
278 amulet.raise_status(amulet.FAIL, msg=message)
279
280 def test_swift_proxy_swift_storage_relation(self):
281 """Verify the swift-proxy to swift-storage swift-storage relation
282 data."""
283 unit = self.swift_proxy_sentry
284 relation = ['swift-storage', 'swift-storage:swift-storage']
285 expected = {
286 'private-address': u.valid_ip,
287 'trigger': u.not_null,
288 'rings_url': u.valid_url,
289 'swift_hash': u.not_null
290 }
291
292 ret = u.validate_relation_data(unit, relation, expected)
293 if ret:
294 message = u.relation_error('swift-proxy swift-storage', ret)
295 amulet.raise_status(amulet.FAIL, msg=message)
296
297 def test_glance_object_store_relation(self):
298 """Verify the glance to swift-proxy object-store relation data."""
299 unit = self.glance_sentry
300 relation = ['object-store', 'swift-proxy:object-store']
301 expected = { 'private-address': u.valid_ip }
302
303 ret = u.validate_relation_data(unit, relation, expected)
304 if ret:
305 message = u.relation_error('glance object-store', ret)
306 amulet.raise_status(amulet.FAIL, msg=message)
307
308 def test_swift_proxy_object_store_relation(self):
309 """Verify the swift-proxy to glance object-store relation data."""
310 unit = self.swift_proxy_sentry
311 relation = ['object-store', 'glance:object-store']
312 expected = {'private-address': u.valid_ip}
313 ret = u.validate_relation_data(unit, relation, expected)
314 if ret:
315 message = u.relation_error('swift-proxy object-store', ret)
316 amulet.raise_status(amulet.FAIL, msg=message)
317
318 def test_restart_on_config_change(self):
319 """Verify that the specified services are restarted when the config
320 is changed."""
321 svc = 'swift-proxy'
322 self.d.configure('swift-proxy', {'node-timeout': '90'})
323
324 if not u.service_restarted(self.swift_proxy_sentry, svc,
325 '/etc/swift/proxy-server.conf'):
326 msg = "service {} didn't restart after config change".format(svc)
327 amulet.raise_status(amulet.FAIL, msg=msg)
328
329 self.d.configure('swift-proxy', {'node-timeout': '60'})
330
331 def test_swift_config(self):
332 """Verify the data in the swift config file."""
333 unit = self.swift_proxy_sentry
334 conf = '/etc/swift/swift.conf'
335 swift_proxy_relation = unit.relation('swift-storage',
336 'swift-storage:swift-storage')
337 expected = {
338 'swift_hash_path_suffix': swift_proxy_relation['swift_hash']
339 }
340
341 ret = u.validate_config_data(unit, conf, 'swift-hash', expected)
342 if ret:
343 message = "swift config error: {}".format(ret)
344 amulet.raise_status(amulet.FAIL, msg=message)
345
346 def test_proxy_server_icehouse_config(self):
347 """Verify the data in the proxy-server config file."""
348 if self._get_openstack_release() < self.precise_icehouse:
349 return
350
351 unit = self.swift_proxy_sentry
352 conf = '/etc/swift/proxy-server.conf'
353 keystone_relation = self.keystone_sentry.relation('identity-service',
354 'swift-proxy:identity-service')
355 swift_proxy_relation = unit.relation('identity-service',
356 'keystone:identity-service')
357 swift_proxy_ip = swift_proxy_relation['private-address']
358 auth_host = keystone_relation['auth_host']
359 auth_protocol = keystone_relation['auth_protocol']
360
361 expected = {
362 'DEFAULT': {
363 'bind_port': '8080',
364 'workers': '0',
365 'user': 'swift'
366 },
367 'pipeline:main': {
368 'pipeline': 'gatekeeper healthcheck cache swift3 s3token '
369 'container_sync bulk tempurl slo dlo formpost '
370 'authtoken keystoneauth staticweb '
371 'container-quotas account-quotas proxy-server'
372 },
373 'app:proxy-server': {
374 'use': 'egg:swift#proxy',
375 'allow_account_management': 'true',
376 'account_autocreate': 'true',
377 'node_timeout': '60',
378 'recoverable_node_timeout': '30'
379 },
380 'filter:tempauth': {
381 'use': 'egg:swift#tempauth',
382 'user_system_root': 'testpass .admin https://{}:8080/v1/'
383 'AUTH_system'.format(swift_proxy_ip)
384 },
385 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
386 'filter:cache': {
387 'use': 'egg:swift#memcache',
388 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
389 },
390 'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
391 'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
392 'filter:staticweb': {'use': 'egg:swift#staticweb'},
393 'filter:bulk': {'use': 'egg:swift#bulk'},
394 'filter:slo': {'use': 'egg:swift#slo'},
395 'filter:dlo': {'use': 'egg:swift#dlo'},
396 'filter:formpost': {'use': 'egg:swift#formpost'},
397 'filter:tempurl': {'use': 'egg:swift#tempurl'},
398 'filter:container_sync': {'use': 'egg:swift#container_sync'},
399 'filter:gatekeeper': {'use': 'egg:swift#gatekeeper'},
400 'filter:keystoneauth': {
401 'use': 'egg:swift#keystoneauth',
402 'operator_roles': 'Member,Admin'
403 },
404 'filter:authtoken': {
405 'paste.filter_factory': 'keystoneclient.middleware.'
406 'auth_token:filter_factory',
407 'auth_host': auth_host,
408 'auth_port': keystone_relation['auth_port'],
409 'auth_protocol': auth_protocol,
410 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
411 keystone_relation['service_port']),
412 'admin_tenant_name': keystone_relation['service_tenant'],
413 'admin_user': keystone_relation['service_username'],
414 'admin_password': keystone_relation['service_password'],
415 'delay_auth_decision': 'true',
416 'signing_dir': '/etc/swift',
417 'cache': 'swift.cache'
418 },
419 'filter:s3token': {
420 'paste.filter_factory': 'keystoneclient.middleware.'
421 's3_token:filter_factory',
422 'service_host': keystone_relation['service_host'],
423 'service_port': keystone_relation['service_port'],
424 'auth_port': keystone_relation['auth_port'],
425 'auth_host': keystone_relation['auth_host'],
426 'auth_protocol': keystone_relation['auth_protocol'],
427 'auth_token': keystone_relation['admin_token'],
428 'admin_token': keystone_relation['admin_token']
429 },
430 'filter:swift3': {'use': 'egg:swift3#swift3'}
431 }
432
433 for section, pairs in expected.iteritems():
434 ret = u.validate_config_data(unit, conf, section, pairs)
435 if ret:
436 message = "proxy-server config error: {}".format(ret)
437 amulet.raise_status(amulet.FAIL, msg=message)
438
439 def test_proxy_server_havana_config(self):
440 """Verify the data in the proxy-server config file."""
441 if self._get_openstack_release() != self.precise_havana:
442 return
443
444 unit = self.swift_proxy_sentry
445 conf = '/etc/swift/proxy-server.conf'
446 keystone_relation = self.keystone_sentry.relation('identity-service',
447 'swift-proxy:identity-service')
448 swift_proxy_relation = unit.relation('identity-service',
449 'keystone:identity-service')
450 swift_proxy_ip = swift_proxy_relation['private-address']
451 auth_host = keystone_relation['auth_host']
452 auth_protocol = keystone_relation['auth_protocol']
453
454 expected = {
455 'DEFAULT': {
456 'bind_port': '8080',
457 'workers': '0',
458 'user': 'swift'
459 },
460 'pipeline:main': {
461 'pipeline': 'healthcheck cache swift3 authtoken '
462 'keystoneauth container-quotas account-quotas '
463 'proxy-server'
464 },
465 'app:proxy-server': {
466 'use': 'egg:swift#proxy',
467 'allow_account_management': 'true',
468 'account_autocreate': 'true',
469 'node_timeout': '60',
470 'recoverable_node_timeout': '30'
471 },
472 'filter:tempauth': {
473 'use': 'egg:swift#tempauth',
474 'user_system_root': 'testpass .admin https://{}:8080/v1/'
475 'AUTH_system'.format(swift_proxy_ip)
476 },
477 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
478 'filter:cache': {
479 'use': 'egg:swift#memcache',
480 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
481 },
482 'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
483 'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
484 'filter:keystoneauth': {
485 'use': 'egg:swift#keystoneauth',
486 'operator_roles': 'Member,Admin'
487 },
488 'filter:authtoken': {
489 'paste.filter_factory': 'keystoneclient.middleware.'
490 'auth_token:filter_factory',
491 'auth_host': auth_host,
492 'auth_port': keystone_relation['auth_port'],
493 'auth_protocol': auth_protocol,
494 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
495 keystone_relation['service_port']),
496 'admin_tenant_name': keystone_relation['service_tenant'],
497 'admin_user': keystone_relation['service_username'],
498 'admin_password': keystone_relation['service_password'],
499 'delay_auth_decision': 'true',
500 'signing_dir': '/etc/swift',
501 'cache': 'swift.cache'
502 },
503 'filter:s3token': {
504 'paste.filter_factory': 'keystone.middleware.s3_token:'
505 'filter_factory',
506 'service_host': keystone_relation['service_host'],
507 'service_port': keystone_relation['service_port'],
508 'auth_port': keystone_relation['auth_port'],
509 'auth_host': keystone_relation['auth_host'],
510 'auth_protocol': keystone_relation['auth_protocol'],
511 'auth_token': keystone_relation['admin_token'],
512 'admin_token': keystone_relation['admin_token'],
513 'service_protocol': keystone_relation['service_protocol']
514 },
515 'filter:swift3': {'use': 'egg:swift3#swift3'}
516 }
517
518 for section, pairs in expected.iteritems():
519 ret = u.validate_config_data(unit, conf, section, pairs)
520 if ret:
521 message = "proxy-server config error: {}".format(ret)
522 amulet.raise_status(amulet.FAIL, msg=message)
523
524 def test_proxy_server_grizzly_config(self):
525 """Verify the data in the proxy-server config file."""
526 if self._get_openstack_release() != self.precise_grizzly:
527 return
528
529 unit = self.swift_proxy_sentry
530 conf = '/etc/swift/proxy-server.conf'
531 keystone_relation = self.keystone_sentry.relation('identity-service',
532 'swift-proxy:identity-service')
533 swift_proxy_relation = unit.relation('identity-service',
534 'keystone:identity-service')
535 swift_proxy_ip = swift_proxy_relation['private-address']
536 auth_host = keystone_relation['auth_host']
537 auth_protocol = keystone_relation['auth_protocol']
538
539 expected = {
540 'DEFAULT': {
541 'bind_port': '8080',
542 'workers': '0',
543 'user': 'swift'
544 },
545 'pipeline:main': {
546 'pipeline': 'healthcheck cache swift3 s3token authtoken '
547 'keystone container-quotas account-quotas '
548 'proxy-server'
549 },
550 'app:proxy-server': {
551 'use': 'egg:swift#proxy',
552 'allow_account_management': 'true',
553 'account_autocreate': 'true',
554 'node_timeout': '60',
555 'recoverable_node_timeout': '30'
556 },
557 'filter:tempauth': {
558 'use': 'egg:swift#tempauth',
559 'user_system_root': 'testpass .admin https://{}:8080/v1/'
560 'AUTH_system'.format(swift_proxy_ip)
561 },
562 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
563 'filter:cache': {
564 'use': 'egg:swift#memcache',
565 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
566 },
567 'filter:account-quotas': {'use': 'egg:swift#account_quotas'},
568 'filter:container-quotas': {'use': 'egg:swift#container_quotas'},
569 'filter:keystone': {
570 'paste.filter_factory': 'swift.common.middleware.'
571 'keystoneauth:filter_factory',
572 'operator_roles': 'Member,Admin'
573 },
574 'filter:authtoken': {
575 'paste.filter_factory': 'keystone.middleware.auth_token:'
576 'filter_factory',
577 'auth_host': auth_host,
578 'auth_port': keystone_relation['auth_port'],
579 'auth_protocol': auth_protocol,
580 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
581 keystone_relation['service_port']),
582 'admin_tenant_name': keystone_relation['service_tenant'],
583 'admin_user': keystone_relation['service_username'],
584 'admin_password': keystone_relation['service_password'],
585 'delay_auth_decision': 'true',
586 'signing_dir': '/etc/swift'
587 },
588 'filter:s3token': {
589 'paste.filter_factory': 'keystone.middleware.s3_token:'
590 'filter_factory',
591 'service_host': keystone_relation['service_host'],
592 'service_port': keystone_relation['service_port'],
593 'auth_port': keystone_relation['auth_port'],
594 'auth_host': keystone_relation['auth_host'],
595 'auth_protocol': keystone_relation['auth_protocol'],
596 'auth_token': keystone_relation['admin_token'],
597 'admin_token': keystone_relation['admin_token'],
598 'service_protocol': keystone_relation['service_protocol']
599 },
600 'filter:swift3': {'use': 'egg:swift3#swift3'}
601 }
602
603 for section, pairs in expected.iteritems():
604 ret = u.validate_config_data(unit, conf, section, pairs)
605 if ret:
606 message = "proxy-server config error: {}".format(ret)
607 amulet.raise_status(amulet.FAIL, msg=message)
608
609 def test_proxy_server_folsom_config(self):
610 """Verify the data in the proxy-server config file."""
611 if self._get_openstack_release() != self.precise_folsom:
612 return
613
614 unit = self.swift_proxy_sentry
615 conf = '/etc/swift/proxy-server.conf'
616 keystone_relation = self.keystone_sentry.relation('identity-service',
617 'swift-proxy:identity-service')
618 swift_proxy_relation = unit.relation('identity-service',
619 'keystone:identity-service')
620 swift_proxy_ip = swift_proxy_relation['private-address']
621 auth_host = keystone_relation['auth_host']
622 auth_protocol = keystone_relation['auth_protocol']
623
624 expected = {
625 'DEFAULT': {
626 'bind_port': '8080',
627 'workers': '0',
628 'user': 'swift'
629 },
630 'pipeline:main': {
631 'pipeline': 'healthcheck cache swift3 s3token authtoken '
632 'keystone proxy-server'
633 },
634 'app:proxy-server': {
635 'use': 'egg:swift#proxy',
636 'allow_account_management': 'true',
637 'account_autocreate': 'true',
638 'node_timeout': '60',
639 'recoverable_node_timeout': '30'
640 },
641 'filter:tempauth': {
642 'use': 'egg:swift#tempauth',
643 'user_system_root': 'testpass .admin https://{}:8080/v1/'
644 'AUTH_system'.format(swift_proxy_ip)
645 },
646 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
647 'filter:cache': {
648 'use': 'egg:swift#memcache',
649 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
650 },
651 'filter:keystone': {
652 'paste.filter_factory': 'keystone.middleware.swift_auth:'
653 'filter_factory',
654 'operator_roles': 'Member,Admin'
655 },
656 'filter:authtoken': {
657 'paste.filter_factory': 'keystone.middleware.auth_token:'
658 'filter_factory',
659 'auth_host': auth_host,
660 'auth_port': keystone_relation['auth_port'],
661 'auth_protocol': auth_protocol,
662 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
663 keystone_relation['service_port']),
664 'admin_tenant_name': keystone_relation['service_tenant'],
665 'admin_user': keystone_relation['service_username'],
666 'admin_password': keystone_relation['service_password'],
667 'delay_auth_decision': '1'
668 },
669 'filter:s3token': {
670 'paste.filter_factory': 'keystone.middleware.s3_token:'
671 'filter_factory',
672 'service_host': keystone_relation['service_host'],
673 'service_port': keystone_relation['service_port'],
674 'auth_port': keystone_relation['auth_port'],
675 'auth_host': keystone_relation['auth_host'],
676 'auth_protocol': keystone_relation['auth_protocol'],
677 'auth_token': keystone_relation['admin_token'],
678 'admin_token': keystone_relation['admin_token'],
679 'service_protocol': keystone_relation['service_protocol']
680 },
681 'filter:swift3': {'use': 'egg:swift#swift3'}
682 }
683
684 for section, pairs in expected.iteritems():
685 ret = u.validate_config_data(unit, conf, section, pairs)
686 if ret:
687 message = "proxy-server config error: {}".format(ret)
688 amulet.raise_status(amulet.FAIL, msg=message)
689
690 def test_proxy_server_essex_config(self):
691 """Verify the data in the proxy-server config file."""
692 if self._get_openstack_release() != self.precise_essex:
693 return
694
695 unit = self.swift_proxy_sentry
696 conf = '/etc/swift/proxy-server.conf'
697 keystone_relation = self.keystone_sentry.relation('identity-service',
698 'swift-proxy:identity-service')
699 swift_proxy_relation = unit.relation('identity-service',
700 'keystone:identity-service')
701 swift_proxy_ip = swift_proxy_relation['private-address']
702 auth_host = keystone_relation['auth_host']
703 auth_protocol = keystone_relation['auth_protocol']
704
705 expected = {
706 'DEFAULT': {
707 'bind_port': '8080',
708 'workers': '0',
709 'user': 'swift'
710 },
711 'pipeline:main': {
712 'pipeline': 'healthcheck cache swift3 s3token authtoken '
713 'keystone proxy-server'
714 },
715 'app:proxy-server': {
716 'use': 'egg:swift#proxy',
717 'allow_account_management': 'true',
718 'account_autocreate': 'true',
719 'node_timeout': '60',
720 'recoverable_node_timeout': '30'
721 },
722 'filter:tempauth': {
723 'use': 'egg:swift#tempauth',
724 'user_system_root': 'testpass .admin https://{}:8080/v1/'
725 'AUTH_system'.format(swift_proxy_ip)
726 },
727 'filter:healthcheck': {'use': 'egg:swift#healthcheck'},
728 'filter:cache': {
729 'use': 'egg:swift#memcache',
730 'memcache_servers': '{}:11211'.format(swift_proxy_ip)
731 },
732 'filter:keystone': {
733 'paste.filter_factory': 'keystone.middleware.swift_auth:'
734 'filter_factory',
735 'operator_roles': 'Member,Admin'
736 },
737 'filter:authtoken': {
738 'paste.filter_factory': 'keystone.middleware.auth_token:'
739 'filter_factory',
740 'auth_host': auth_host,
741 'auth_port': keystone_relation['auth_port'],
742 'auth_protocol': auth_protocol,
743 'auth_uri': '{}://{}:{}'.format(auth_protocol, auth_host,
744 keystone_relation['service_port']),
745 'admin_tenant_name': keystone_relation['service_tenant'],
746 'admin_user': keystone_relation['service_username'],
747 'admin_password': keystone_relation['service_password'],
748 'delay_auth_decision': '1'
749 },
750 'filter:s3token': {
751 'paste.filter_factory': 'keystone.middleware.s3_token:'
752 'filter_factory',
753 'service_host': keystone_relation['service_host'],
754 'service_port': keystone_relation['service_port'],
755 'auth_port': keystone_relation['auth_port'],
756 'auth_host': keystone_relation['auth_host'],
757 'auth_protocol': keystone_relation['auth_protocol'],
758 'auth_token': keystone_relation['admin_token'],
759 'admin_token': keystone_relation['admin_token'],
760 'service_protocol': keystone_relation['service_protocol']
761 },
762 'filter:swift3': {'use': 'egg:swift#swift3'}
763 }
764
765 for section, pairs in expected.iteritems():
766 ret = u.validate_config_data(unit, conf, section, pairs)
767 if ret:
768 message = "proxy-server config error: {}".format(ret)
769 amulet.raise_status(amulet.FAIL, msg=message)
770
771 def test_image_create(self):
772 """Create an instance in glance, which is backed by swift, and validate
773 that some of the metadata for the image match in glance and swift."""
774 # NOTE(coreycb): Skipping failing test on folsom until resolved. On
775 # folsom only, uploading an image to glance gets 400 Bad
776 # Request - Error uploading image: (error): [Errno 111]
777 # ECONNREFUSED (HTTP 400)
778 if self._get_openstack_release() == self.precise_folsom:
779 u.log.error("Skipping failing test until resolved")
780 return
781
782 # Create glance image
783 image = u.create_cirros_image(self.glance, "cirros-image")
784 if not image:
785 amulet.raise_status(amulet.FAIL, msg="Image create failed")
786
787 # Validate that cirros image exists in glance and get its checksum/size
788 images = list(self.glance.images.list())
789 if len(images) != 1:
790 msg = "Expected 1 glance image, found {}".format(len(images))
791 amulet.raise_status(amulet.FAIL, msg=msg)
792
793 if images[0].name != 'cirros-image':
794 message = "cirros image does not exist"
795 amulet.raise_status(amulet.FAIL, msg=message)
796
797 glance_image_md5 = image.checksum
798 glance_image_size = image.size
799
800 # Validate that swift object's checksum/size match that from glance
801 headers, containers = self.swift.get_account()
802 if len(containers) != 1:
803 msg = "Expected 1 swift container, found {}".format(len(containers))
804 amulet.raise_status(amulet.FAIL, msg=msg)
805
806 container_name = containers[0].get('name')
807
808 headers, objects = self.swift.get_container(container_name)
809 if len(objects) != 1:
810 msg = "Expected 1 swift object, found {}".format(len(objects))
811 amulet.raise_status(amulet.FAIL, msg=msg)
812
813 swift_object_size = objects[0].get('bytes')
814 swift_object_md5 = objects[0].get('hash')
815
816 if glance_image_size != swift_object_size:
817 msg = "Glance image size {} != swift object size {}".format( \
818 glance_image_size, swift_object_size)
819 amulet.raise_status(amulet.FAIL, msg=msg)
820
821 if glance_image_md5 != swift_object_md5:
822 msg = "Glance image hash {} != swift object hash {}".format( \
823 glance_image_md5, swift_object_md5)
824 amulet.raise_status(amulet.FAIL, msg=msg)
825
826 # Cleanup
827 u.delete_image(self.glance, image)
0828
=== added directory 'tests/charmhelpers'
=== added file 'tests/charmhelpers/__init__.py'
=== added directory 'tests/charmhelpers/contrib'
=== added file 'tests/charmhelpers/contrib/__init__.py'
=== added directory 'tests/charmhelpers/contrib/amulet'
=== added file 'tests/charmhelpers/contrib/amulet/__init__.py'
=== added file 'tests/charmhelpers/contrib/amulet/deployment.py'
--- tests/charmhelpers/contrib/amulet/deployment.py 1970-01-01 00:00:00 +0000
+++ tests/charmhelpers/contrib/amulet/deployment.py 2014-10-06 15:38:41 +0000
@@ -0,0 +1,72 @@
1import amulet
2
3import os
4
5
6class AmuletDeployment(object):
7 """Amulet deployment.
The diff has been truncated for viewing.

Subscribers

People subscribed via source and target branches