Merge lp:~james-page/charms/trusty/glance/network-splits into lp:~openstack-charmers-archive/charms/trusty/glance/next
- Trusty Tahr (14.04)
- network-splits
- Merge into next
Proposed by
James Page
Status: | Merged |
---|---|
Merged at revision: | 55 |
Proposed branch: | lp:~james-page/charms/trusty/glance/network-splits |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/glance/next |
Diff against target: |
1049 lines (+511/-168) 23 files modified
.bzrignore (+2/-0) Makefile (+11/-5) charm-helpers-hooks.yaml (+2/-1) config.yaml (+28/-9) hooks/charmhelpers/contrib/hahelpers/cluster.py (+2/-2) hooks/charmhelpers/contrib/network/ip.py (+156/-0) hooks/charmhelpers/contrib/openstack/context.py (+28/-3) hooks/charmhelpers/contrib/openstack/ip.py (+75/-0) hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+15/-0) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+41/-0) hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+23/-0) hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+23/-0) hooks/charmhelpers/core/host.py (+4/-0) hooks/glance_contexts.py (+1/-0) hooks/glance_relations.py (+62/-28) hooks/glance_utils.py (+1/-1) templates/haproxy.cfg (+0/-36) tests/charmhelpers/contrib/amulet/deployment.py (+14/-19) tests/charmhelpers/contrib/amulet/utils.py (+2/-2) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+3/-5) tests/charmhelpers/contrib/openstack/amulet/utils.py (+1/-45) unit_tests/test_glance_relations.py (+14/-9) unit_tests/test_utils.py (+3/-3) |
To merge this branch: | bzr merge lp:~james-page/charms/trusty/glance/network-splits |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
James Page | Needs Resubmitting | ||
James Troup (community) | Needs Fixing | ||
OpenStack Charmers | Pending | ||
Review via email: mp+228141@code.launchpad.net |
Commit message
Description of the change
Add support for multiple network configurations.
To post a comment you must log in.
- 72. By James Page
-
Rebase
Revision history for this message
James Page (james-page) : | # |
review:
Needs Resubmitting
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file '.bzrignore' | |||
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 | |||
3 | +++ .bzrignore 2014-07-25 09:37:37 +0000 | |||
4 | @@ -0,0 +1,2 @@ | |||
5 | 1 | .coverage | ||
6 | 2 | bin | ||
7 | 0 | 3 | ||
8 | === modified file 'Makefile' | |||
9 | --- Makefile 2014-07-18 09:45:47 +0000 | |||
10 | +++ Makefile 2014-07-25 09:37:37 +0000 | |||
11 | @@ -1,4 +1,5 @@ | |||
12 | 1 | #!/usr/bin/make | 1 | #!/usr/bin/make |
13 | 2 | PYTHON := /usr/bin/env python | ||
14 | 2 | 3 | ||
15 | 3 | lint: | 4 | lint: |
16 | 4 | @echo "Running flake8 tests: " | 5 | @echo "Running flake8 tests: " |
17 | @@ -8,12 +9,17 @@ | |||
18 | 8 | @charm proof | 9 | @charm proof |
19 | 9 | @echo "OK" | 10 | @echo "OK" |
20 | 10 | 11 | ||
21 | 11 | sync: | ||
22 | 12 | @charm-helper-sync -c charm-helpers-hooks.yaml | ||
23 | 13 | @charm-helper-sync -c charm-helpers-tests.yaml | ||
24 | 14 | |||
25 | 15 | unit_test: | 12 | unit_test: |
27 | 16 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests | 13 | @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests |
28 | 14 | |||
29 | 15 | bin/charm_helpers_sync.py: | ||
30 | 16 | @mkdir -p bin | ||
31 | 17 | @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ | ||
32 | 18 | > bin/charm_helpers_sync.py | ||
33 | 19 | |||
34 | 20 | sync: bin/charm_helpers_sync.py | ||
35 | 21 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml | ||
36 | 22 | @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml | ||
37 | 17 | 23 | ||
38 | 18 | test: | 24 | test: |
39 | 19 | @echo Starting Amulet tests... | 25 | @echo Starting Amulet tests... |
40 | 20 | 26 | ||
41 | === modified file 'charm-helpers-hooks.yaml' | |||
42 | --- charm-helpers-hooks.yaml 2014-06-24 19:54:45 +0000 | |||
43 | +++ charm-helpers-hooks.yaml 2014-07-25 09:37:37 +0000 | |||
44 | @@ -3,7 +3,8 @@ | |||
45 | 3 | include: | 3 | include: |
46 | 4 | - core | 4 | - core |
47 | 5 | - fetch | 5 | - fetch |
49 | 6 | - contrib.openstack | 6 | - contrib.openstack|inc=* |
50 | 7 | - contrib.hahelpers | 7 | - contrib.hahelpers |
51 | 8 | - contrib.storage.linux.ceph | 8 | - contrib.storage.linux.ceph |
52 | 9 | - payload.execd | 9 | - payload.execd |
53 | 10 | - contrib.network.ip | ||
54 | 10 | 11 | ||
55 | === modified file 'config.yaml' | |||
56 | --- config.yaml 2014-04-12 16:55:29 +0000 | |||
57 | +++ config.yaml 2014-07-25 09:37:37 +0000 | |||
58 | @@ -52,15 +52,11 @@ | |||
59 | 52 | # HA configuration settings | 52 | # HA configuration settings |
60 | 53 | vip: | 53 | vip: |
61 | 54 | type: string | 54 | type: string |
71 | 55 | description: "Virtual IP to use to front Glance API in ha configuration" | 55 | description: | |
72 | 56 | vip_iface: | 56 | Virtual IP(s) to use to front API services in HA configuration. |
73 | 57 | type: string | 57 | . |
74 | 58 | default: eth0 | 58 | If multiple networks are being used, a VIP should be provided for each |
75 | 59 | description: "Network Interface where to place the Virtual IP" | 59 | network, separated by spaces. |
67 | 60 | vip_cidr: | ||
68 | 61 | type: int | ||
69 | 62 | default: 24 | ||
70 | 63 | description: "Netmask that will be used for the Virtual IP" | ||
76 | 64 | ha-bindiface: | 60 | ha-bindiface: |
77 | 65 | type: string | 61 | type: string |
78 | 66 | default: eth0 | 62 | default: eth0 |
79 | @@ -96,4 +92,27 @@ | |||
80 | 96 | default: openstack | 92 | default: openstack |
81 | 97 | type: string | 93 | type: string |
82 | 98 | description: RabbitMQ virtual host to request access on rabbitmq-server. | 94 | description: RabbitMQ virtual host to request access on rabbitmq-server. |
83 | 95 | # Network configuration options | ||
84 | 96 | # by default all access is over 'private-address' | ||
85 | 97 | os-admin-network: | ||
86 | 98 | type: string | ||
87 | 99 | description: | | ||
88 | 100 | The IP address and netmask of the OpenStack Admin network (e.g., | ||
89 | 101 | 192.168.0.0/24) | ||
90 | 102 | . | ||
91 | 103 | This network will be used for admin endpoints. | ||
92 | 104 | os-internal-network: | ||
93 | 105 | type: string | ||
94 | 106 | description: | | ||
95 | 107 | The IP address and netmask of the OpenStack Internal network (e.g., | ||
96 | 108 | 192.168.0.0/24) | ||
97 | 109 | . | ||
98 | 110 | This network will be used for internal endpoints. | ||
99 | 111 | os-public-network: | ||
100 | 112 | type: string | ||
101 | 113 | description: | | ||
102 | 114 | The IP address and netmask of the OpenStack Public network (e.g., | ||
103 | 115 | 192.168.0.0/24) | ||
104 | 116 | . | ||
105 | 117 | This network will be used for public endpoints. | ||
106 | 99 | 118 | ||
107 | 100 | 119 | ||
108 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' | |||
109 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-10 21:43:51 +0000 | |||
110 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-25 09:37:37 +0000 | |||
111 | @@ -146,12 +146,12 @@ | |||
112 | 146 | Obtains all relevant configuration from charm configuration required | 146 | Obtains all relevant configuration from charm configuration required |
113 | 147 | for initiating a relation to hacluster: | 147 | for initiating a relation to hacluster: |
114 | 148 | 148 | ||
116 | 149 | ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr | 149 | ha-bindiface, ha-mcastport, vip |
117 | 150 | 150 | ||
118 | 151 | returns: dict: A dict containing settings keyed by setting name. | 151 | returns: dict: A dict containing settings keyed by setting name. |
119 | 152 | raises: HAIncompleteConfig if settings are missing. | 152 | raises: HAIncompleteConfig if settings are missing. |
120 | 153 | ''' | 153 | ''' |
122 | 154 | settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] | 154 | settings = ['ha-bindiface', 'ha-mcastport', 'vip'] |
123 | 155 | conf = {} | 155 | conf = {} |
124 | 156 | for setting in settings: | 156 | for setting in settings: |
125 | 157 | conf[setting] = config_get(setting) | 157 | conf[setting] = config_get(setting) |
126 | 158 | 158 | ||
127 | === added directory 'hooks/charmhelpers/contrib/network' | |||
128 | === added file 'hooks/charmhelpers/contrib/network/__init__.py' | |||
129 | === added file 'hooks/charmhelpers/contrib/network/ip.py' | |||
130 | --- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 | |||
131 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-07-25 09:37:37 +0000 | |||
132 | @@ -0,0 +1,156 @@ | |||
133 | 1 | import sys | ||
134 | 2 | |||
135 | 3 | from functools import partial | ||
136 | 4 | |||
137 | 5 | from charmhelpers.fetch import apt_install | ||
138 | 6 | from charmhelpers.core.hookenv import ( | ||
139 | 7 | ERROR, log, | ||
140 | 8 | ) | ||
141 | 9 | |||
142 | 10 | try: | ||
143 | 11 | import netifaces | ||
144 | 12 | except ImportError: | ||
145 | 13 | apt_install('python-netifaces') | ||
146 | 14 | import netifaces | ||
147 | 15 | |||
148 | 16 | try: | ||
149 | 17 | import netaddr | ||
150 | 18 | except ImportError: | ||
151 | 19 | apt_install('python-netaddr') | ||
152 | 20 | import netaddr | ||
153 | 21 | |||
154 | 22 | |||
155 | 23 | def _validate_cidr(network): | ||
156 | 24 | try: | ||
157 | 25 | netaddr.IPNetwork(network) | ||
158 | 26 | except (netaddr.core.AddrFormatError, ValueError): | ||
159 | 27 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
160 | 28 | network) | ||
161 | 29 | |||
162 | 30 | |||
163 | 31 | def get_address_in_network(network, fallback=None, fatal=False): | ||
164 | 32 | """ | ||
165 | 33 | Get an IPv4 or IPv6 address within the network from the host. | ||
166 | 34 | |||
167 | 35 | :param network (str): CIDR presentation format. For example, | ||
168 | 36 | '192.168.1.0/24'. | ||
169 | 37 | :param fallback (str): If no address is found, return fallback. | ||
170 | 38 | :param fatal (boolean): If no address is found, fallback is not | ||
171 | 39 | set and fatal is True then exit(1). | ||
172 | 40 | |||
173 | 41 | """ | ||
174 | 42 | |||
175 | 43 | def not_found_error_out(): | ||
176 | 44 | log("No IP address found in network: %s" % network, | ||
177 | 45 | level=ERROR) | ||
178 | 46 | sys.exit(1) | ||
179 | 47 | |||
180 | 48 | if network is None: | ||
181 | 49 | if fallback is not None: | ||
182 | 50 | return fallback | ||
183 | 51 | else: | ||
184 | 52 | if fatal: | ||
185 | 53 | not_found_error_out() | ||
186 | 54 | |||
187 | 55 | _validate_cidr(network) | ||
188 | 56 | network = netaddr.IPNetwork(network) | ||
189 | 57 | for iface in netifaces.interfaces(): | ||
190 | 58 | addresses = netifaces.ifaddresses(iface) | ||
191 | 59 | if network.version == 4 and netifaces.AF_INET in addresses: | ||
192 | 60 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
193 | 61 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
194 | 62 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
195 | 63 | if cidr in network: | ||
196 | 64 | return str(cidr.ip) | ||
197 | 65 | if network.version == 6 and netifaces.AF_INET6 in addresses: | ||
198 | 66 | for addr in addresses[netifaces.AF_INET6]: | ||
199 | 67 | if not addr['addr'].startswith('fe80'): | ||
200 | 68 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
201 | 69 | addr['netmask'])) | ||
202 | 70 | if cidr in network: | ||
203 | 71 | return str(cidr.ip) | ||
204 | 72 | |||
205 | 73 | if fallback is not None: | ||
206 | 74 | return fallback | ||
207 | 75 | |||
208 | 76 | if fatal: | ||
209 | 77 | not_found_error_out() | ||
210 | 78 | |||
211 | 79 | return None | ||
212 | 80 | |||
213 | 81 | |||
214 | 82 | def is_ipv6(address): | ||
215 | 83 | '''Determine whether provided address is IPv6 or not''' | ||
216 | 84 | try: | ||
217 | 85 | address = netaddr.IPAddress(address) | ||
218 | 86 | except netaddr.AddrFormatError: | ||
219 | 87 | # probably a hostname - so not an address at all! | ||
220 | 88 | return False | ||
221 | 89 | else: | ||
222 | 90 | return address.version == 6 | ||
223 | 91 | |||
224 | 92 | |||
225 | 93 | def is_address_in_network(network, address): | ||
226 | 94 | """ | ||
227 | 95 | Determine whether the provided address is within a network range. | ||
228 | 96 | |||
229 | 97 | :param network (str): CIDR presentation format. For example, | ||
230 | 98 | '192.168.1.0/24'. | ||
231 | 99 | :param address: An individual IPv4 or IPv6 address without a net | ||
232 | 100 | mask or subnet prefix. For example, '192.168.1.1'. | ||
233 | 101 | :returns boolean: Flag indicating whether address is in network. | ||
234 | 102 | """ | ||
235 | 103 | try: | ||
236 | 104 | network = netaddr.IPNetwork(network) | ||
237 | 105 | except (netaddr.core.AddrFormatError, ValueError): | ||
238 | 106 | raise ValueError("Network (%s) is not in CIDR presentation format" % | ||
239 | 107 | network) | ||
240 | 108 | try: | ||
241 | 109 | address = netaddr.IPAddress(address) | ||
242 | 110 | except (netaddr.core.AddrFormatError, ValueError): | ||
243 | 111 | raise ValueError("Address (%s) is not in correct presentation format" % | ||
244 | 112 | address) | ||
245 | 113 | if address in network: | ||
246 | 114 | return True | ||
247 | 115 | else: | ||
248 | 116 | return False | ||
249 | 117 | |||
250 | 118 | |||
251 | 119 | def _get_for_address(address, key): | ||
252 | 120 | """Retrieve an attribute of or the physical interface that | ||
253 | 121 | the IP address provided could be bound to. | ||
254 | 122 | |||
255 | 123 | :param address (str): An individual IPv4 or IPv6 address without a net | ||
256 | 124 | mask or subnet prefix. For example, '192.168.1.1'. | ||
257 | 125 | :param key: 'iface' for the physical interface name or an attribute | ||
258 | 126 | of the configured interface, for example 'netmask'. | ||
259 | 127 | :returns str: Requested attribute or None if address is not bindable. | ||
260 | 128 | """ | ||
261 | 129 | address = netaddr.IPAddress(address) | ||
262 | 130 | for iface in netifaces.interfaces(): | ||
263 | 131 | addresses = netifaces.ifaddresses(iface) | ||
264 | 132 | if address.version == 4 and netifaces.AF_INET in addresses: | ||
265 | 133 | addr = addresses[netifaces.AF_INET][0]['addr'] | ||
266 | 134 | netmask = addresses[netifaces.AF_INET][0]['netmask'] | ||
267 | 135 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | ||
268 | 136 | if address in cidr: | ||
269 | 137 | if key == 'iface': | ||
270 | 138 | return iface | ||
271 | 139 | else: | ||
272 | 140 | return addresses[netifaces.AF_INET][0][key] | ||
273 | 141 | if address.version == 6 and netifaces.AF_INET6 in addresses: | ||
274 | 142 | for addr in addresses[netifaces.AF_INET6]: | ||
275 | 143 | if not addr['addr'].startswith('fe80'): | ||
276 | 144 | cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], | ||
277 | 145 | addr['netmask'])) | ||
278 | 146 | if address in cidr: | ||
279 | 147 | if key == 'iface': | ||
280 | 148 | return iface | ||
281 | 149 | else: | ||
282 | 150 | return addr[key] | ||
283 | 151 | return None | ||
284 | 152 | |||
285 | 153 | |||
286 | 154 | get_iface_for_address = partial(_get_for_address, key='iface') | ||
287 | 155 | |||
288 | 156 | get_netmask_for_address = partial(_get_for_address, key='netmask') | ||
289 | 0 | 157 | ||
290 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
291 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-07-10 21:43:51 +0000 | |||
292 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-25 09:37:37 +0000 | |||
293 | @@ -21,6 +21,7 @@ | |||
294 | 21 | relation_get, | 21 | relation_get, |
295 | 22 | relation_ids, | 22 | relation_ids, |
296 | 23 | related_units, | 23 | related_units, |
297 | 24 | relation_set, | ||
298 | 24 | unit_get, | 25 | unit_get, |
299 | 25 | unit_private_ip, | 26 | unit_private_ip, |
300 | 26 | ERROR, | 27 | ERROR, |
301 | @@ -43,6 +44,8 @@ | |||
302 | 43 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
303 | 44 | ) | 45 | ) |
304 | 45 | 46 | ||
305 | 47 | from charmhelpers.contrib.network.ip import get_address_in_network | ||
306 | 48 | |||
307 | 46 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 49 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
308 | 47 | 50 | ||
309 | 48 | 51 | ||
310 | @@ -135,8 +138,26 @@ | |||
311 | 135 | 'Missing required charm config options. ' | 138 | 'Missing required charm config options. ' |
312 | 136 | '(database name and user)') | 139 | '(database name and user)') |
313 | 137 | raise OSContextError | 140 | raise OSContextError |
314 | 141 | |||
315 | 138 | ctxt = {} | 142 | ctxt = {} |
316 | 139 | 143 | ||
317 | 144 | # NOTE(jamespage) if mysql charm provides a network upon which | ||
318 | 145 | # access to the database should be made, reconfigure relation | ||
319 | 146 | # with the service units local address and defer execution | ||
320 | 147 | access_network = relation_get('access-network') | ||
321 | 148 | if access_network is not None: | ||
322 | 149 | if self.relation_prefix is not None: | ||
323 | 150 | hostname_key = "{}_hostname".format(self.relation_prefix) | ||
324 | 151 | else: | ||
325 | 152 | hostname_key = "hostname" | ||
326 | 153 | access_hostname = get_address_in_network(access_network, | ||
327 | 154 | unit_get('private-address')) | ||
328 | 155 | set_hostname = relation_get(attribute=hostname_key, | ||
329 | 156 | unit=local_unit()) | ||
330 | 157 | if set_hostname != access_hostname: | ||
331 | 158 | relation_set(relation_settings={hostname_key: access_hostname}) | ||
332 | 159 | return ctxt # Defer any further hook execution for now.... | ||
333 | 160 | |||
334 | 140 | password_setting = 'password' | 161 | password_setting = 'password' |
335 | 141 | if self.relation_prefix: | 162 | if self.relation_prefix: |
336 | 142 | password_setting = self.relation_prefix + '_password' | 163 | password_setting = self.relation_prefix + '_password' |
337 | @@ -341,10 +362,12 @@ | |||
338 | 341 | use_syslog = str(config('use-syslog')).lower() | 362 | use_syslog = str(config('use-syslog')).lower() |
339 | 342 | for rid in relation_ids('ceph'): | 363 | for rid in relation_ids('ceph'): |
340 | 343 | for unit in related_units(rid): | 364 | for unit in related_units(rid): |
341 | 344 | mon_hosts.append(relation_get('private-address', rid=rid, | ||
342 | 345 | unit=unit)) | ||
343 | 346 | auth = relation_get('auth', rid=rid, unit=unit) | 365 | auth = relation_get('auth', rid=rid, unit=unit) |
344 | 347 | key = relation_get('key', rid=rid, unit=unit) | 366 | key = relation_get('key', rid=rid, unit=unit) |
345 | 367 | ceph_addr = \ | ||
346 | 368 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ | ||
347 | 369 | relation_get('private-address', rid=rid, unit=unit) | ||
348 | 370 | mon_hosts.append(ceph_addr) | ||
349 | 348 | 371 | ||
350 | 349 | ctxt = { | 372 | ctxt = { |
351 | 350 | 'mon_hosts': ' '.join(mon_hosts), | 373 | 'mon_hosts': ' '.join(mon_hosts), |
352 | @@ -378,7 +401,9 @@ | |||
353 | 378 | 401 | ||
354 | 379 | cluster_hosts = {} | 402 | cluster_hosts = {} |
355 | 380 | l_unit = local_unit().replace('/', '-') | 403 | l_unit = local_unit().replace('/', '-') |
357 | 381 | cluster_hosts[l_unit] = unit_get('private-address') | 404 | cluster_hosts[l_unit] = \ |
358 | 405 | get_address_in_network(config('os-internal-network'), | ||
359 | 406 | unit_get('private-address')) | ||
360 | 382 | 407 | ||
361 | 383 | for rid in relation_ids('cluster'): | 408 | for rid in relation_ids('cluster'): |
362 | 384 | for unit in related_units(rid): | 409 | for unit in related_units(rid): |
363 | 385 | 410 | ||
364 | === added file 'hooks/charmhelpers/contrib/openstack/ip.py' | |||
365 | --- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000 | |||
366 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-07-25 09:37:37 +0000 | |||
367 | @@ -0,0 +1,75 @@ | |||
368 | 1 | from charmhelpers.core.hookenv import ( | ||
369 | 2 | config, | ||
370 | 3 | unit_get, | ||
371 | 4 | ) | ||
372 | 5 | |||
373 | 6 | from charmhelpers.contrib.network.ip import ( | ||
374 | 7 | get_address_in_network, | ||
375 | 8 | is_address_in_network, | ||
376 | 9 | is_ipv6, | ||
377 | 10 | ) | ||
378 | 11 | |||
379 | 12 | from charmhelpers.contrib.hahelpers.cluster import is_clustered | ||
380 | 13 | |||
381 | 14 | PUBLIC = 'public' | ||
382 | 15 | INTERNAL = 'int' | ||
383 | 16 | ADMIN = 'admin' | ||
384 | 17 | |||
385 | 18 | _address_map = { | ||
386 | 19 | PUBLIC: { | ||
387 | 20 | 'config': 'os-public-network', | ||
388 | 21 | 'fallback': 'public-address' | ||
389 | 22 | }, | ||
390 | 23 | INTERNAL: { | ||
391 | 24 | 'config': 'os-internal-network', | ||
392 | 25 | 'fallback': 'private-address' | ||
393 | 26 | }, | ||
394 | 27 | ADMIN: { | ||
395 | 28 | 'config': 'os-admin-network', | ||
396 | 29 | 'fallback': 'private-address' | ||
397 | 30 | } | ||
398 | 31 | } | ||
399 | 32 | |||
400 | 33 | |||
401 | 34 | def canonical_url(configs, endpoint_type=PUBLIC): | ||
402 | 35 | ''' | ||
403 | 36 | Returns the correct HTTP URL to this host given the state of HTTPS | ||
404 | 37 | configuration, hacluster and charm configuration. | ||
405 | 38 | |||
406 | 39 | :configs OSTemplateRenderer: A config tempating object to inspect for | ||
407 | 40 | a complete https context. | ||
408 | 41 | :endpoint_type str: The endpoint type to resolve. | ||
409 | 42 | |||
410 | 43 | :returns str: Base URL for services on the current service unit. | ||
411 | 44 | ''' | ||
412 | 45 | scheme = 'http' | ||
413 | 46 | if 'https' in configs.complete_contexts(): | ||
414 | 47 | scheme = 'https' | ||
415 | 48 | address = resolve_address(endpoint_type) | ||
416 | 49 | if is_ipv6(address): | ||
417 | 50 | address = "[{}]".format(address) | ||
418 | 51 | return '%s://%s' % (scheme, address) | ||
419 | 52 | |||
420 | 53 | |||
421 | 54 | def resolve_address(endpoint_type=PUBLIC): | ||
422 | 55 | resolved_address = None | ||
423 | 56 | if is_clustered(): | ||
424 | 57 | if config(_address_map[endpoint_type]['config']) is None: | ||
425 | 58 | # Assume vip is simple and pass back directly | ||
426 | 59 | resolved_address = config('vip') | ||
427 | 60 | else: | ||
428 | 61 | for vip in config('vip').split(): | ||
429 | 62 | if is_address_in_network( | ||
430 | 63 | config(_address_map[endpoint_type]['config']), | ||
431 | 64 | vip): | ||
432 | 65 | resolved_address = vip | ||
433 | 66 | else: | ||
434 | 67 | resolved_address = get_address_in_network( | ||
435 | 68 | config(_address_map[endpoint_type]['config']), | ||
436 | 69 | unit_get(_address_map[endpoint_type]['fallback']) | ||
437 | 70 | ) | ||
438 | 71 | if resolved_address is None: | ||
439 | 72 | raise ValueError('Unable to resolve a suitable IP address' | ||
440 | 73 | ' based on charm state and configuration') | ||
441 | 74 | else: | ||
442 | 75 | return resolved_address | ||
443 | 0 | 76 | ||
444 | === added file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' | |||
445 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000 | |||
446 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2014-07-25 09:37:37 +0000 | |||
447 | @@ -0,0 +1,15 @@ | |||
448 | 1 | ############################################################################### | ||
449 | 2 | # [ WARNING ] | ||
450 | 3 | # cinder configuration file maintained by Juju | ||
451 | 4 | # local changes may be overwritten. | ||
452 | 5 | ############################################################################### | ||
453 | 6 | [global] | ||
454 | 7 | {% if auth -%} | ||
455 | 8 | auth_supported = {{ auth }} | ||
456 | 9 | keyring = /etc/ceph/$cluster.$name.keyring | ||
457 | 10 | mon host = {{ mon_hosts }} | ||
458 | 11 | {% endif -%} | ||
459 | 12 | log to syslog = {{ use_syslog }} | ||
460 | 13 | err to syslog = {{ use_syslog }} | ||
461 | 14 | clog to syslog = {{ use_syslog }} | ||
462 | 15 | |||
463 | 0 | 16 | ||
464 | === added file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' | |||
465 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000 | |||
466 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-25 09:37:37 +0000 | |||
467 | @@ -0,0 +1,41 @@ | |||
468 | 1 | global | ||
469 | 2 | log 127.0.0.1 local0 | ||
470 | 3 | log 127.0.0.1 local1 notice | ||
471 | 4 | maxconn 20000 | ||
472 | 5 | user haproxy | ||
473 | 6 | group haproxy | ||
474 | 7 | spread-checks 0 | ||
475 | 8 | |||
476 | 9 | defaults | ||
477 | 10 | log global | ||
478 | 11 | mode tcp | ||
479 | 12 | option tcplog | ||
480 | 13 | option dontlognull | ||
481 | 14 | retries 3 | ||
482 | 15 | timeout queue 1000 | ||
483 | 16 | timeout connect 1000 | ||
484 | 17 | timeout client 30000 | ||
485 | 18 | timeout server 30000 | ||
486 | 19 | |||
487 | 20 | listen stats :8888 | ||
488 | 21 | mode http | ||
489 | 22 | stats enable | ||
490 | 23 | stats hide-version | ||
491 | 24 | stats realm Haproxy\ Statistics | ||
492 | 25 | stats uri / | ||
493 | 26 | stats auth admin:password | ||
494 | 27 | |||
495 | 28 | {% if units -%} | ||
496 | 29 | {% for service, ports in service_ports.iteritems() -%} | ||
497 | 30 | listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }} | ||
498 | 31 | balance roundrobin | ||
499 | 32 | {% for unit, address in units.iteritems() -%} | ||
500 | 33 | server {{ unit }} {{ address }}:{{ ports[1] }} check | ||
501 | 34 | {% endfor %} | ||
502 | 35 | listen {{ service }}_ipv6 :::{{ ports[0] }} | ||
503 | 36 | balance roundrobin | ||
504 | 37 | {% for unit, address in units.iteritems() -%} | ||
505 | 38 | server {{ unit }} {{ address }}:{{ ports[1] }} check | ||
506 | 39 | {% endfor %} | ||
507 | 40 | {% endfor -%} | ||
508 | 41 | {% endif -%} | ||
509 | 0 | 42 | ||
510 | === added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend' | |||
511 | --- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 1970-01-01 00:00:00 +0000 | |||
512 | +++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2014-07-25 09:37:37 +0000 | |||
513 | @@ -0,0 +1,23 @@ | |||
514 | 1 | {% if endpoints -%} | ||
515 | 2 | {% for ext, int in endpoints -%} | ||
516 | 3 | Listen {{ ext }} | ||
517 | 4 | NameVirtualHost *:{{ ext }} | ||
518 | 5 | <VirtualHost *:{{ ext }}> | ||
519 | 6 | ServerName {{ private_address }} | ||
520 | 7 | SSLEngine on | ||
521 | 8 | SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert | ||
522 | 9 | SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key | ||
523 | 10 | ProxyPass / http://localhost:{{ int }}/ | ||
524 | 11 | ProxyPassReverse / http://localhost:{{ int }}/ | ||
525 | 12 | ProxyPreserveHost on | ||
526 | 13 | </VirtualHost> | ||
527 | 14 | <Proxy *> | ||
528 | 15 | Order deny,allow | ||
529 | 16 | Allow from all | ||
530 | 17 | </Proxy> | ||
531 | 18 | <Location /> | ||
532 | 19 | Order allow,deny | ||
533 | 20 | Allow from all | ||
534 | 21 | </Location> | ||
535 | 22 | {% endfor -%} | ||
536 | 23 | {% endif -%} | ||
537 | 0 | 24 | ||
538 | === added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf' | |||
539 | --- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 1970-01-01 00:00:00 +0000 | |||
540 | +++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2014-07-25 09:37:37 +0000 | |||
541 | @@ -0,0 +1,23 @@ | |||
542 | 1 | {% if endpoints -%} | ||
543 | 2 | {% for ext, int in endpoints -%} | ||
544 | 3 | Listen {{ ext }} | ||
545 | 4 | NameVirtualHost *:{{ ext }} | ||
546 | 5 | <VirtualHost *:{{ ext }}> | ||
547 | 6 | ServerName {{ private_address }} | ||
548 | 7 | SSLEngine on | ||
549 | 8 | SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert | ||
550 | 9 | SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key | ||
551 | 10 | ProxyPass / http://localhost:{{ int }}/ | ||
552 | 11 | ProxyPassReverse / http://localhost:{{ int }}/ | ||
553 | 12 | ProxyPreserveHost on | ||
554 | 13 | </VirtualHost> | ||
555 | 14 | <Proxy *> | ||
556 | 15 | Order deny,allow | ||
557 | 16 | Allow from all | ||
558 | 17 | </Proxy> | ||
559 | 18 | <Location /> | ||
560 | 19 | Order allow,deny | ||
561 | 20 | Allow from all | ||
562 | 21 | </Location> | ||
563 | 22 | {% endfor -%} | ||
564 | 23 | {% endif -%} | ||
565 | 0 | 24 | ||
566 | === modified file 'hooks/charmhelpers/core/host.py' | |||
567 | --- hooks/charmhelpers/core/host.py 2014-07-10 21:43:51 +0000 | |||
568 | +++ hooks/charmhelpers/core/host.py 2014-07-25 09:37:37 +0000 | |||
569 | @@ -322,6 +322,10 @@ | |||
570 | 322 | import apt_pkg | 322 | import apt_pkg |
571 | 323 | if not pkgcache: | 323 | if not pkgcache: |
572 | 324 | apt_pkg.init() | 324 | apt_pkg.init() |
573 | 325 | # Force Apt to build its cache in memory. That way we avoid race | ||
574 | 326 | # conditions with other applications building the cache in the same | ||
575 | 327 | # place. | ||
576 | 328 | apt_pkg.config.set("Dir::Cache::pkgcache", "") | ||
577 | 325 | pkgcache = apt_pkg.Cache() | 329 | pkgcache = apt_pkg.Cache() |
578 | 326 | pkg = pkgcache[package] | 330 | pkg = pkgcache[package] |
579 | 327 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) | 331 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
580 | 328 | 332 | ||
581 | === added symlink 'hooks/cluster-relation-joined' | |||
582 | === target is u'glance_relations.py' | |||
583 | === modified file 'hooks/glance_contexts.py' | |||
584 | --- hooks/glance_contexts.py 2014-04-16 08:18:06 +0000 | |||
585 | +++ hooks/glance_contexts.py 2014-07-25 09:37:37 +0000 | |||
586 | @@ -78,5 +78,6 @@ | |||
587 | 78 | 78 | ||
588 | 79 | 79 | ||
589 | 80 | class LoggingConfigContext(OSContextGenerator): | 80 | class LoggingConfigContext(OSContextGenerator): |
590 | 81 | |||
591 | 81 | def __call__(self): | 82 | def __call__(self): |
592 | 82 | return {'debug': config('debug'), 'verbose': config('verbose')} | 83 | return {'debug': config('debug'), 'verbose': config('verbose')} |
593 | 83 | 84 | ||
594 | === modified file 'hooks/glance_relations.py' | |||
595 | --- hooks/glance_relations.py 2014-04-10 15:48:30 +0000 | |||
596 | +++ hooks/glance_relations.py 2014-07-25 09:37:37 +0000 | |||
597 | @@ -44,7 +44,9 @@ | |||
598 | 44 | ) | 44 | ) |
599 | 45 | 45 | ||
600 | 46 | from charmhelpers.contrib.hahelpers.cluster import ( | 46 | from charmhelpers.contrib.hahelpers.cluster import ( |
602 | 47 | canonical_url, eligible_leader) | 47 | eligible_leader, |
603 | 48 | get_hacluster_config | ||
604 | 49 | ) | ||
605 | 48 | 50 | ||
606 | 49 | from charmhelpers.contrib.openstack.utils import ( | 51 | from charmhelpers.contrib.openstack.utils import ( |
607 | 50 | configure_installation_source, | 52 | configure_installation_source, |
608 | @@ -54,6 +56,15 @@ | |||
609 | 54 | 56 | ||
610 | 55 | from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring | 57 | from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring |
611 | 56 | from charmhelpers.payload.execd import execd_preinstall | 58 | from charmhelpers.payload.execd import execd_preinstall |
612 | 59 | from charmhelpers.contrib.network.ip import ( | ||
613 | 60 | get_address_in_network, | ||
614 | 61 | get_netmask_for_address, | ||
615 | 62 | get_iface_for_address, | ||
616 | 63 | ) | ||
617 | 64 | from charmhelpers.contrib.openstack.ip import ( | ||
618 | 65 | canonical_url, | ||
619 | 66 | PUBLIC, INTERNAL, ADMIN | ||
620 | 67 | ) | ||
621 | 57 | 68 | ||
622 | 58 | from subprocess import ( | 69 | from subprocess import ( |
623 | 59 | check_call, | 70 | check_call, |
624 | @@ -70,7 +81,7 @@ | |||
625 | 70 | execd_preinstall() | 81 | execd_preinstall() |
626 | 71 | src = config('openstack-origin') | 82 | src = config('openstack-origin') |
627 | 72 | if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and | 83 | if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and |
629 | 73 | src == 'distro'): | 84 | src == 'distro'): |
630 | 74 | src = 'cloud:precise-folsom' | 85 | src = 'cloud:precise-folsom' |
631 | 75 | 86 | ||
632 | 76 | configure_installation_source(src) | 87 | configure_installation_source(src) |
633 | @@ -163,7 +174,8 @@ | |||
634 | 163 | return | 174 | return |
635 | 164 | 175 | ||
636 | 165 | relation_data = { | 176 | relation_data = { |
638 | 166 | 'glance-api-server': canonical_url(CONFIGS) + ":9292" | 177 | 'glance-api-server': |
639 | 178 | "{}:9292".format(canonical_url(CONFIGS, INTERNAL)) | ||
640 | 167 | } | 179 | } |
641 | 168 | 180 | ||
642 | 169 | juju_log("%s: image-service_joined: To peer glance-api-server=%s" % | 181 | juju_log("%s: image-service_joined: To peer glance-api-server=%s" % |
643 | @@ -222,13 +234,15 @@ | |||
644 | 222 | juju_log('Deferring keystone_joined() to service leader.') | 234 | juju_log('Deferring keystone_joined() to service leader.') |
645 | 223 | return | 235 | return |
646 | 224 | 236 | ||
648 | 225 | url = canonical_url(CONFIGS) + ":9292" | 237 | public_url = '{}:9292'.format(canonical_url(CONFIGS, PUBLIC)) |
649 | 238 | internal_url = '{}:9292'.format(canonical_url(CONFIGS, INTERNAL)) | ||
650 | 239 | admin_url = '{}:9292'.format(canonical_url(CONFIGS, ADMIN)) | ||
651 | 226 | relation_data = { | 240 | relation_data = { |
652 | 227 | 'service': 'glance', | 241 | 'service': 'glance', |
653 | 228 | 'region': config('region'), | 242 | 'region': config('region'), |
657 | 229 | 'public_url': url, | 243 | 'public_url': public_url, |
658 | 230 | 'admin_url': url, | 244 | 'admin_url': admin_url, |
659 | 231 | 'internal_url': url, } | 245 | 'internal_url': internal_url, } |
660 | 232 | 246 | ||
661 | 233 | relation_set(relation_id=relation_id, **relation_data) | 247 | relation_set(relation_id=relation_id, **relation_data) |
662 | 234 | 248 | ||
663 | @@ -265,10 +279,19 @@ | |||
664 | 265 | open_port(9292) | 279 | open_port(9292) |
665 | 266 | configure_https() | 280 | configure_https() |
666 | 267 | 281 | ||
671 | 268 | # env_vars = {'OPENSTACK_PORT_MCASTPORT': config("ha-mcastport"), | 282 | # Pickup and changes due to network reference architecture |
672 | 269 | # 'OPENSTACK_SERVICE_API': "glance-api", | 283 | # configuration |
673 | 270 | # 'OPENSTACK_SERVICE_REGISTRY': "glance-registry"} | 284 | [keystone_joined(rid) for rid in relation_ids('identity-service')] |
674 | 271 | # save_script_rc(**env_vars) | 285 | [image_service_joined(rid) for rid in relation_ids('image-service')] |
675 | 286 | [cluster_joined(rid) for rid in relation_ids('cluster')] | ||
676 | 287 | |||
677 | 288 | |||
678 | 289 | @hooks.hook('cluster-relation-joined') | ||
679 | 290 | def cluster_joined(relation_id=None): | ||
680 | 291 | address = get_address_in_network(config('os-internal-network'), | ||
681 | 292 | unit_get('private-address')) | ||
682 | 293 | relation_set(relation_id=relation_id, | ||
683 | 294 | relation_settings={'private-address': address}) | ||
684 | 272 | 295 | ||
685 | 273 | 296 | ||
686 | 274 | @hooks.hook('cluster-relation-changed') | 297 | @hooks.hook('cluster-relation-changed') |
687 | @@ -289,33 +312,44 @@ | |||
688 | 289 | 312 | ||
689 | 290 | @hooks.hook('ha-relation-joined') | 313 | @hooks.hook('ha-relation-joined') |
690 | 291 | def ha_relation_joined(): | 314 | def ha_relation_joined(): |
699 | 292 | corosync_bindiface = config("ha-bindiface") | 315 | config = get_hacluster_config() |
692 | 293 | corosync_mcastport = config("ha-mcastport") | ||
693 | 294 | vip = config("vip") | ||
694 | 295 | vip_iface = config("vip_iface") | ||
695 | 296 | vip_cidr = config("vip_cidr") | ||
696 | 297 | |||
697 | 298 | # if vip and vip_iface and vip_cidr and \ | ||
698 | 299 | # corosync_bindiface and corosync_mcastport: | ||
700 | 300 | 316 | ||
701 | 301 | resources = { | 317 | resources = { |
704 | 302 | 'res_glance_vip': 'ocf:heartbeat:IPaddr2', | 318 | 'res_glance_haproxy': 'lsb:haproxy' |
705 | 303 | 'res_glance_haproxy': 'lsb:haproxy', } | 319 | } |
706 | 304 | 320 | ||
707 | 305 | resource_params = { | 321 | resource_params = { |
711 | 306 | 'res_glance_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % | 322 | 'res_glance_haproxy': 'op monitor interval="5s"' |
712 | 307 | (vip, vip_cidr, vip_iface), | 323 | } |
713 | 308 | 'res_glance_haproxy': 'op monitor interval="5s"', } | 324 | |
714 | 325 | vip_group = [] | ||
715 | 326 | for vip in config['vip'].split(): | ||
716 | 327 | iface = get_iface_for_address(vip) | ||
717 | 328 | if iface is not None: | ||
718 | 329 | vip_key = 'res_glance_{}_vip'.format(iface) | ||
719 | 330 | resources[vip_key] = 'ocf:heartbeat:IPaddr2' | ||
720 | 331 | resource_params[vip_key] = ( | ||
721 | 332 | 'params ip="{vip}" cidr_netmask="{netmask}"' | ||
722 | 333 | ' nic="{iface}"'.format(vip=vip, | ||
723 | 334 | iface=iface, | ||
724 | 335 | netmask=get_netmask_for_address(vip)) | ||
725 | 336 | ) | ||
726 | 337 | vip_group.append(vip_key) | ||
727 | 338 | |||
728 | 339 | if len(vip_group) > 1: | ||
729 | 340 | relation_set(groups={'grp_glance_vips': ' '.join(vip_group)}) | ||
730 | 309 | 341 | ||
731 | 310 | init_services = { | 342 | init_services = { |
733 | 311 | 'res_glance_haproxy': 'haproxy', } | 343 | 'res_glance_haproxy': 'haproxy', |
734 | 344 | } | ||
735 | 312 | 345 | ||
736 | 313 | clones = { | 346 | clones = { |
738 | 314 | 'cl_glance_haproxy': 'res_glance_haproxy', } | 347 | 'cl_glance_haproxy': 'res_glance_haproxy', |
739 | 348 | } | ||
740 | 315 | 349 | ||
741 | 316 | relation_set(init_services=init_services, | 350 | relation_set(init_services=init_services, |
744 | 317 | corosync_bindiface=corosync_bindiface, | 351 | corosync_bindiface=config['ha-bindiface'], |
745 | 318 | corosync_mcastport=corosync_mcastport, | 352 | corosync_mcastport=config['ha-mcastport'], |
746 | 319 | resources=resources, | 353 | resources=resources, |
747 | 320 | resource_params=resource_params, | 354 | resource_params=resource_params, |
748 | 321 | clones=clones) | 355 | clones=clones) |
749 | 322 | 356 | ||
750 | === modified file 'hooks/glance_utils.py' | |||
751 | --- hooks/glance_utils.py 2014-04-12 17:10:59 +0000 | |||
752 | +++ hooks/glance_utils.py 2014-07-25 09:37:37 +0000 | |||
753 | @@ -42,7 +42,7 @@ | |||
754 | 42 | get_os_codename_package, | 42 | get_os_codename_package, |
755 | 43 | configure_installation_source) | 43 | configure_installation_source) |
756 | 44 | 44 | ||
758 | 45 | CLUSTER_RES = "res_glance_vip" | 45 | CLUSTER_RES = "grp_glance_vips" |
759 | 46 | 46 | ||
760 | 47 | PACKAGES = [ | 47 | PACKAGES = [ |
761 | 48 | "apache2", "glance", "python-mysqldb", "python-swift", | 48 | "apache2", "glance", "python-mysqldb", "python-swift", |
762 | 49 | 49 | ||
763 | === removed file 'templates/haproxy.cfg' | |||
764 | --- templates/haproxy.cfg 2014-02-16 20:40:22 +0000 | |||
765 | +++ templates/haproxy.cfg 1970-01-01 00:00:00 +0000 | |||
766 | @@ -1,36 +0,0 @@ | |||
767 | 1 | global | ||
768 | 2 | log 127.0.0.1 local0 | ||
769 | 3 | log 127.0.0.1 local1 notice | ||
770 | 4 | maxconn 20000 | ||
771 | 5 | user haproxy | ||
772 | 6 | group haproxy | ||
773 | 7 | spread-checks 0 | ||
774 | 8 | |||
775 | 9 | defaults | ||
776 | 10 | log global | ||
777 | 11 | mode tcp | ||
778 | 12 | option tcplog | ||
779 | 13 | option dontlognull | ||
780 | 14 | retries 3 | ||
781 | 15 | timeout queue 1000 | ||
782 | 16 | timeout connect 1000 | ||
783 | 17 | timeout client 30000 | ||
784 | 18 | timeout server 30000 | ||
785 | 19 | |||
786 | 20 | listen stats :8888 | ||
787 | 21 | mode http | ||
788 | 22 | stats enable | ||
789 | 23 | stats hide-version | ||
790 | 24 | stats realm Haproxy\ Statistics | ||
791 | 25 | stats uri / | ||
792 | 26 | stats auth admin:password | ||
793 | 27 | |||
794 | 28 | {% if units %} | ||
795 | 29 | {% for service, ports in service_ports.iteritems() -%} | ||
796 | 30 | listen {{ service }} 0.0.0.0:{{ ports[0] }} | ||
797 | 31 | balance roundrobin | ||
798 | 32 | {% for unit, address in units.iteritems() -%} | ||
799 | 33 | server {{ unit }} {{ address }}:{{ ports[1] }} check | ||
800 | 34 | {% endfor %} | ||
801 | 35 | {% endfor %} | ||
802 | 36 | {% endif %} | ||
803 | 37 | 0 | ||
804 | === modified file 'tests/charmhelpers/contrib/amulet/deployment.py' | |||
805 | --- tests/charmhelpers/contrib/amulet/deployment.py 2014-07-10 21:43:51 +0000 | |||
806 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-25 09:37:37 +0000 | |||
807 | @@ -1,40 +1,35 @@ | |||
808 | 1 | import amulet | 1 | import amulet |
809 | 2 | import re | ||
810 | 3 | 2 | ||
811 | 4 | 3 | ||
812 | 5 | class AmuletDeployment(object): | 4 | class AmuletDeployment(object): |
813 | 6 | """This class provides generic Amulet deployment and test runner | 5 | """This class provides generic Amulet deployment and test runner |
814 | 7 | methods.""" | 6 | methods.""" |
815 | 8 | 7 | ||
817 | 9 | def __init__(self, series): | 8 | def __init__(self, series=None): |
818 | 10 | """Initialize the deployment environment.""" | 9 | """Initialize the deployment environment.""" |
821 | 11 | self.series = series | 10 | self.series = None |
820 | 12 | self.d = amulet.Deployment(series=self.series) | ||
822 | 13 | 11 | ||
828 | 14 | def _get_charm_name(self, service_name): | 12 | if series: |
829 | 15 | """Gets the charm name from the service name. Unique service names can | 13 | self.series = series |
830 | 16 | be specified with a '-service#' suffix (e.g. mysql-service1).""" | 14 | self.d = amulet.Deployment(series=self.series) |
826 | 17 | if re.match(r"^.*-service\d{1,3}$", service_name): | ||
827 | 18 | charm_name = re.sub('\-service\d{1,3}$', '', service_name) | ||
831 | 19 | else: | 15 | else: |
834 | 20 | charm_name = service_name | 16 | self.d = amulet.Deployment() |
833 | 21 | return charm_name | ||
835 | 22 | 17 | ||
836 | 23 | def _add_services(self, this_service, other_services): | 18 | def _add_services(self, this_service, other_services): |
837 | 24 | """Add services to the deployment where this_service is the local charm | 19 | """Add services to the deployment where this_service is the local charm |
838 | 25 | that we're focused on testing and other_services are the other | 20 | that we're focused on testing and other_services are the other |
839 | 26 | charms that come from the charm store.""" | 21 | charms that come from the charm store.""" |
840 | 27 | name, units = range(2) | 22 | name, units = range(2) |
845 | 28 | 23 | self.this_service = this_service[name] | |
846 | 29 | charm_name = self._get_charm_name(this_service[name]) | 24 | self.d.add(this_service[name], units=this_service[units]) |
843 | 30 | self.d.add(this_service[name], | ||
844 | 31 | units=this_service[units]) | ||
847 | 32 | 25 | ||
848 | 33 | for svc in other_services: | 26 | for svc in other_services: |
853 | 34 | charm_name = self._get_charm_name(svc[name]) | 27 | if self.series: |
854 | 35 | self.d.add(svc[name], | 28 | self.d.add(svc[name], |
855 | 36 | charm='cs:{}/{}'.format(self.series, charm_name), | 29 | charm='cs:{}/{}'.format(self.series, svc[name]), |
856 | 37 | units=svc[units]) | 30 | units=svc[units]) |
857 | 31 | else: | ||
858 | 32 | self.d.add(svc[name], units=svc[units]) | ||
859 | 38 | 33 | ||
860 | 39 | def _add_relations(self, relations): | 34 | def _add_relations(self, relations): |
861 | 40 | """Add all of the relations for the services.""" | 35 | """Add all of the relations for the services.""" |
862 | 41 | 36 | ||
863 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' | |||
864 | --- tests/charmhelpers/contrib/amulet/utils.py 2014-07-10 21:43:51 +0000 | |||
865 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-25 09:37:37 +0000 | |||
866 | @@ -139,11 +139,11 @@ | |||
867 | 139 | return self._get_dir_mtime(sentry_unit, proc_dir) | 139 | return self._get_dir_mtime(sentry_unit, proc_dir) |
868 | 140 | 140 | ||
869 | 141 | def service_restarted(self, sentry_unit, service, filename, | 141 | def service_restarted(self, sentry_unit, service, filename, |
871 | 142 | pgrep_full=False, sleep_time=20): | 142 | pgrep_full=False): |
872 | 143 | """Compare a service's start time vs a file's last modification time | 143 | """Compare a service's start time vs a file's last modification time |
873 | 144 | (such as a config file for that service) to determine if the service | 144 | (such as a config file for that service) to determine if the service |
874 | 145 | has been restarted.""" | 145 | has been restarted.""" |
876 | 146 | sleep(sleep_time) | 146 | sleep(10) |
877 | 147 | if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \ | 147 | if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \ |
878 | 148 | self._get_file_mtime(sentry_unit, filename): | 148 | self._get_file_mtime(sentry_unit, filename): |
879 | 149 | return True | 149 | return True |
880 | 150 | 150 | ||
881 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' | |||
882 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-10 21:43:51 +0000 | |||
883 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-25 09:37:37 +0000 | |||
884 | @@ -7,7 +7,7 @@ | |||
885 | 7 | """This class inherits from AmuletDeployment and has additional support | 7 | """This class inherits from AmuletDeployment and has additional support |
886 | 8 | that is specifically for use by OpenStack charms.""" | 8 | that is specifically for use by OpenStack charms.""" |
887 | 9 | 9 | ||
889 | 10 | def __init__(self, series, openstack=None, source=None): | 10 | def __init__(self, series=None, openstack=None, source=None): |
890 | 11 | """Initialize the deployment environment.""" | 11 | """Initialize the deployment environment.""" |
891 | 12 | super(OpenStackAmuletDeployment, self).__init__(series) | 12 | super(OpenStackAmuletDeployment, self).__init__(series) |
892 | 13 | self.openstack = openstack | 13 | self.openstack = openstack |
893 | @@ -24,15 +24,13 @@ | |||
894 | 24 | 24 | ||
895 | 25 | if self.openstack: | 25 | if self.openstack: |
896 | 26 | for svc in services: | 26 | for svc in services: |
899 | 27 | charm_name = self._get_charm_name(svc[name]) | 27 | if svc[name] not in use_source: |
898 | 28 | if charm_name not in use_source: | ||
900 | 29 | config = {'openstack-origin': self.openstack} | 28 | config = {'openstack-origin': self.openstack} |
901 | 30 | self.d.configure(svc[name], config) | 29 | self.d.configure(svc[name], config) |
902 | 31 | 30 | ||
903 | 32 | if self.source: | 31 | if self.source: |
904 | 33 | for svc in services: | 32 | for svc in services: |
907 | 34 | charm_name = self._get_charm_name(svc[name]) | 33 | if svc[name] in use_source: |
906 | 35 | if charm_name in use_source: | ||
908 | 36 | config = {'source': self.source} | 34 | config = {'source': self.source} |
909 | 37 | self.d.configure(svc[name], config) | 35 | self.d.configure(svc[name], config) |
910 | 38 | 36 | ||
911 | 39 | 37 | ||
912 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' | |||
913 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-10 21:43:51 +0000 | |||
914 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-25 09:37:37 +0000 | |||
915 | @@ -177,40 +177,12 @@ | |||
916 | 177 | image = glance.images.create(name=image_name, is_public=True, | 177 | image = glance.images.create(name=image_name, is_public=True, |
917 | 178 | disk_format='qcow2', | 178 | disk_format='qcow2', |
918 | 179 | container_format='bare', data=f) | 179 | container_format='bare', data=f) |
919 | 180 | count = 1 | ||
920 | 181 | status = image.status | ||
921 | 182 | while status != 'active' and count < 10: | ||
922 | 183 | time.sleep(3) | ||
923 | 184 | image = glance.images.get(image.id) | ||
924 | 185 | status = image.status | ||
925 | 186 | self.log.debug('image status: {}'.format(status)) | ||
926 | 187 | count += 1 | ||
927 | 188 | |||
928 | 189 | if status != 'active': | ||
929 | 190 | self.log.error('image creation timed out') | ||
930 | 191 | return None | ||
931 | 192 | |||
932 | 193 | return image | 180 | return image |
933 | 194 | 181 | ||
934 | 195 | def delete_image(self, glance, image): | 182 | def delete_image(self, glance, image): |
935 | 196 | """Delete the specified image.""" | 183 | """Delete the specified image.""" |
936 | 197 | num_before = len(list(glance.images.list())) | ||
937 | 198 | glance.images.delete(image) | 184 | glance.images.delete(image) |
938 | 199 | 185 | ||
939 | 200 | count = 1 | ||
940 | 201 | num_after = len(list(glance.images.list())) | ||
941 | 202 | while num_after != (num_before - 1) and count < 10: | ||
942 | 203 | time.sleep(3) | ||
943 | 204 | num_after = len(list(glance.images.list())) | ||
944 | 205 | self.log.debug('number of images: {}'.format(num_after)) | ||
945 | 206 | count += 1 | ||
946 | 207 | |||
947 | 208 | if num_after != (num_before - 1): | ||
948 | 209 | self.log.error('image deletion timed out') | ||
949 | 210 | return False | ||
950 | 211 | |||
951 | 212 | return True | ||
952 | 213 | |||
953 | 214 | def create_instance(self, nova, image_name, instance_name, flavor): | 186 | def create_instance(self, nova, image_name, instance_name, flavor): |
954 | 215 | """Create the specified instance.""" | 187 | """Create the specified instance.""" |
955 | 216 | image = nova.images.find(name=image_name) | 188 | image = nova.images.find(name=image_name) |
956 | @@ -227,27 +199,11 @@ | |||
957 | 227 | self.log.debug('instance status: {}'.format(status)) | 199 | self.log.debug('instance status: {}'.format(status)) |
958 | 228 | count += 1 | 200 | count += 1 |
959 | 229 | 201 | ||
962 | 230 | if status != 'ACTIVE': | 202 | if status == 'BUILD': |
961 | 231 | self.log.error('instance creation timed out') | ||
963 | 232 | return None | 203 | return None |
964 | 233 | 204 | ||
965 | 234 | return instance | 205 | return instance |
966 | 235 | 206 | ||
967 | 236 | def delete_instance(self, nova, instance): | 207 | def delete_instance(self, nova, instance): |
968 | 237 | """Delete the specified instance.""" | 208 | """Delete the specified instance.""" |
969 | 238 | num_before = len(list(nova.servers.list())) | ||
970 | 239 | nova.servers.delete(instance) | 209 | nova.servers.delete(instance) |
971 | 240 | |||
972 | 241 | count = 1 | ||
973 | 242 | num_after = len(list(nova.servers.list())) | ||
974 | 243 | while num_after != (num_before - 1) and count < 10: | ||
975 | 244 | time.sleep(3) | ||
976 | 245 | num_after = len(list(nova.servers.list())) | ||
977 | 246 | self.log.debug('number of instances: {}'.format(num_after)) | ||
978 | 247 | count += 1 | ||
979 | 248 | |||
980 | 249 | if num_after != (num_before - 1): | ||
981 | 250 | self.log.error('instance deletion timed out') | ||
982 | 251 | return False | ||
983 | 252 | |||
984 | 253 | return True | ||
985 | 254 | 210 | ||
986 | === modified file 'unit_tests/test_glance_relations.py' | |||
987 | --- unit_tests/test_glance_relations.py 2014-03-31 11:38:11 +0000 | |||
988 | +++ unit_tests/test_glance_relations.py 2014-07-25 09:37:37 +0000 | |||
989 | @@ -54,7 +54,10 @@ | |||
990 | 54 | 'check_call', | 54 | 'check_call', |
991 | 55 | 'execd_preinstall', | 55 | 'execd_preinstall', |
992 | 56 | 'lsb_release', | 56 | 'lsb_release', |
994 | 57 | 'filter_installed_packages' | 57 | 'filter_installed_packages', |
995 | 58 | 'get_hacluster_config', | ||
996 | 59 | 'get_netmask_for_address', | ||
997 | 60 | 'get_iface_for_address' | ||
998 | 58 | ] | 61 | ] |
999 | 59 | 62 | ||
1000 | 60 | 63 | ||
1001 | @@ -435,21 +438,23 @@ | |||
1002 | 435 | self.assertTrue(configs.write_all.called) | 438 | self.assertTrue(configs.write_all.called) |
1003 | 436 | 439 | ||
1004 | 437 | def test_ha_relation_joined(self): | 440 | def test_ha_relation_joined(self): |
1010 | 438 | self.test_config.set('ha-bindiface', 'em0') | 441 | self.get_hacluster_config.return_value = { |
1011 | 439 | self.test_config.set('ha-mcastport', '8080') | 442 | 'ha-bindiface': 'em0', |
1012 | 440 | self.test_config.set('vip', '10.10.10.10') | 443 | 'ha-mcastport': '8080', |
1013 | 441 | self.test_config.set('vip_iface', 'em1') | 444 | 'vip': '10.10.10.10', |
1014 | 442 | self.test_config.set('vip_cidr', '24') | 445 | } |
1015 | 446 | self.get_iface_for_address.return_value = 'eth1' | ||
1016 | 447 | self.get_netmask_for_address.return_value = '255.255.0.0' | ||
1017 | 443 | relations.ha_relation_joined() | 448 | relations.ha_relation_joined() |
1018 | 444 | args = { | 449 | args = { |
1019 | 445 | 'corosync_bindiface': 'em0', | 450 | 'corosync_bindiface': 'em0', |
1020 | 446 | 'corosync_mcastport': '8080', | 451 | 'corosync_mcastport': '8080', |
1021 | 447 | 'init_services': {'res_glance_haproxy': 'haproxy'}, | 452 | 'init_services': {'res_glance_haproxy': 'haproxy'}, |
1023 | 448 | 'resources': {'res_glance_vip': 'ocf:heartbeat:IPaddr2', | 453 | 'resources': {'res_glance_eth1_vip': 'ocf:heartbeat:IPaddr2', |
1024 | 449 | 'res_glance_haproxy': 'lsb:haproxy'}, | 454 | 'res_glance_haproxy': 'lsb:haproxy'}, |
1025 | 450 | 'resource_params': { | 455 | 'resource_params': { |
1028 | 451 | 'res_glance_vip': 'params ip="10.10.10.10"' | 456 | 'res_glance_eth1_vip': 'params ip="10.10.10.10"' |
1029 | 452 | ' cidr_netmask="24" nic="em1"', | 457 | ' cidr_netmask="255.255.0.0" nic="eth1"', |
1030 | 453 | 'res_glance_haproxy': 'op monitor interval="5s"'}, | 458 | 'res_glance_haproxy': 'op monitor interval="5s"'}, |
1031 | 454 | 'clones': {'cl_glance_haproxy': 'res_glance_haproxy'} | 459 | 'clones': {'cl_glance_haproxy': 'res_glance_haproxy'} |
1032 | 455 | } | 460 | } |
1033 | 456 | 461 | ||
1034 | === modified file 'unit_tests/test_utils.py' | |||
1035 | --- unit_tests/test_utils.py 2014-01-15 13:04:21 +0000 | |||
1036 | +++ unit_tests/test_utils.py 2014-07-25 09:37:37 +0000 | |||
1037 | @@ -80,9 +80,9 @@ | |||
1038 | 80 | return self.config | 80 | return self.config |
1039 | 81 | 81 | ||
1040 | 82 | def set(self, attr, value): | 82 | def set(self, attr, value): |
1044 | 83 | if attr not in self.config: | 83 | if attr not in self.config: |
1045 | 84 | raise KeyError | 84 | raise KeyError |
1046 | 85 | self.config[attr] = value | 85 | self.config[attr] = value |
1047 | 86 | 86 | ||
1048 | 87 | 87 | ||
1049 | 88 | class TestRelation(object): | 88 | class TestRelation(object): |
This has merge conflicts artifacts in the diff.