Merge lp:~james-page/charms/trusty/glance/network-splits into lp:~openstack-charmers-archive/charms/trusty/glance/next
- Trusty Tahr (14.04)
- network-splits
- Merge into next
Proposed by
James Page
Status: | Merged |
---|---|
Merged at revision: | 55 |
Proposed branch: | lp:~james-page/charms/trusty/glance/network-splits |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/glance/next |
Diff against target: |
1049 lines (+511/-168) 23 files modified
.bzrignore (+2/-0) Makefile (+11/-5) charm-helpers-hooks.yaml (+2/-1) config.yaml (+28/-9) hooks/charmhelpers/contrib/hahelpers/cluster.py (+2/-2) hooks/charmhelpers/contrib/network/ip.py (+156/-0) hooks/charmhelpers/contrib/openstack/context.py (+28/-3) hooks/charmhelpers/contrib/openstack/ip.py (+75/-0) hooks/charmhelpers/contrib/openstack/templates/ceph.conf (+15/-0) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+41/-0) hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend (+23/-0) hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf (+23/-0) hooks/charmhelpers/core/host.py (+4/-0) hooks/glance_contexts.py (+1/-0) hooks/glance_relations.py (+62/-28) hooks/glance_utils.py (+1/-1) templates/haproxy.cfg (+0/-36) tests/charmhelpers/contrib/amulet/deployment.py (+14/-19) tests/charmhelpers/contrib/amulet/utils.py (+2/-2) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+3/-5) tests/charmhelpers/contrib/openstack/amulet/utils.py (+1/-45) unit_tests/test_glance_relations.py (+14/-9) unit_tests/test_utils.py (+3/-3) |
To merge this branch: | bzr merge lp:~james-page/charms/trusty/glance/network-splits |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
James Page | Needs Resubmitting | ||
James Troup (community) | Needs Fixing | ||
OpenStack Charmers | Pending | ||
Review via email: mp+228141@code.launchpad.net |
Commit message
Description of the change
Add support for multiple network configurations.
To post a comment you must log in.
- 72. By James Page
-
Rebase
Revision history for this message
James Page (james-page) : | # |
review:
Needs Resubmitting
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file '.bzrignore' |
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 |
3 | +++ .bzrignore 2014-07-25 09:37:37 +0000 |
4 | @@ -0,0 +1,2 @@ |
5 | +.coverage |
6 | +bin |
7 | |
8 | === modified file 'Makefile' |
9 | --- Makefile 2014-07-18 09:45:47 +0000 |
10 | +++ Makefile 2014-07-25 09:37:37 +0000 |
11 | @@ -1,4 +1,5 @@ |
12 | #!/usr/bin/make |
13 | +PYTHON := /usr/bin/env python |
14 | |
15 | lint: |
16 | @echo "Running flake8 tests: " |
17 | @@ -8,12 +9,17 @@ |
18 | @charm proof |
19 | @echo "OK" |
20 | |
21 | -sync: |
22 | - @charm-helper-sync -c charm-helpers-hooks.yaml |
23 | - @charm-helper-sync -c charm-helpers-tests.yaml |
24 | - |
25 | unit_test: |
26 | - @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests |
27 | + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests |
28 | + |
29 | +bin/charm_helpers_sync.py: |
30 | + @mkdir -p bin |
31 | + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ |
32 | + > bin/charm_helpers_sync.py |
33 | + |
34 | +sync: bin/charm_helpers_sync.py |
35 | + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml |
36 | + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml |
37 | |
38 | test: |
39 | @echo Starting Amulet tests... |
40 | |
41 | === modified file 'charm-helpers-hooks.yaml' |
42 | --- charm-helpers-hooks.yaml 2014-06-24 19:54:45 +0000 |
43 | +++ charm-helpers-hooks.yaml 2014-07-25 09:37:37 +0000 |
44 | @@ -3,7 +3,8 @@ |
45 | include: |
46 | - core |
47 | - fetch |
48 | - - contrib.openstack |
49 | + - contrib.openstack|inc=* |
50 | - contrib.hahelpers |
51 | - contrib.storage.linux.ceph |
52 | - payload.execd |
53 | + - contrib.network.ip |
54 | |
55 | === modified file 'config.yaml' |
56 | --- config.yaml 2014-04-12 16:55:29 +0000 |
57 | +++ config.yaml 2014-07-25 09:37:37 +0000 |
58 | @@ -52,15 +52,11 @@ |
59 | # HA configuration settings |
60 | vip: |
61 | type: string |
62 | - description: "Virtual IP to use to front Glance API in ha configuration" |
63 | - vip_iface: |
64 | - type: string |
65 | - default: eth0 |
66 | - description: "Network Interface where to place the Virtual IP" |
67 | - vip_cidr: |
68 | - type: int |
69 | - default: 24 |
70 | - description: "Netmask that will be used for the Virtual IP" |
71 | + description: | |
72 | + Virtual IP(s) to use to front API services in HA configuration. |
73 | + . |
74 | + If multiple networks are being used, a VIP should be provided for each |
75 | + network, separated by spaces. |
76 | ha-bindiface: |
77 | type: string |
78 | default: eth0 |
79 | @@ -96,4 +92,27 @@ |
80 | default: openstack |
81 | type: string |
82 | description: RabbitMQ virtual host to request access on rabbitmq-server. |
83 | + # Network configuration options |
84 | + # by default all access is over 'private-address' |
85 | + os-admin-network: |
86 | + type: string |
87 | + description: | |
88 | + The IP address and netmask of the OpenStack Admin network (e.g., |
89 | + 192.168.0.0/24) |
90 | + . |
91 | + This network will be used for admin endpoints. |
92 | + os-internal-network: |
93 | + type: string |
94 | + description: | |
95 | + The IP address and netmask of the OpenStack Internal network (e.g., |
96 | + 192.168.0.0/24) |
97 | + . |
98 | + This network will be used for internal endpoints. |
99 | + os-public-network: |
100 | + type: string |
101 | + description: | |
102 | + The IP address and netmask of the OpenStack Public network (e.g., |
103 | + 192.168.0.0/24) |
104 | + . |
105 | + This network will be used for public endpoints. |
106 | |
107 | |
108 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' |
109 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-10 21:43:51 +0000 |
110 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-25 09:37:37 +0000 |
111 | @@ -146,12 +146,12 @@ |
112 | Obtains all relevant configuration from charm configuration required |
113 | for initiating a relation to hacluster: |
114 | |
115 | - ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr |
116 | + ha-bindiface, ha-mcastport, vip |
117 | |
118 | returns: dict: A dict containing settings keyed by setting name. |
119 | raises: HAIncompleteConfig if settings are missing. |
120 | ''' |
121 | - settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] |
122 | + settings = ['ha-bindiface', 'ha-mcastport', 'vip'] |
123 | conf = {} |
124 | for setting in settings: |
125 | conf[setting] = config_get(setting) |
126 | |
127 | === added directory 'hooks/charmhelpers/contrib/network' |
128 | === added file 'hooks/charmhelpers/contrib/network/__init__.py' |
129 | === added file 'hooks/charmhelpers/contrib/network/ip.py' |
130 | --- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 |
131 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-07-25 09:37:37 +0000 |
132 | @@ -0,0 +1,156 @@ |
133 | +import sys |
134 | + |
135 | +from functools import partial |
136 | + |
137 | +from charmhelpers.fetch import apt_install |
138 | +from charmhelpers.core.hookenv import ( |
139 | + ERROR, log, |
140 | +) |
141 | + |
142 | +try: |
143 | + import netifaces |
144 | +except ImportError: |
145 | + apt_install('python-netifaces') |
146 | + import netifaces |
147 | + |
148 | +try: |
149 | + import netaddr |
150 | +except ImportError: |
151 | + apt_install('python-netaddr') |
152 | + import netaddr |
153 | + |
154 | + |
155 | +def _validate_cidr(network): |
156 | + try: |
157 | + netaddr.IPNetwork(network) |
158 | + except (netaddr.core.AddrFormatError, ValueError): |
159 | + raise ValueError("Network (%s) is not in CIDR presentation format" % |
160 | + network) |
161 | + |
162 | + |
163 | +def get_address_in_network(network, fallback=None, fatal=False): |
164 | + """ |
165 | + Get an IPv4 or IPv6 address within the network from the host. |
166 | + |
167 | + :param network (str): CIDR presentation format. For example, |
168 | + '192.168.1.0/24'. |
169 | + :param fallback (str): If no address is found, return fallback. |
170 | + :param fatal (boolean): If no address is found, fallback is not |
171 | + set and fatal is True then exit(1). |
172 | + |
173 | + """ |
174 | + |
175 | + def not_found_error_out(): |
176 | + log("No IP address found in network: %s" % network, |
177 | + level=ERROR) |
178 | + sys.exit(1) |
179 | + |
180 | + if network is None: |
181 | + if fallback is not None: |
182 | + return fallback |
183 | + else: |
184 | + if fatal: |
185 | + not_found_error_out() |
186 | + |
187 | + _validate_cidr(network) |
188 | + network = netaddr.IPNetwork(network) |
189 | + for iface in netifaces.interfaces(): |
190 | + addresses = netifaces.ifaddresses(iface) |
191 | + if network.version == 4 and netifaces.AF_INET in addresses: |
192 | + addr = addresses[netifaces.AF_INET][0]['addr'] |
193 | + netmask = addresses[netifaces.AF_INET][0]['netmask'] |
194 | + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
195 | + if cidr in network: |
196 | + return str(cidr.ip) |
197 | + if network.version == 6 and netifaces.AF_INET6 in addresses: |
198 | + for addr in addresses[netifaces.AF_INET6]: |
199 | + if not addr['addr'].startswith('fe80'): |
200 | + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
201 | + addr['netmask'])) |
202 | + if cidr in network: |
203 | + return str(cidr.ip) |
204 | + |
205 | + if fallback is not None: |
206 | + return fallback |
207 | + |
208 | + if fatal: |
209 | + not_found_error_out() |
210 | + |
211 | + return None |
212 | + |
213 | + |
214 | +def is_ipv6(address): |
215 | + '''Determine whether provided address is IPv6 or not''' |
216 | + try: |
217 | + address = netaddr.IPAddress(address) |
218 | + except netaddr.AddrFormatError: |
219 | + # probably a hostname - so not an address at all! |
220 | + return False |
221 | + else: |
222 | + return address.version == 6 |
223 | + |
224 | + |
225 | +def is_address_in_network(network, address): |
226 | + """ |
227 | + Determine whether the provided address is within a network range. |
228 | + |
229 | + :param network (str): CIDR presentation format. For example, |
230 | + '192.168.1.0/24'. |
231 | + :param address: An individual IPv4 or IPv6 address without a net |
232 | + mask or subnet prefix. For example, '192.168.1.1'. |
233 | + :returns boolean: Flag indicating whether address is in network. |
234 | + """ |
235 | + try: |
236 | + network = netaddr.IPNetwork(network) |
237 | + except (netaddr.core.AddrFormatError, ValueError): |
238 | + raise ValueError("Network (%s) is not in CIDR presentation format" % |
239 | + network) |
240 | + try: |
241 | + address = netaddr.IPAddress(address) |
242 | + except (netaddr.core.AddrFormatError, ValueError): |
243 | + raise ValueError("Address (%s) is not in correct presentation format" % |
244 | + address) |
245 | + if address in network: |
246 | + return True |
247 | + else: |
248 | + return False |
249 | + |
250 | + |
251 | +def _get_for_address(address, key): |
252 | + """Retrieve an attribute of or the physical interface that |
253 | + the IP address provided could be bound to. |
254 | + |
255 | + :param address (str): An individual IPv4 or IPv6 address without a net |
256 | + mask or subnet prefix. For example, '192.168.1.1'. |
257 | + :param key: 'iface' for the physical interface name or an attribute |
258 | + of the configured interface, for example 'netmask'. |
259 | + :returns str: Requested attribute or None if address is not bindable. |
260 | + """ |
261 | + address = netaddr.IPAddress(address) |
262 | + for iface in netifaces.interfaces(): |
263 | + addresses = netifaces.ifaddresses(iface) |
264 | + if address.version == 4 and netifaces.AF_INET in addresses: |
265 | + addr = addresses[netifaces.AF_INET][0]['addr'] |
266 | + netmask = addresses[netifaces.AF_INET][0]['netmask'] |
267 | + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
268 | + if address in cidr: |
269 | + if key == 'iface': |
270 | + return iface |
271 | + else: |
272 | + return addresses[netifaces.AF_INET][0][key] |
273 | + if address.version == 6 and netifaces.AF_INET6 in addresses: |
274 | + for addr in addresses[netifaces.AF_INET6]: |
275 | + if not addr['addr'].startswith('fe80'): |
276 | + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
277 | + addr['netmask'])) |
278 | + if address in cidr: |
279 | + if key == 'iface': |
280 | + return iface |
281 | + else: |
282 | + return addr[key] |
283 | + return None |
284 | + |
285 | + |
286 | +get_iface_for_address = partial(_get_for_address, key='iface') |
287 | + |
288 | +get_netmask_for_address = partial(_get_for_address, key='netmask') |
289 | |
290 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
291 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-07-10 21:43:51 +0000 |
292 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-25 09:37:37 +0000 |
293 | @@ -21,6 +21,7 @@ |
294 | relation_get, |
295 | relation_ids, |
296 | related_units, |
297 | + relation_set, |
298 | unit_get, |
299 | unit_private_ip, |
300 | ERROR, |
301 | @@ -43,6 +44,8 @@ |
302 | neutron_plugin_attribute, |
303 | ) |
304 | |
305 | +from charmhelpers.contrib.network.ip import get_address_in_network |
306 | + |
307 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
308 | |
309 | |
310 | @@ -135,8 +138,26 @@ |
311 | 'Missing required charm config options. ' |
312 | '(database name and user)') |
313 | raise OSContextError |
314 | + |
315 | ctxt = {} |
316 | |
317 | + # NOTE(jamespage) if mysql charm provides a network upon which |
318 | + # access to the database should be made, reconfigure relation |
319 | + # with the service units local address and defer execution |
320 | + access_network = relation_get('access-network') |
321 | + if access_network is not None: |
322 | + if self.relation_prefix is not None: |
323 | + hostname_key = "{}_hostname".format(self.relation_prefix) |
324 | + else: |
325 | + hostname_key = "hostname" |
326 | + access_hostname = get_address_in_network(access_network, |
327 | + unit_get('private-address')) |
328 | + set_hostname = relation_get(attribute=hostname_key, |
329 | + unit=local_unit()) |
330 | + if set_hostname != access_hostname: |
331 | + relation_set(relation_settings={hostname_key: access_hostname}) |
332 | + return ctxt # Defer any further hook execution for now.... |
333 | + |
334 | password_setting = 'password' |
335 | if self.relation_prefix: |
336 | password_setting = self.relation_prefix + '_password' |
337 | @@ -341,10 +362,12 @@ |
338 | use_syslog = str(config('use-syslog')).lower() |
339 | for rid in relation_ids('ceph'): |
340 | for unit in related_units(rid): |
341 | - mon_hosts.append(relation_get('private-address', rid=rid, |
342 | - unit=unit)) |
343 | auth = relation_get('auth', rid=rid, unit=unit) |
344 | key = relation_get('key', rid=rid, unit=unit) |
345 | + ceph_addr = \ |
346 | + relation_get('ceph-public-address', rid=rid, unit=unit) or \ |
347 | + relation_get('private-address', rid=rid, unit=unit) |
348 | + mon_hosts.append(ceph_addr) |
349 | |
350 | ctxt = { |
351 | 'mon_hosts': ' '.join(mon_hosts), |
352 | @@ -378,7 +401,9 @@ |
353 | |
354 | cluster_hosts = {} |
355 | l_unit = local_unit().replace('/', '-') |
356 | - cluster_hosts[l_unit] = unit_get('private-address') |
357 | + cluster_hosts[l_unit] = \ |
358 | + get_address_in_network(config('os-internal-network'), |
359 | + unit_get('private-address')) |
360 | |
361 | for rid in relation_ids('cluster'): |
362 | for unit in related_units(rid): |
363 | |
364 | === added file 'hooks/charmhelpers/contrib/openstack/ip.py' |
365 | --- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000 |
366 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-07-25 09:37:37 +0000 |
367 | @@ -0,0 +1,75 @@ |
368 | +from charmhelpers.core.hookenv import ( |
369 | + config, |
370 | + unit_get, |
371 | +) |
372 | + |
373 | +from charmhelpers.contrib.network.ip import ( |
374 | + get_address_in_network, |
375 | + is_address_in_network, |
376 | + is_ipv6, |
377 | +) |
378 | + |
379 | +from charmhelpers.contrib.hahelpers.cluster import is_clustered |
380 | + |
381 | +PUBLIC = 'public' |
382 | +INTERNAL = 'int' |
383 | +ADMIN = 'admin' |
384 | + |
385 | +_address_map = { |
386 | + PUBLIC: { |
387 | + 'config': 'os-public-network', |
388 | + 'fallback': 'public-address' |
389 | + }, |
390 | + INTERNAL: { |
391 | + 'config': 'os-internal-network', |
392 | + 'fallback': 'private-address' |
393 | + }, |
394 | + ADMIN: { |
395 | + 'config': 'os-admin-network', |
396 | + 'fallback': 'private-address' |
397 | + } |
398 | +} |
399 | + |
400 | + |
401 | +def canonical_url(configs, endpoint_type=PUBLIC): |
402 | + ''' |
403 | + Returns the correct HTTP URL to this host given the state of HTTPS |
404 | + configuration, hacluster and charm configuration. |
405 | + |
406 | + :configs OSTemplateRenderer: A config tempating object to inspect for |
407 | + a complete https context. |
408 | + :endpoint_type str: The endpoint type to resolve. |
409 | + |
410 | + :returns str: Base URL for services on the current service unit. |
411 | + ''' |
412 | + scheme = 'http' |
413 | + if 'https' in configs.complete_contexts(): |
414 | + scheme = 'https' |
415 | + address = resolve_address(endpoint_type) |
416 | + if is_ipv6(address): |
417 | + address = "[{}]".format(address) |
418 | + return '%s://%s' % (scheme, address) |
419 | + |
420 | + |
421 | +def resolve_address(endpoint_type=PUBLIC): |
422 | + resolved_address = None |
423 | + if is_clustered(): |
424 | + if config(_address_map[endpoint_type]['config']) is None: |
425 | + # Assume vip is simple and pass back directly |
426 | + resolved_address = config('vip') |
427 | + else: |
428 | + for vip in config('vip').split(): |
429 | + if is_address_in_network( |
430 | + config(_address_map[endpoint_type]['config']), |
431 | + vip): |
432 | + resolved_address = vip |
433 | + else: |
434 | + resolved_address = get_address_in_network( |
435 | + config(_address_map[endpoint_type]['config']), |
436 | + unit_get(_address_map[endpoint_type]['fallback']) |
437 | + ) |
438 | + if resolved_address is None: |
439 | + raise ValueError('Unable to resolve a suitable IP address' |
440 | + ' based on charm state and configuration') |
441 | + else: |
442 | + return resolved_address |
443 | |
444 | === added file 'hooks/charmhelpers/contrib/openstack/templates/ceph.conf' |
445 | --- hooks/charmhelpers/contrib/openstack/templates/ceph.conf 1970-01-01 00:00:00 +0000 |
446 | +++ hooks/charmhelpers/contrib/openstack/templates/ceph.conf 2014-07-25 09:37:37 +0000 |
447 | @@ -0,0 +1,15 @@ |
448 | +############################################################################### |
449 | +# [ WARNING ] |
450 | +# cinder configuration file maintained by Juju |
451 | +# local changes may be overwritten. |
452 | +############################################################################### |
453 | +[global] |
454 | +{% if auth -%} |
455 | + auth_supported = {{ auth }} |
456 | + keyring = /etc/ceph/$cluster.$name.keyring |
457 | + mon host = {{ mon_hosts }} |
458 | +{% endif -%} |
459 | + log to syslog = {{ use_syslog }} |
460 | + err to syslog = {{ use_syslog }} |
461 | + clog to syslog = {{ use_syslog }} |
462 | + |
463 | |
464 | === added file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' |
465 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 1970-01-01 00:00:00 +0000 |
466 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-25 09:37:37 +0000 |
467 | @@ -0,0 +1,41 @@ |
468 | +global |
469 | + log 127.0.0.1 local0 |
470 | + log 127.0.0.1 local1 notice |
471 | + maxconn 20000 |
472 | + user haproxy |
473 | + group haproxy |
474 | + spread-checks 0 |
475 | + |
476 | +defaults |
477 | + log global |
478 | + mode tcp |
479 | + option tcplog |
480 | + option dontlognull |
481 | + retries 3 |
482 | + timeout queue 1000 |
483 | + timeout connect 1000 |
484 | + timeout client 30000 |
485 | + timeout server 30000 |
486 | + |
487 | +listen stats :8888 |
488 | + mode http |
489 | + stats enable |
490 | + stats hide-version |
491 | + stats realm Haproxy\ Statistics |
492 | + stats uri / |
493 | + stats auth admin:password |
494 | + |
495 | +{% if units -%} |
496 | +{% for service, ports in service_ports.iteritems() -%} |
497 | +listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }} |
498 | + balance roundrobin |
499 | + {% for unit, address in units.iteritems() -%} |
500 | + server {{ unit }} {{ address }}:{{ ports[1] }} check |
501 | + {% endfor %} |
502 | +listen {{ service }}_ipv6 :::{{ ports[0] }} |
503 | + balance roundrobin |
504 | + {% for unit, address in units.iteritems() -%} |
505 | + server {{ unit }} {{ address }}:{{ ports[1] }} check |
506 | + {% endfor %} |
507 | +{% endfor -%} |
508 | +{% endif -%} |
509 | |
510 | === added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend' |
511 | --- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 1970-01-01 00:00:00 +0000 |
512 | +++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend 2014-07-25 09:37:37 +0000 |
513 | @@ -0,0 +1,23 @@ |
514 | +{% if endpoints -%} |
515 | +{% for ext, int in endpoints -%} |
516 | +Listen {{ ext }} |
517 | +NameVirtualHost *:{{ ext }} |
518 | +<VirtualHost *:{{ ext }}> |
519 | + ServerName {{ private_address }} |
520 | + SSLEngine on |
521 | + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert |
522 | + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key |
523 | + ProxyPass / http://localhost:{{ int }}/ |
524 | + ProxyPassReverse / http://localhost:{{ int }}/ |
525 | + ProxyPreserveHost on |
526 | +</VirtualHost> |
527 | +<Proxy *> |
528 | + Order deny,allow |
529 | + Allow from all |
530 | +</Proxy> |
531 | +<Location /> |
532 | + Order allow,deny |
533 | + Allow from all |
534 | +</Location> |
535 | +{% endfor -%} |
536 | +{% endif -%} |
537 | |
538 | === added file 'hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf' |
539 | --- hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 1970-01-01 00:00:00 +0000 |
540 | +++ hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf 2014-07-25 09:37:37 +0000 |
541 | @@ -0,0 +1,23 @@ |
542 | +{% if endpoints -%} |
543 | +{% for ext, int in endpoints -%} |
544 | +Listen {{ ext }} |
545 | +NameVirtualHost *:{{ ext }} |
546 | +<VirtualHost *:{{ ext }}> |
547 | + ServerName {{ private_address }} |
548 | + SSLEngine on |
549 | + SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert |
550 | + SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key |
551 | + ProxyPass / http://localhost:{{ int }}/ |
552 | + ProxyPassReverse / http://localhost:{{ int }}/ |
553 | + ProxyPreserveHost on |
554 | +</VirtualHost> |
555 | +<Proxy *> |
556 | + Order deny,allow |
557 | + Allow from all |
558 | +</Proxy> |
559 | +<Location /> |
560 | + Order allow,deny |
561 | + Allow from all |
562 | +</Location> |
563 | +{% endfor -%} |
564 | +{% endif -%} |
565 | |
566 | === modified file 'hooks/charmhelpers/core/host.py' |
567 | --- hooks/charmhelpers/core/host.py 2014-07-10 21:43:51 +0000 |
568 | +++ hooks/charmhelpers/core/host.py 2014-07-25 09:37:37 +0000 |
569 | @@ -322,6 +322,10 @@ |
570 | import apt_pkg |
571 | if not pkgcache: |
572 | apt_pkg.init() |
573 | + # Force Apt to build its cache in memory. That way we avoid race |
574 | + # conditions with other applications building the cache in the same |
575 | + # place. |
576 | + apt_pkg.config.set("Dir::Cache::pkgcache", "") |
577 | pkgcache = apt_pkg.Cache() |
578 | pkg = pkgcache[package] |
579 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
580 | |
581 | === added symlink 'hooks/cluster-relation-joined' |
582 | === target is u'glance_relations.py' |
583 | === modified file 'hooks/glance_contexts.py' |
584 | --- hooks/glance_contexts.py 2014-04-16 08:18:06 +0000 |
585 | +++ hooks/glance_contexts.py 2014-07-25 09:37:37 +0000 |
586 | @@ -78,5 +78,6 @@ |
587 | |
588 | |
589 | class LoggingConfigContext(OSContextGenerator): |
590 | + |
591 | def __call__(self): |
592 | return {'debug': config('debug'), 'verbose': config('verbose')} |
593 | |
594 | === modified file 'hooks/glance_relations.py' |
595 | --- hooks/glance_relations.py 2014-04-10 15:48:30 +0000 |
596 | +++ hooks/glance_relations.py 2014-07-25 09:37:37 +0000 |
597 | @@ -44,7 +44,9 @@ |
598 | ) |
599 | |
600 | from charmhelpers.contrib.hahelpers.cluster import ( |
601 | - canonical_url, eligible_leader) |
602 | + eligible_leader, |
603 | + get_hacluster_config |
604 | +) |
605 | |
606 | from charmhelpers.contrib.openstack.utils import ( |
607 | configure_installation_source, |
608 | @@ -54,6 +56,15 @@ |
609 | |
610 | from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring |
611 | from charmhelpers.payload.execd import execd_preinstall |
612 | +from charmhelpers.contrib.network.ip import ( |
613 | + get_address_in_network, |
614 | + get_netmask_for_address, |
615 | + get_iface_for_address, |
616 | +) |
617 | +from charmhelpers.contrib.openstack.ip import ( |
618 | + canonical_url, |
619 | + PUBLIC, INTERNAL, ADMIN |
620 | +) |
621 | |
622 | from subprocess import ( |
623 | check_call, |
624 | @@ -70,7 +81,7 @@ |
625 | execd_preinstall() |
626 | src = config('openstack-origin') |
627 | if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and |
628 | - src == 'distro'): |
629 | + src == 'distro'): |
630 | src = 'cloud:precise-folsom' |
631 | |
632 | configure_installation_source(src) |
633 | @@ -163,7 +174,8 @@ |
634 | return |
635 | |
636 | relation_data = { |
637 | - 'glance-api-server': canonical_url(CONFIGS) + ":9292" |
638 | + 'glance-api-server': |
639 | + "{}:9292".format(canonical_url(CONFIGS, INTERNAL)) |
640 | } |
641 | |
642 | juju_log("%s: image-service_joined: To peer glance-api-server=%s" % |
643 | @@ -222,13 +234,15 @@ |
644 | juju_log('Deferring keystone_joined() to service leader.') |
645 | return |
646 | |
647 | - url = canonical_url(CONFIGS) + ":9292" |
648 | + public_url = '{}:9292'.format(canonical_url(CONFIGS, PUBLIC)) |
649 | + internal_url = '{}:9292'.format(canonical_url(CONFIGS, INTERNAL)) |
650 | + admin_url = '{}:9292'.format(canonical_url(CONFIGS, ADMIN)) |
651 | relation_data = { |
652 | 'service': 'glance', |
653 | 'region': config('region'), |
654 | - 'public_url': url, |
655 | - 'admin_url': url, |
656 | - 'internal_url': url, } |
657 | + 'public_url': public_url, |
658 | + 'admin_url': admin_url, |
659 | + 'internal_url': internal_url, } |
660 | |
661 | relation_set(relation_id=relation_id, **relation_data) |
662 | |
663 | @@ -265,10 +279,19 @@ |
664 | open_port(9292) |
665 | configure_https() |
666 | |
667 | - # env_vars = {'OPENSTACK_PORT_MCASTPORT': config("ha-mcastport"), |
668 | - # 'OPENSTACK_SERVICE_API': "glance-api", |
669 | - # 'OPENSTACK_SERVICE_REGISTRY': "glance-registry"} |
670 | - # save_script_rc(**env_vars) |
671 | + # Pickup and changes due to network reference architecture |
672 | + # configuration |
673 | + [keystone_joined(rid) for rid in relation_ids('identity-service')] |
674 | + [image_service_joined(rid) for rid in relation_ids('image-service')] |
675 | + [cluster_joined(rid) for rid in relation_ids('cluster')] |
676 | + |
677 | + |
678 | +@hooks.hook('cluster-relation-joined') |
679 | +def cluster_joined(relation_id=None): |
680 | + address = get_address_in_network(config('os-internal-network'), |
681 | + unit_get('private-address')) |
682 | + relation_set(relation_id=relation_id, |
683 | + relation_settings={'private-address': address}) |
684 | |
685 | |
686 | @hooks.hook('cluster-relation-changed') |
687 | @@ -289,33 +312,44 @@ |
688 | |
689 | @hooks.hook('ha-relation-joined') |
690 | def ha_relation_joined(): |
691 | - corosync_bindiface = config("ha-bindiface") |
692 | - corosync_mcastport = config("ha-mcastport") |
693 | - vip = config("vip") |
694 | - vip_iface = config("vip_iface") |
695 | - vip_cidr = config("vip_cidr") |
696 | - |
697 | - # if vip and vip_iface and vip_cidr and \ |
698 | - # corosync_bindiface and corosync_mcastport: |
699 | + config = get_hacluster_config() |
700 | |
701 | resources = { |
702 | - 'res_glance_vip': 'ocf:heartbeat:IPaddr2', |
703 | - 'res_glance_haproxy': 'lsb:haproxy', } |
704 | + 'res_glance_haproxy': 'lsb:haproxy' |
705 | + } |
706 | |
707 | resource_params = { |
708 | - 'res_glance_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % |
709 | - (vip, vip_cidr, vip_iface), |
710 | - 'res_glance_haproxy': 'op monitor interval="5s"', } |
711 | + 'res_glance_haproxy': 'op monitor interval="5s"' |
712 | + } |
713 | + |
714 | + vip_group = [] |
715 | + for vip in config['vip'].split(): |
716 | + iface = get_iface_for_address(vip) |
717 | + if iface is not None: |
718 | + vip_key = 'res_glance_{}_vip'.format(iface) |
719 | + resources[vip_key] = 'ocf:heartbeat:IPaddr2' |
720 | + resource_params[vip_key] = ( |
721 | + 'params ip="{vip}" cidr_netmask="{netmask}"' |
722 | + ' nic="{iface}"'.format(vip=vip, |
723 | + iface=iface, |
724 | + netmask=get_netmask_for_address(vip)) |
725 | + ) |
726 | + vip_group.append(vip_key) |
727 | + |
728 | + if len(vip_group) > 1: |
729 | + relation_set(groups={'grp_glance_vips': ' '.join(vip_group)}) |
730 | |
731 | init_services = { |
732 | - 'res_glance_haproxy': 'haproxy', } |
733 | + 'res_glance_haproxy': 'haproxy', |
734 | + } |
735 | |
736 | clones = { |
737 | - 'cl_glance_haproxy': 'res_glance_haproxy', } |
738 | + 'cl_glance_haproxy': 'res_glance_haproxy', |
739 | + } |
740 | |
741 | relation_set(init_services=init_services, |
742 | - corosync_bindiface=corosync_bindiface, |
743 | - corosync_mcastport=corosync_mcastport, |
744 | + corosync_bindiface=config['ha-bindiface'], |
745 | + corosync_mcastport=config['ha-mcastport'], |
746 | resources=resources, |
747 | resource_params=resource_params, |
748 | clones=clones) |
749 | |
750 | === modified file 'hooks/glance_utils.py' |
751 | --- hooks/glance_utils.py 2014-04-12 17:10:59 +0000 |
752 | +++ hooks/glance_utils.py 2014-07-25 09:37:37 +0000 |
753 | @@ -42,7 +42,7 @@ |
754 | get_os_codename_package, |
755 | configure_installation_source) |
756 | |
757 | -CLUSTER_RES = "res_glance_vip" |
758 | +CLUSTER_RES = "grp_glance_vips" |
759 | |
760 | PACKAGES = [ |
761 | "apache2", "glance", "python-mysqldb", "python-swift", |
762 | |
763 | === removed file 'templates/haproxy.cfg' |
764 | --- templates/haproxy.cfg 2014-02-16 20:40:22 +0000 |
765 | +++ templates/haproxy.cfg 1970-01-01 00:00:00 +0000 |
766 | @@ -1,36 +0,0 @@ |
767 | -global |
768 | - log 127.0.0.1 local0 |
769 | - log 127.0.0.1 local1 notice |
770 | - maxconn 20000 |
771 | - user haproxy |
772 | - group haproxy |
773 | - spread-checks 0 |
774 | - |
775 | -defaults |
776 | - log global |
777 | - mode tcp |
778 | - option tcplog |
779 | - option dontlognull |
780 | - retries 3 |
781 | - timeout queue 1000 |
782 | - timeout connect 1000 |
783 | - timeout client 30000 |
784 | - timeout server 30000 |
785 | - |
786 | -listen stats :8888 |
787 | - mode http |
788 | - stats enable |
789 | - stats hide-version |
790 | - stats realm Haproxy\ Statistics |
791 | - stats uri / |
792 | - stats auth admin:password |
793 | - |
794 | -{% if units %} |
795 | -{% for service, ports in service_ports.iteritems() -%} |
796 | -listen {{ service }} 0.0.0.0:{{ ports[0] }} |
797 | - balance roundrobin |
798 | - {% for unit, address in units.iteritems() -%} |
799 | - server {{ unit }} {{ address }}:{{ ports[1] }} check |
800 | - {% endfor %} |
801 | -{% endfor %} |
802 | -{% endif %} |
803 | |
804 | === modified file 'tests/charmhelpers/contrib/amulet/deployment.py' |
805 | --- tests/charmhelpers/contrib/amulet/deployment.py 2014-07-10 21:43:51 +0000 |
806 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-07-25 09:37:37 +0000 |
807 | @@ -1,40 +1,35 @@ |
808 | import amulet |
809 | -import re |
810 | |
811 | |
812 | class AmuletDeployment(object): |
813 | """This class provides generic Amulet deployment and test runner |
814 | methods.""" |
815 | |
816 | - def __init__(self, series): |
817 | + def __init__(self, series=None): |
818 | """Initialize the deployment environment.""" |
819 | - self.series = series |
820 | - self.d = amulet.Deployment(series=self.series) |
821 | + self.series = None |
822 | |
823 | - def _get_charm_name(self, service_name): |
824 | - """Gets the charm name from the service name. Unique service names can |
825 | - be specified with a '-service#' suffix (e.g. mysql-service1).""" |
826 | - if re.match(r"^.*-service\d{1,3}$", service_name): |
827 | - charm_name = re.sub('\-service\d{1,3}$', '', service_name) |
828 | + if series: |
829 | + self.series = series |
830 | + self.d = amulet.Deployment(series=self.series) |
831 | else: |
832 | - charm_name = service_name |
833 | - return charm_name |
834 | + self.d = amulet.Deployment() |
835 | |
836 | def _add_services(self, this_service, other_services): |
837 | """Add services to the deployment where this_service is the local charm |
838 | that we're focused on testing and other_services are the other |
839 | charms that come from the charm store.""" |
840 | name, units = range(2) |
841 | - |
842 | - charm_name = self._get_charm_name(this_service[name]) |
843 | - self.d.add(this_service[name], |
844 | - units=this_service[units]) |
845 | + self.this_service = this_service[name] |
846 | + self.d.add(this_service[name], units=this_service[units]) |
847 | |
848 | for svc in other_services: |
849 | - charm_name = self._get_charm_name(svc[name]) |
850 | - self.d.add(svc[name], |
851 | - charm='cs:{}/{}'.format(self.series, charm_name), |
852 | - units=svc[units]) |
853 | + if self.series: |
854 | + self.d.add(svc[name], |
855 | + charm='cs:{}/{}'.format(self.series, svc[name]), |
856 | + units=svc[units]) |
857 | + else: |
858 | + self.d.add(svc[name], units=svc[units]) |
859 | |
860 | def _add_relations(self, relations): |
861 | """Add all of the relations for the services.""" |
862 | |
863 | === modified file 'tests/charmhelpers/contrib/amulet/utils.py' |
864 | --- tests/charmhelpers/contrib/amulet/utils.py 2014-07-10 21:43:51 +0000 |
865 | +++ tests/charmhelpers/contrib/amulet/utils.py 2014-07-25 09:37:37 +0000 |
866 | @@ -139,11 +139,11 @@ |
867 | return self._get_dir_mtime(sentry_unit, proc_dir) |
868 | |
869 | def service_restarted(self, sentry_unit, service, filename, |
870 | - pgrep_full=False, sleep_time=20): |
871 | + pgrep_full=False): |
872 | """Compare a service's start time vs a file's last modification time |
873 | (such as a config file for that service) to determine if the service |
874 | has been restarted.""" |
875 | - sleep(sleep_time) |
876 | + sleep(10) |
877 | if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \ |
878 | self._get_file_mtime(sentry_unit, filename): |
879 | return True |
880 | |
881 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' |
882 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-10 21:43:51 +0000 |
883 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-07-25 09:37:37 +0000 |
884 | @@ -7,7 +7,7 @@ |
885 | """This class inherits from AmuletDeployment and has additional support |
886 | that is specifically for use by OpenStack charms.""" |
887 | |
888 | - def __init__(self, series, openstack=None, source=None): |
889 | + def __init__(self, series=None, openstack=None, source=None): |
890 | """Initialize the deployment environment.""" |
891 | super(OpenStackAmuletDeployment, self).__init__(series) |
892 | self.openstack = openstack |
893 | @@ -24,15 +24,13 @@ |
894 | |
895 | if self.openstack: |
896 | for svc in services: |
897 | - charm_name = self._get_charm_name(svc[name]) |
898 | - if charm_name not in use_source: |
899 | + if svc[name] not in use_source: |
900 | config = {'openstack-origin': self.openstack} |
901 | self.d.configure(svc[name], config) |
902 | |
903 | if self.source: |
904 | for svc in services: |
905 | - charm_name = self._get_charm_name(svc[name]) |
906 | - if charm_name in use_source: |
907 | + if svc[name] in use_source: |
908 | config = {'source': self.source} |
909 | self.d.configure(svc[name], config) |
910 | |
911 | |
912 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/utils.py' |
913 | --- tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-10 21:43:51 +0000 |
914 | +++ tests/charmhelpers/contrib/openstack/amulet/utils.py 2014-07-25 09:37:37 +0000 |
915 | @@ -177,40 +177,12 @@ |
916 | image = glance.images.create(name=image_name, is_public=True, |
917 | disk_format='qcow2', |
918 | container_format='bare', data=f) |
919 | - count = 1 |
920 | - status = image.status |
921 | - while status != 'active' and count < 10: |
922 | - time.sleep(3) |
923 | - image = glance.images.get(image.id) |
924 | - status = image.status |
925 | - self.log.debug('image status: {}'.format(status)) |
926 | - count += 1 |
927 | - |
928 | - if status != 'active': |
929 | - self.log.error('image creation timed out') |
930 | - return None |
931 | - |
932 | return image |
933 | |
934 | def delete_image(self, glance, image): |
935 | """Delete the specified image.""" |
936 | - num_before = len(list(glance.images.list())) |
937 | glance.images.delete(image) |
938 | |
939 | - count = 1 |
940 | - num_after = len(list(glance.images.list())) |
941 | - while num_after != (num_before - 1) and count < 10: |
942 | - time.sleep(3) |
943 | - num_after = len(list(glance.images.list())) |
944 | - self.log.debug('number of images: {}'.format(num_after)) |
945 | - count += 1 |
946 | - |
947 | - if num_after != (num_before - 1): |
948 | - self.log.error('image deletion timed out') |
949 | - return False |
950 | - |
951 | - return True |
952 | - |
953 | def create_instance(self, nova, image_name, instance_name, flavor): |
954 | """Create the specified instance.""" |
955 | image = nova.images.find(name=image_name) |
956 | @@ -227,27 +199,11 @@ |
957 | self.log.debug('instance status: {}'.format(status)) |
958 | count += 1 |
959 | |
960 | - if status != 'ACTIVE': |
961 | - self.log.error('instance creation timed out') |
962 | + if status == 'BUILD': |
963 | return None |
964 | |
965 | return instance |
966 | |
967 | def delete_instance(self, nova, instance): |
968 | """Delete the specified instance.""" |
969 | - num_before = len(list(nova.servers.list())) |
970 | nova.servers.delete(instance) |
971 | - |
972 | - count = 1 |
973 | - num_after = len(list(nova.servers.list())) |
974 | - while num_after != (num_before - 1) and count < 10: |
975 | - time.sleep(3) |
976 | - num_after = len(list(nova.servers.list())) |
977 | - self.log.debug('number of instances: {}'.format(num_after)) |
978 | - count += 1 |
979 | - |
980 | - if num_after != (num_before - 1): |
981 | - self.log.error('instance deletion timed out') |
982 | - return False |
983 | - |
984 | - return True |
985 | |
986 | === modified file 'unit_tests/test_glance_relations.py' |
987 | --- unit_tests/test_glance_relations.py 2014-03-31 11:38:11 +0000 |
988 | +++ unit_tests/test_glance_relations.py 2014-07-25 09:37:37 +0000 |
989 | @@ -54,7 +54,10 @@ |
990 | 'check_call', |
991 | 'execd_preinstall', |
992 | 'lsb_release', |
993 | - 'filter_installed_packages' |
994 | + 'filter_installed_packages', |
995 | + 'get_hacluster_config', |
996 | + 'get_netmask_for_address', |
997 | + 'get_iface_for_address' |
998 | ] |
999 | |
1000 | |
1001 | @@ -435,21 +438,23 @@ |
1002 | self.assertTrue(configs.write_all.called) |
1003 | |
1004 | def test_ha_relation_joined(self): |
1005 | - self.test_config.set('ha-bindiface', 'em0') |
1006 | - self.test_config.set('ha-mcastport', '8080') |
1007 | - self.test_config.set('vip', '10.10.10.10') |
1008 | - self.test_config.set('vip_iface', 'em1') |
1009 | - self.test_config.set('vip_cidr', '24') |
1010 | + self.get_hacluster_config.return_value = { |
1011 | + 'ha-bindiface': 'em0', |
1012 | + 'ha-mcastport': '8080', |
1013 | + 'vip': '10.10.10.10', |
1014 | + } |
1015 | + self.get_iface_for_address.return_value = 'eth1' |
1016 | + self.get_netmask_for_address.return_value = '255.255.0.0' |
1017 | relations.ha_relation_joined() |
1018 | args = { |
1019 | 'corosync_bindiface': 'em0', |
1020 | 'corosync_mcastport': '8080', |
1021 | 'init_services': {'res_glance_haproxy': 'haproxy'}, |
1022 | - 'resources': {'res_glance_vip': 'ocf:heartbeat:IPaddr2', |
1023 | + 'resources': {'res_glance_eth1_vip': 'ocf:heartbeat:IPaddr2', |
1024 | 'res_glance_haproxy': 'lsb:haproxy'}, |
1025 | 'resource_params': { |
1026 | - 'res_glance_vip': 'params ip="10.10.10.10"' |
1027 | - ' cidr_netmask="24" nic="em1"', |
1028 | + 'res_glance_eth1_vip': 'params ip="10.10.10.10"' |
1029 | + ' cidr_netmask="255.255.0.0" nic="eth1"', |
1030 | 'res_glance_haproxy': 'op monitor interval="5s"'}, |
1031 | 'clones': {'cl_glance_haproxy': 'res_glance_haproxy'} |
1032 | } |
1033 | |
1034 | === modified file 'unit_tests/test_utils.py' |
1035 | --- unit_tests/test_utils.py 2014-01-15 13:04:21 +0000 |
1036 | +++ unit_tests/test_utils.py 2014-07-25 09:37:37 +0000 |
1037 | @@ -80,9 +80,9 @@ |
1038 | return self.config |
1039 | |
1040 | def set(self, attr, value): |
1041 | - if attr not in self.config: |
1042 | - raise KeyError |
1043 | - self.config[attr] = value |
1044 | + if attr not in self.config: |
1045 | + raise KeyError |
1046 | + self.config[attr] = value |
1047 | |
1048 | |
1049 | class TestRelation(object): |
This diff contains merge-conflict artifacts that need to be resolved before merging.