Merge lp:~james-page/charms/trusty/cinder/network-splits into lp:~openstack-charmers-archive/charms/trusty/cinder/next
- Trusty Tahr (14.04)
- network-splits
- Merge into next
Proposed by
James Page
Status: | Merged |
---|---|
Merged at revision: | 40 |
Proposed branch: | lp:~james-page/charms/trusty/cinder/network-splits |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/cinder/next |
Diff against target: |
690 lines (+382/-42) 15 files modified
.bzrignore (+2/-0) Makefile (+9/-5) charm-helpers.yaml (+1/-0) config.yaml (+28/-9) hooks/charmhelpers/contrib/hahelpers/cluster.py (+2/-2) hooks/charmhelpers/contrib/network/ip.py (+156/-0) hooks/charmhelpers/contrib/openstack/context.py (+28/-3) hooks/charmhelpers/contrib/openstack/ip.py (+75/-0) hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg (+6/-1) hooks/charmhelpers/core/host.py (+4/-0) hooks/cinder_contexts.py (+1/-0) hooks/cinder_hooks.py (+57/-16) hooks/cinder_utils.py (+2/-2) unit_tests/test_cinder_hooks.py (+1/-0) unit_tests/test_cluster_hooks.py (+10/-4) |
To merge this branch: | bzr merge lp:~james-page/charms/trusty/cinder/network-splits |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Liam Young (community) | Approve | ||
Review via email: mp+228138@code.launchpad.net |
This proposal supersedes a proposal from 2014-07-24.
Commit message
Description of the change
Add support for multiple network configurations.
To post a comment you must log in.
- 65. By James Page
-
Rebase
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added file '.bzrignore' |
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 |
3 | +++ .bzrignore 2014-07-25 08:12:05 +0000 |
4 | @@ -0,0 +1,2 @@ |
5 | +bin |
6 | +.coverage |
7 | |
8 | === modified file 'Makefile' |
9 | --- Makefile 2014-05-21 10:11:37 +0000 |
10 | +++ Makefile 2014-07-25 08:12:05 +0000 |
11 | @@ -6,11 +6,15 @@ |
12 | @charm proof |
13 | |
14 | test: |
15 | - @echo Starting tests... |
16 | - @$(PYTHON) /usr/bin/nosetests --nologcapture unit_tests |
17 | - |
18 | -sync: |
19 | - @charm-helper-sync -c charm-helpers.yaml |
20 | + @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests |
21 | + |
22 | +bin/charm_helpers_sync.py: |
23 | + @mkdir -p bin |
24 | + @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py \ |
25 | + > bin/charm_helpers_sync.py |
26 | + |
27 | +sync: bin/charm_helpers_sync.py |
28 | + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml |
29 | |
30 | publish: lint test |
31 | bzr push lp:charms/cinder |
32 | |
33 | === modified file 'charm-helpers.yaml' |
34 | --- charm-helpers.yaml 2014-07-03 12:52:54 +0000 |
35 | +++ charm-helpers.yaml 2014-07-25 08:12:05 +0000 |
36 | @@ -10,3 +10,4 @@ |
37 | - cluster |
38 | - fetch |
39 | - payload.execd |
40 | + - contrib.network.ip |
41 | |
42 | === modified file 'config.yaml' |
43 | --- config.yaml 2014-04-12 19:12:09 +0000 |
44 | +++ config.yaml 2014-07-25 08:12:05 +0000 |
45 | @@ -102,15 +102,11 @@ |
46 | # HA configuration settings |
47 | vip: |
48 | type: string |
49 | - description: "Virtual IP to use to front cinder API in ha configuration" |
50 | - vip_iface: |
51 | - type: string |
52 | - default: eth0 |
53 | - description: "Network Interface where to place the Virtual IP" |
54 | - vip_cidr: |
55 | - type: int |
56 | - default: 24 |
57 | - description: "Netmask that will be used for the Virtual IP" |
58 | + description: | |
59 | + Virtual IP(s) to use to front API services in HA configuration. |
60 | + . |
61 | + If multiple networks are being used, a VIP should be provided for each |
62 | + network, separated by spaces. |
63 | ha-bindiface: |
64 | type: string |
65 | default: eth0 |
66 | @@ -142,4 +138,27 @@ |
67 | config-flags: |
68 | type: string |
69 | description: Comma separated list of key=value config flags to be set in cinder.conf. |
70 | + # Network configuration options |
71 | + # by default all access is over 'private-address' |
72 | + os-admin-network: |
73 | + type: string |
74 | + description: | |
75 | + The IP address and netmask of the OpenStack Admin network (e.g., |
76 | + 192.168.0.0/24) |
77 | + . |
78 | + This network will be used for admin endpoints. |
79 | + os-internal-network: |
80 | + type: string |
81 | + description: | |
82 | + The IP address and netmask of the OpenStack Internal network (e.g., |
83 | + 192.168.0.0/24) |
84 | + . |
85 | + This network will be used for internal endpoints. |
86 | + os-public-network: |
87 | + type: string |
88 | + description: | |
89 | + The IP address and netmask of the OpenStack Public network (e.g., |
90 | + 192.168.0.0/24) |
91 | + . |
92 | + This network will be used for public endpoints. |
93 | |
94 | |
95 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' |
96 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-03 12:44:32 +0000 |
97 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-07-25 08:12:05 +0000 |
98 | @@ -146,12 +146,12 @@ |
99 | Obtains all relevant configuration from charm configuration required |
100 | for initiating a relation to hacluster: |
101 | |
102 | - ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr |
103 | + ha-bindiface, ha-mcastport, vip |
104 | |
105 | returns: dict: A dict containing settings keyed by setting name. |
106 | raises: HAIncompleteConfig if settings are missing. |
107 | ''' |
108 | - settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr'] |
109 | + settings = ['ha-bindiface', 'ha-mcastport', 'vip'] |
110 | conf = {} |
111 | for setting in settings: |
112 | conf[setting] = config_get(setting) |
113 | |
114 | === added directory 'hooks/charmhelpers/contrib/network' |
115 | === added file 'hooks/charmhelpers/contrib/network/__init__.py' |
116 | === added file 'hooks/charmhelpers/contrib/network/ip.py' |
117 | --- hooks/charmhelpers/contrib/network/ip.py 1970-01-01 00:00:00 +0000 |
118 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-07-25 08:12:05 +0000 |
119 | @@ -0,0 +1,156 @@ |
120 | +import sys |
121 | + |
122 | +from functools import partial |
123 | + |
124 | +from charmhelpers.fetch import apt_install |
125 | +from charmhelpers.core.hookenv import ( |
126 | + ERROR, log, |
127 | +) |
128 | + |
129 | +try: |
130 | + import netifaces |
131 | +except ImportError: |
132 | + apt_install('python-netifaces') |
133 | + import netifaces |
134 | + |
135 | +try: |
136 | + import netaddr |
137 | +except ImportError: |
138 | + apt_install('python-netaddr') |
139 | + import netaddr |
140 | + |
141 | + |
142 | +def _validate_cidr(network): |
143 | + try: |
144 | + netaddr.IPNetwork(network) |
145 | + except (netaddr.core.AddrFormatError, ValueError): |
146 | + raise ValueError("Network (%s) is not in CIDR presentation format" % |
147 | + network) |
148 | + |
149 | + |
150 | +def get_address_in_network(network, fallback=None, fatal=False): |
151 | + """ |
152 | + Get an IPv4 or IPv6 address within the network from the host. |
153 | + |
154 | + :param network (str): CIDR presentation format. For example, |
155 | + '192.168.1.0/24'. |
156 | + :param fallback (str): If no address is found, return fallback. |
157 | + :param fatal (boolean): If no address is found, fallback is not |
158 | + set and fatal is True then exit(1). |
159 | + |
160 | + """ |
161 | + |
162 | + def not_found_error_out(): |
163 | + log("No IP address found in network: %s" % network, |
164 | + level=ERROR) |
165 | + sys.exit(1) |
166 | + |
167 | + if network is None: |
168 | + if fallback is not None: |
169 | + return fallback |
170 | + else: |
171 | + if fatal: |
172 | + not_found_error_out() |
173 | + |
174 | + _validate_cidr(network) |
175 | + network = netaddr.IPNetwork(network) |
176 | + for iface in netifaces.interfaces(): |
177 | + addresses = netifaces.ifaddresses(iface) |
178 | + if network.version == 4 and netifaces.AF_INET in addresses: |
179 | + addr = addresses[netifaces.AF_INET][0]['addr'] |
180 | + netmask = addresses[netifaces.AF_INET][0]['netmask'] |
181 | + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
182 | + if cidr in network: |
183 | + return str(cidr.ip) |
184 | + if network.version == 6 and netifaces.AF_INET6 in addresses: |
185 | + for addr in addresses[netifaces.AF_INET6]: |
186 | + if not addr['addr'].startswith('fe80'): |
187 | + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
188 | + addr['netmask'])) |
189 | + if cidr in network: |
190 | + return str(cidr.ip) |
191 | + |
192 | + if fallback is not None: |
193 | + return fallback |
194 | + |
195 | + if fatal: |
196 | + not_found_error_out() |
197 | + |
198 | + return None |
199 | + |
200 | + |
201 | +def is_ipv6(address): |
202 | + '''Determine whether provided address is IPv6 or not''' |
203 | + try: |
204 | + address = netaddr.IPAddress(address) |
205 | + except netaddr.AddrFormatError: |
206 | + # probably a hostname - so not an address at all! |
207 | + return False |
208 | + else: |
209 | + return address.version == 6 |
210 | + |
211 | + |
212 | +def is_address_in_network(network, address): |
213 | + """ |
214 | + Determine whether the provided address is within a network range. |
215 | + |
216 | + :param network (str): CIDR presentation format. For example, |
217 | + '192.168.1.0/24'. |
218 | + :param address: An individual IPv4 or IPv6 address without a net |
219 | + mask or subnet prefix. For example, '192.168.1.1'. |
220 | + :returns boolean: Flag indicating whether address is in network. |
221 | + """ |
222 | + try: |
223 | + network = netaddr.IPNetwork(network) |
224 | + except (netaddr.core.AddrFormatError, ValueError): |
225 | + raise ValueError("Network (%s) is not in CIDR presentation format" % |
226 | + network) |
227 | + try: |
228 | + address = netaddr.IPAddress(address) |
229 | + except (netaddr.core.AddrFormatError, ValueError): |
230 | + raise ValueError("Address (%s) is not in correct presentation format" % |
231 | + address) |
232 | + if address in network: |
233 | + return True |
234 | + else: |
235 | + return False |
236 | + |
237 | + |
238 | +def _get_for_address(address, key): |
239 | + """Retrieve an attribute of or the physical interface that |
240 | + the IP address provided could be bound to. |
241 | + |
242 | + :param address (str): An individual IPv4 or IPv6 address without a net |
243 | + mask or subnet prefix. For example, '192.168.1.1'. |
244 | + :param key: 'iface' for the physical interface name or an attribute |
245 | + of the configured interface, for example 'netmask'. |
246 | + :returns str: Requested attribute or None if address is not bindable. |
247 | + """ |
248 | + address = netaddr.IPAddress(address) |
249 | + for iface in netifaces.interfaces(): |
250 | + addresses = netifaces.ifaddresses(iface) |
251 | + if address.version == 4 and netifaces.AF_INET in addresses: |
252 | + addr = addresses[netifaces.AF_INET][0]['addr'] |
253 | + netmask = addresses[netifaces.AF_INET][0]['netmask'] |
254 | + cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
255 | + if address in cidr: |
256 | + if key == 'iface': |
257 | + return iface |
258 | + else: |
259 | + return addresses[netifaces.AF_INET][0][key] |
260 | + if address.version == 6 and netifaces.AF_INET6 in addresses: |
261 | + for addr in addresses[netifaces.AF_INET6]: |
262 | + if not addr['addr'].startswith('fe80'): |
263 | + cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], |
264 | + addr['netmask'])) |
265 | + if address in cidr: |
266 | + if key == 'iface': |
267 | + return iface |
268 | + else: |
269 | + return addr[key] |
270 | + return None |
271 | + |
272 | + |
273 | +get_iface_for_address = partial(_get_for_address, key='iface') |
274 | + |
275 | +get_netmask_for_address = partial(_get_for_address, key='netmask') |
276 | |
277 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
278 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-07-03 12:44:32 +0000 |
279 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-07-25 08:12:05 +0000 |
280 | @@ -21,6 +21,7 @@ |
281 | relation_get, |
282 | relation_ids, |
283 | related_units, |
284 | + relation_set, |
285 | unit_get, |
286 | unit_private_ip, |
287 | ERROR, |
288 | @@ -43,6 +44,8 @@ |
289 | neutron_plugin_attribute, |
290 | ) |
291 | |
292 | +from charmhelpers.contrib.network.ip import get_address_in_network |
293 | + |
294 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
295 | |
296 | |
297 | @@ -135,8 +138,26 @@ |
298 | 'Missing required charm config options. ' |
299 | '(database name and user)') |
300 | raise OSContextError |
301 | + |
302 | ctxt = {} |
303 | |
304 | + # NOTE(jamespage) if mysql charm provides a network upon which |
305 | + # access to the database should be made, reconfigure relation |
306 | + # with the service units local address and defer execution |
307 | + access_network = relation_get('access-network') |
308 | + if access_network is not None: |
309 | + if self.relation_prefix is not None: |
310 | + hostname_key = "{}_hostname".format(self.relation_prefix) |
311 | + else: |
312 | + hostname_key = "hostname" |
313 | + access_hostname = get_address_in_network(access_network, |
314 | + unit_get('private-address')) |
315 | + set_hostname = relation_get(attribute=hostname_key, |
316 | + unit=local_unit()) |
317 | + if set_hostname != access_hostname: |
318 | + relation_set(relation_settings={hostname_key: access_hostname}) |
319 | + return ctxt # Defer any further hook execution for now.... |
320 | + |
321 | password_setting = 'password' |
322 | if self.relation_prefix: |
323 | password_setting = self.relation_prefix + '_password' |
324 | @@ -341,10 +362,12 @@ |
325 | use_syslog = str(config('use-syslog')).lower() |
326 | for rid in relation_ids('ceph'): |
327 | for unit in related_units(rid): |
328 | - mon_hosts.append(relation_get('private-address', rid=rid, |
329 | - unit=unit)) |
330 | auth = relation_get('auth', rid=rid, unit=unit) |
331 | key = relation_get('key', rid=rid, unit=unit) |
332 | + ceph_addr = \ |
333 | + relation_get('ceph-public-address', rid=rid, unit=unit) or \ |
334 | + relation_get('private-address', rid=rid, unit=unit) |
335 | + mon_hosts.append(ceph_addr) |
336 | |
337 | ctxt = { |
338 | 'mon_hosts': ' '.join(mon_hosts), |
339 | @@ -378,7 +401,9 @@ |
340 | |
341 | cluster_hosts = {} |
342 | l_unit = local_unit().replace('/', '-') |
343 | - cluster_hosts[l_unit] = unit_get('private-address') |
344 | + cluster_hosts[l_unit] = \ |
345 | + get_address_in_network(config('os-internal-network'), |
346 | + unit_get('private-address')) |
347 | |
348 | for rid in relation_ids('cluster'): |
349 | for unit in related_units(rid): |
350 | |
351 | === added file 'hooks/charmhelpers/contrib/openstack/ip.py' |
352 | --- hooks/charmhelpers/contrib/openstack/ip.py 1970-01-01 00:00:00 +0000 |
353 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-07-25 08:12:05 +0000 |
354 | @@ -0,0 +1,75 @@ |
355 | +from charmhelpers.core.hookenv import ( |
356 | + config, |
357 | + unit_get, |
358 | +) |
359 | + |
360 | +from charmhelpers.contrib.network.ip import ( |
361 | + get_address_in_network, |
362 | + is_address_in_network, |
363 | + is_ipv6, |
364 | +) |
365 | + |
366 | +from charmhelpers.contrib.hahelpers.cluster import is_clustered |
367 | + |
368 | +PUBLIC = 'public' |
369 | +INTERNAL = 'int' |
370 | +ADMIN = 'admin' |
371 | + |
372 | +_address_map = { |
373 | + PUBLIC: { |
374 | + 'config': 'os-public-network', |
375 | + 'fallback': 'public-address' |
376 | + }, |
377 | + INTERNAL: { |
378 | + 'config': 'os-internal-network', |
379 | + 'fallback': 'private-address' |
380 | + }, |
381 | + ADMIN: { |
382 | + 'config': 'os-admin-network', |
383 | + 'fallback': 'private-address' |
384 | + } |
385 | +} |
386 | + |
387 | + |
388 | +def canonical_url(configs, endpoint_type=PUBLIC): |
389 | + ''' |
390 | + Returns the correct HTTP URL to this host given the state of HTTPS |
391 | + configuration, hacluster and charm configuration. |
392 | + |
393 | + :configs OSTemplateRenderer: A config tempating object to inspect for |
394 | + a complete https context. |
395 | + :endpoint_type str: The endpoint type to resolve. |
396 | + |
397 | + :returns str: Base URL for services on the current service unit. |
398 | + ''' |
399 | + scheme = 'http' |
400 | + if 'https' in configs.complete_contexts(): |
401 | + scheme = 'https' |
402 | + address = resolve_address(endpoint_type) |
403 | + if is_ipv6(address): |
404 | + address = "[{}]".format(address) |
405 | + return '%s://%s' % (scheme, address) |
406 | + |
407 | + |
408 | +def resolve_address(endpoint_type=PUBLIC): |
409 | + resolved_address = None |
410 | + if is_clustered(): |
411 | + if config(_address_map[endpoint_type]['config']) is None: |
412 | + # Assume vip is simple and pass back directly |
413 | + resolved_address = config('vip') |
414 | + else: |
415 | + for vip in config('vip').split(): |
416 | + if is_address_in_network( |
417 | + config(_address_map[endpoint_type]['config']), |
418 | + vip): |
419 | + resolved_address = vip |
420 | + else: |
421 | + resolved_address = get_address_in_network( |
422 | + config(_address_map[endpoint_type]['config']), |
423 | + unit_get(_address_map[endpoint_type]['fallback']) |
424 | + ) |
425 | + if resolved_address is None: |
426 | + raise ValueError('Unable to resolve a suitable IP address' |
427 | + ' based on charm state and configuration') |
428 | + else: |
429 | + return resolved_address |
430 | |
431 | === modified file 'hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg' |
432 | --- hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-04-02 07:58:11 +0000 |
433 | +++ hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg 2014-07-25 08:12:05 +0000 |
434 | @@ -27,7 +27,12 @@ |
435 | |
436 | {% if units -%} |
437 | {% for service, ports in service_ports.iteritems() -%} |
438 | -listen {{ service }} 0.0.0.0:{{ ports[0] }} |
439 | +listen {{ service }}_ipv4 0.0.0.0:{{ ports[0] }} |
440 | + balance roundrobin |
441 | + {% for unit, address in units.iteritems() -%} |
442 | + server {{ unit }} {{ address }}:{{ ports[1] }} check |
443 | + {% endfor %} |
444 | +listen {{ service }}_ipv6 :::{{ ports[0] }} |
445 | balance roundrobin |
446 | {% for unit, address in units.iteritems() -%} |
447 | server {{ unit }} {{ address }}:{{ ports[1] }} check |
448 | |
449 | === modified file 'hooks/charmhelpers/core/host.py' |
450 | --- hooks/charmhelpers/core/host.py 2014-07-03 12:44:32 +0000 |
451 | +++ hooks/charmhelpers/core/host.py 2014-07-25 08:12:05 +0000 |
452 | @@ -322,6 +322,10 @@ |
453 | import apt_pkg |
454 | if not pkgcache: |
455 | apt_pkg.init() |
456 | + # Force Apt to build its cache in memory. That way we avoid race |
457 | + # conditions with other applications building the cache in the same |
458 | + # place. |
459 | + apt_pkg.config.set("Dir::Cache::pkgcache", "") |
460 | pkgcache = apt_pkg.Cache() |
461 | pkg = pkgcache[package] |
462 | return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) |
463 | |
464 | === modified file 'hooks/cinder_contexts.py' |
465 | --- hooks/cinder_contexts.py 2014-04-12 19:12:09 +0000 |
466 | +++ hooks/cinder_contexts.py 2014-07-25 08:12:05 +0000 |
467 | @@ -105,5 +105,6 @@ |
468 | |
469 | |
470 | class LoggingConfigContext(OSContextGenerator): |
471 | + |
472 | def __call__(self): |
473 | return {'debug': config('debug'), 'verbose': config('verbose')} |
474 | |
475 | === modified file 'hooks/cinder_hooks.py' |
476 | --- hooks/cinder_hooks.py 2014-04-09 09:19:50 +0000 |
477 | +++ hooks/cinder_hooks.py 2014-07-25 08:12:05 +0000 |
478 | @@ -34,7 +34,7 @@ |
479 | service_name, |
480 | unit_get, |
481 | log, |
482 | - ERROR |
483 | + ERROR, |
484 | ) |
485 | |
486 | from charmhelpers.fetch import apt_install, apt_update |
487 | @@ -46,13 +46,21 @@ |
488 | from charmhelpers.contrib.storage.linux.ceph import ensure_ceph_keyring |
489 | |
490 | from charmhelpers.contrib.hahelpers.cluster import ( |
491 | - canonical_url, |
492 | eligible_leader, |
493 | is_leader, |
494 | get_hacluster_config, |
495 | ) |
496 | |
497 | from charmhelpers.payload.execd import execd_preinstall |
498 | +from charmhelpers.contrib.network.ip import ( |
499 | + get_iface_for_address, |
500 | + get_netmask_for_address, |
501 | + get_address_in_network |
502 | +) |
503 | +from charmhelpers.contrib.openstack.ip import ( |
504 | + canonical_url, |
505 | + PUBLIC, INTERNAL, ADMIN |
506 | +) |
507 | |
508 | hooks = Hooks() |
509 | |
510 | @@ -65,7 +73,7 @@ |
511 | conf = config() |
512 | src = conf['openstack-origin'] |
513 | if (lsb_release()['DISTRIB_CODENAME'] == 'precise' and |
514 | - src == 'distro'): |
515 | + src == 'distro'): |
516 | src = 'cloud:precise-folsom' |
517 | configure_installation_source(src) |
518 | apt_update() |
519 | @@ -93,6 +101,9 @@ |
520 | CONFIGS.write_all() |
521 | configure_https() |
522 | |
523 | + for rid in relation_ids('cluster'): |
524 | + cluster_joined(relation_id=rid) |
525 | + |
526 | |
527 | @hooks.hook('shared-db-relation-joined') |
528 | def db_joined(): |
529 | @@ -175,17 +186,24 @@ |
530 | if not eligible_leader(CLUSTER_RES): |
531 | return |
532 | |
533 | - conf = config() |
534 | - |
535 | - port = conf['api-listening-port'] |
536 | - url = canonical_url(CONFIGS) + ':%s/v1/$(tenant_id)s' % port |
537 | - |
538 | + public_url = '{}:{}/v1/$(tenant_id)s'.format( |
539 | + canonical_url(CONFIGS, PUBLIC), |
540 | + config('api-listening-port') |
541 | + ) |
542 | + internal_url = '{}:{}/v1/$(tenant_id)s'.format( |
543 | + canonical_url(CONFIGS, INTERNAL), |
544 | + config('api-listening-port') |
545 | + ) |
546 | + admin_url = '{}:{}/v1/$(tenant_id)s'.format( |
547 | + canonical_url(CONFIGS, ADMIN), |
548 | + config('api-listening-port') |
549 | + ) |
550 | settings = { |
551 | - 'region': conf['region'], |
552 | + 'region': config('region'), |
553 | 'service': 'cinder', |
554 | - 'public_url': url, |
555 | - 'internal_url': url, |
556 | - 'admin_url': url, |
557 | + 'public_url': public_url, |
558 | + 'internal_url': internal_url, |
559 | + 'admin_url': admin_url, |
560 | } |
561 | relation_set(relation_id=rid, **settings) |
562 | |
563 | @@ -228,6 +246,14 @@ |
564 | replicas=_config['ceph-osd-replication-count']) |
565 | |
566 | |
567 | +@hooks.hook('cluster-relation-joined') |
568 | +def cluster_joined(relation_id=None): |
569 | + address = get_address_in_network(config('os-internal-network'), |
570 | + unit_get('private-address')) |
571 | + relation_set(relation_id=relation_id, |
572 | + relation_settings={'private-address': address}) |
573 | + |
574 | + |
575 | @hooks.hook('cluster-relation-changed', |
576 | 'cluster-relation-departed') |
577 | @restart_on_change(restart_map(), stopstart=True) |
578 | @@ -238,17 +264,32 @@ |
579 | @hooks.hook('ha-relation-joined') |
580 | def ha_joined(): |
581 | config = get_hacluster_config() |
582 | + |
583 | resources = { |
584 | - 'res_cinder_vip': 'ocf:heartbeat:IPaddr2', |
585 | 'res_cinder_haproxy': 'lsb:haproxy' |
586 | } |
587 | |
588 | - vip_params = 'params ip="%s" cidr_netmask="%s" nic="%s"' % \ |
589 | - (config['vip'], config['vip_cidr'], config['vip_iface']) |
590 | resource_params = { |
591 | - 'res_cinder_vip': vip_params, |
592 | 'res_cinder_haproxy': 'op monitor interval="5s"' |
593 | } |
594 | + |
595 | + vip_group = [] |
596 | + for vip in config['vip'].split(): |
597 | + iface = get_iface_for_address(vip) |
598 | + if iface is not None: |
599 | + vip_key = 'res_cinder_{}_vip'.format(iface) |
600 | + resources[vip_key] = 'ocf:heartbeat:IPaddr2' |
601 | + resource_params[vip_key] = ( |
602 | + 'params ip="{vip}" cidr_netmask="{netmask}"' |
603 | + ' nic="{iface}"'.format(vip=vip, |
604 | + iface=iface, |
605 | + netmask=get_netmask_for_address(vip)) |
606 | + ) |
607 | + vip_group.append(vip_key) |
608 | + |
609 | + if len(vip_group) > 1: |
610 | + relation_set(groups={'grp_cinder_vips': ' '.join(vip_group)}) |
611 | + |
612 | init_services = { |
613 | 'res_cinder_haproxy': 'haproxy' |
614 | } |
615 | |
616 | === modified file 'hooks/cinder_utils.py' |
617 | --- hooks/cinder_utils.py 2014-05-19 12:16:56 +0000 |
618 | +++ hooks/cinder_utils.py 2014-07-25 08:12:05 +0000 |
619 | @@ -86,7 +86,7 @@ |
620 | DEFAULT_LOOPBACK_SIZE = '5G' |
621 | |
622 | # Cluster resource used to determine leadership when hacluster'd |
623 | -CLUSTER_RES = 'res_cinder_vip' |
624 | +CLUSTER_RES = 'grp_cinder_vips' |
625 | |
626 | |
627 | class CinderCharmError(Exception): |
628 | @@ -391,7 +391,7 @@ |
629 | with open('/etc/environment', 'a') as out: |
630 | out.write('CEPH_ARGS="--id %s"\n' % service) |
631 | with open('/etc/init/cinder-volume.override', 'w') as out: |
632 | - out.write('env CEPH_ARGS="--id %s"\n' % service) |
633 | + out.write('env CEPH_ARGS="--id %s"\n' % service) |
634 | |
635 | |
636 | def do_openstack_upgrade(configs): |
637 | |
638 | === modified file 'unit_tests/test_cinder_hooks.py' |
639 | --- unit_tests/test_cinder_hooks.py 2014-04-07 13:09:04 +0000 |
640 | +++ unit_tests/test_cinder_hooks.py 2014-07-25 08:12:05 +0000 |
641 | @@ -300,6 +300,7 @@ |
642 | def test_identity_service_joined(self): |
643 | 'It properly requests unclustered endpoint via identity-service' |
644 | self.unit_get.return_value = 'cindernode1' |
645 | + self.config.side_effect = self.test_config.get |
646 | self.canonical_url.return_value = 'http://cindernode1' |
647 | hooks.hooks.execute(['hooks/identity-service-relation-joined']) |
648 | expected = { |
649 | |
650 | === modified file 'unit_tests/test_cluster_hooks.py' |
651 | --- unit_tests/test_cluster_hooks.py 2014-05-21 09:57:23 +0000 |
652 | +++ unit_tests/test_cluster_hooks.py 2014-07-25 08:12:05 +0000 |
653 | @@ -51,7 +51,10 @@ |
654 | # charmhelpers.contrib.hahelpers.cluster_utils |
655 | 'eligible_leader', |
656 | 'get_hacluster_config', |
657 | - 'is_leader' |
658 | + 'is_leader', |
659 | + # charmhelpers.contrib.network.ip |
660 | + 'get_iface_for_address', |
661 | + 'get_netmask_for_address' |
662 | ] |
663 | |
664 | |
665 | @@ -96,19 +99,22 @@ |
666 | 'vip_cidr': '19', |
667 | } |
668 | self.get_hacluster_config.return_value = conf |
669 | + self.get_iface_for_address.return_value = 'eth101' |
670 | + self.get_netmask_for_address.return_value = '255.255.224.0' |
671 | hooks.hooks.execute(['hooks/ha-relation-joined']) |
672 | ex_args = { |
673 | 'corosync_mcastport': '37373', |
674 | 'init_services': {'res_cinder_haproxy': 'haproxy'}, |
675 | 'resource_params': { |
676 | - 'res_cinder_vip': |
677 | - 'params ip="192.168.25.163" cidr_netmask="19" nic="eth101"', |
678 | + 'res_cinder_eth101_vip': |
679 | + 'params ip="192.168.25.163" cidr_netmask="255.255.224.0"' |
680 | + ' nic="eth101"', |
681 | 'res_cinder_haproxy': 'op monitor interval="5s"' |
682 | }, |
683 | 'corosync_bindiface': 'eth100', |
684 | 'clones': {'cl_cinder_haproxy': 'res_cinder_haproxy'}, |
685 | 'resources': { |
686 | - 'res_cinder_vip': 'ocf:heartbeat:IPaddr2', |
687 | + 'res_cinder_eth101_vip': 'ocf:heartbeat:IPaddr2', |
688 | 'res_cinder_haproxy': 'lsb:haproxy' |
689 | } |
690 | } |
LGTM