Merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-updates into lp:~openstack-charmers/charms/trusty/quantum-gateway/next
- Trusty Tahr (14.04)
- amulet-updates
- Merge into next
Proposed by
Corey Bryant
Status: | Merged |
---|---|
Merged at revision: | 69 |
Proposed branch: | lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-updates |
Merge into: | lp:~openstack-charmers/charms/trusty/quantum-gateway/next |
Diff against target: |
1141 lines (+490/-186) 14 files modified
Makefile (+2/-1) hooks/charmhelpers/contrib/hahelpers/apache.py (+10/-3) hooks/charmhelpers/contrib/hahelpers/cluster.py (+1/-2) hooks/charmhelpers/contrib/network/ip.py (+101/-14) hooks/charmhelpers/contrib/openstack/amulet/deployment.py (+30/-33) hooks/charmhelpers/contrib/openstack/context.py (+201/-71) hooks/charmhelpers/contrib/openstack/ip.py (+1/-1) hooks/charmhelpers/contrib/openstack/utils.py (+28/-1) hooks/charmhelpers/core/sysctl.py (+34/-0) tests/00-setup (+5/-4) tests/README (+6/-0) tests/basic_deployment.py (+26/-13) tests/charmhelpers/contrib/amulet/deployment.py (+15/-10) tests/charmhelpers/contrib/openstack/amulet/deployment.py (+30/-33) |
To merge this branch: | bzr merge lp:~corey.bryant/charms/trusty/quantum-gateway/amulet-updates |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
OpenStack Charmers | Pending | ||
Review via email:
|
Commit message
Description of the change
To post a comment you must log in.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === modified file 'Makefile' |
2 | --- Makefile 2014-07-29 07:46:01 +0000 |
3 | +++ Makefile 2014-10-07 21:21:43 +0000 |
4 | @@ -23,7 +23,8 @@ |
5 | # coreycb note: The -v should only be temporary until Amulet sends |
6 | # raise_status() messages to stderr: |
7 | # https://bugs.launchpad.net/amulet/+bug/1320357 |
8 | - @juju test -v -p AMULET_HTTP_PROXY |
9 | + @juju test -v -p AMULET_HTTP_PROXY --timeout 900 \ |
10 | + 00-setup 14-basic-precise-icehouse 15-basic-trusty-icehouse |
11 | |
12 | publish: lint unit_test |
13 | bzr push lp:charms/quantum-gateway |
14 | |
15 | === modified file 'hooks/charmhelpers/contrib/hahelpers/apache.py' |
16 | --- hooks/charmhelpers/contrib/hahelpers/apache.py 2014-03-27 11:20:28 +0000 |
17 | +++ hooks/charmhelpers/contrib/hahelpers/apache.py 2014-10-07 21:21:43 +0000 |
18 | @@ -20,20 +20,27 @@ |
19 | ) |
20 | |
21 | |
22 | -def get_cert(): |
23 | +def get_cert(cn=None): |
24 | + # TODO: deal with multiple https endpoints via charm config |
25 | cert = config_get('ssl_cert') |
26 | key = config_get('ssl_key') |
27 | if not (cert and key): |
28 | log("Inspecting identity-service relations for SSL certificate.", |
29 | level=INFO) |
30 | cert = key = None |
31 | + if cn: |
32 | + ssl_cert_attr = 'ssl_cert_{}'.format(cn) |
33 | + ssl_key_attr = 'ssl_key_{}'.format(cn) |
34 | + else: |
35 | + ssl_cert_attr = 'ssl_cert' |
36 | + ssl_key_attr = 'ssl_key' |
37 | for r_id in relation_ids('identity-service'): |
38 | for unit in relation_list(r_id): |
39 | if not cert: |
40 | - cert = relation_get('ssl_cert', |
41 | + cert = relation_get(ssl_cert_attr, |
42 | rid=r_id, unit=unit) |
43 | if not key: |
44 | - key = relation_get('ssl_key', |
45 | + key = relation_get(ssl_key_attr, |
46 | rid=r_id, unit=unit) |
47 | return (cert, key) |
48 | |
49 | |
50 | === modified file 'hooks/charmhelpers/contrib/hahelpers/cluster.py' |
51 | --- hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-08-13 13:12:47 +0000 |
52 | +++ hooks/charmhelpers/contrib/hahelpers/cluster.py 2014-10-07 21:21:43 +0000 |
53 | @@ -139,10 +139,9 @@ |
54 | return True |
55 | for r_id in relation_ids('identity-service'): |
56 | for unit in relation_list(r_id): |
57 | + # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN |
58 | rel_state = [ |
59 | relation_get('https_keystone', rid=r_id, unit=unit), |
60 | - relation_get('ssl_cert', rid=r_id, unit=unit), |
61 | - relation_get('ssl_key', rid=r_id, unit=unit), |
62 | relation_get('ca_cert', rid=r_id, unit=unit), |
63 | ] |
64 | # NOTE: works around (LP: #1203241) |
65 | |
66 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' |
67 | --- hooks/charmhelpers/contrib/network/ip.py 2014-09-19 10:56:29 +0000 |
68 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-10-07 21:21:43 +0000 |
69 | @@ -1,11 +1,16 @@ |
70 | import glob |
71 | +import re |
72 | +import subprocess |
73 | import sys |
74 | |
75 | from functools import partial |
76 | |
77 | +from charmhelpers.core.hookenv import unit_get |
78 | from charmhelpers.fetch import apt_install |
79 | from charmhelpers.core.hookenv import ( |
80 | - ERROR, log, |
81 | + WARNING, |
82 | + ERROR, |
83 | + log |
84 | ) |
85 | |
86 | try: |
87 | @@ -52,6 +57,8 @@ |
88 | else: |
89 | if fatal: |
90 | not_found_error_out() |
91 | + else: |
92 | + return None |
93 | |
94 | _validate_cidr(network) |
95 | network = netaddr.IPNetwork(network) |
96 | @@ -164,13 +171,14 @@ |
97 | if is_ipv6(address): |
98 | address = "[%s]" % address |
99 | else: |
100 | - log("Not an valid ipv6 address: %s" % address, |
101 | - level=ERROR) |
102 | + log("Not a valid ipv6 address: %s" % address, level=WARNING) |
103 | address = None |
104 | + |
105 | return address |
106 | |
107 | |
108 | -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, fatal=True, exc_list=None): |
109 | +def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, |
110 | + fatal=True, exc_list=None): |
111 | """ |
112 | Return the assigned IP address for a given interface, if any, or []. |
113 | """ |
114 | @@ -210,26 +218,105 @@ |
115 | if 'addr' in entry and entry['addr'] not in exc_list: |
116 | addresses.append(entry['addr']) |
117 | if fatal and not addresses: |
118 | - raise Exception("Interface '%s' doesn't have any %s addresses." % (iface, inet_type)) |
119 | + raise Exception("Interface '%s' doesn't have any %s addresses." % |
120 | + (iface, inet_type)) |
121 | return addresses |
122 | |
123 | get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') |
124 | |
125 | |
126 | -def get_ipv6_addr(iface='eth0', inc_aliases=False, fatal=True, exc_list=None): |
127 | +def get_iface_from_addr(addr): |
128 | + """Work out on which interface the provided address is configured.""" |
129 | + for iface in netifaces.interfaces(): |
130 | + addresses = netifaces.ifaddresses(iface) |
131 | + for inet_type in addresses: |
132 | + for _addr in addresses[inet_type]: |
133 | + _addr = _addr['addr'] |
134 | + # link local |
135 | + ll_key = re.compile("(.+)%.*") |
136 | + raw = re.match(ll_key, _addr) |
137 | + if raw: |
138 | + _addr = raw.group(1) |
139 | + if _addr == addr: |
140 | + log("Address '%s' is configured on iface '%s'" % |
141 | + (addr, iface)) |
142 | + return iface |
143 | + |
144 | + msg = "Unable to infer net iface on which '%s' is configured" % (addr) |
145 | + raise Exception(msg) |
146 | + |
147 | + |
148 | +def sniff_iface(f): |
149 | + """If no iface provided, inject net iface inferred from unit private |
150 | + address. |
151 | """ |
152 | - Return the assigned IPv6 address for a given interface, if any, or []. |
153 | + def iface_sniffer(*args, **kwargs): |
154 | + if not kwargs.get('iface', None): |
155 | + kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) |
156 | + |
157 | + return f(*args, **kwargs) |
158 | + |
159 | + return iface_sniffer |
160 | + |
161 | + |
162 | +@sniff_iface |
163 | +def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, |
164 | + dynamic_only=True): |
165 | + """Get assigned IPv6 address for a given interface. |
166 | + |
167 | + Returns list of addresses found. If no address found, returns empty list. |
168 | + |
169 | + If iface is None, we infer the current primary interface by doing a reverse |
170 | + lookup on the unit private-address. |
171 | + |
172 | + We currently only support scope global IPv6 addresses i.e. non-temporary |
173 | + addresses. If no global IPv6 address is found, return the first one found |
174 | + in the ipv6 address list. |
175 | """ |
176 | addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', |
177 | inc_aliases=inc_aliases, fatal=fatal, |
178 | exc_list=exc_list) |
179 | - remotly_addressable = [] |
180 | - for address in addresses: |
181 | - if not address.startswith('fe80'): |
182 | - remotly_addressable.append(address) |
183 | - if fatal and not remotly_addressable: |
184 | - raise Exception("Interface '%s' doesn't have global ipv6 address." % iface) |
185 | - return remotly_addressable |
186 | + |
187 | + if addresses: |
188 | + global_addrs = [] |
189 | + for addr in addresses: |
190 | + key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") |
191 | + m = re.match(key_scope_link_local, addr) |
192 | + if m: |
193 | + eui_64_mac = m.group(1) |
194 | + iface = m.group(2) |
195 | + else: |
196 | + global_addrs.append(addr) |
197 | + |
198 | + if global_addrs: |
199 | + # Make sure any found global addresses are not temporary |
200 | + cmd = ['ip', 'addr', 'show', iface] |
201 | + out = subprocess.check_output(cmd) |
202 | + if dynamic_only: |
203 | + key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") |
204 | + else: |
205 | + key = re.compile("inet6 (.+)/[0-9]+ scope global.*") |
206 | + |
207 | + addrs = [] |
208 | + for line in out.split('\n'): |
209 | + line = line.strip() |
210 | + m = re.match(key, line) |
211 | + if m and 'temporary' not in line: |
212 | + # Return the first valid address we find |
213 | + for addr in global_addrs: |
214 | + if m.group(1) == addr: |
215 | + if not dynamic_only or \ |
216 | + m.group(1).endswith(eui_64_mac): |
217 | + addrs.append(addr) |
218 | + |
219 | + if addrs: |
220 | + return addrs |
221 | + |
222 | + if fatal: |
223 | + raise Exception("Interface '%s' doesn't have a scope global " |
224 | + "non-temporary ipv6 address." % iface) |
225 | + |
226 | + return [] |
227 | |
228 | |
229 | def get_bridges(vnic_dir='/sys/devices/virtual/net'): |
230 | |
231 | === modified file 'hooks/charmhelpers/contrib/openstack/amulet/deployment.py' |
232 | --- hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-25 15:37:05 +0000 |
233 | +++ hooks/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-07 21:21:43 +0000 |
234 | @@ -1,6 +1,3 @@ |
235 | -from bzrlib.branch import Branch |
236 | -import os |
237 | -import re |
238 | from charmhelpers.contrib.amulet.deployment import ( |
239 | AmuletDeployment |
240 | ) |
241 | @@ -13,62 +10,62 @@ |
242 | that is specifically for use by OpenStack charms. |
243 | """ |
244 | |
245 | - def __init__(self, series=None, openstack=None, source=None): |
246 | + def __init__(self, series=None, openstack=None, source=None, stable=True): |
247 | """Initialize the deployment environment.""" |
248 | super(OpenStackAmuletDeployment, self).__init__(series) |
249 | self.openstack = openstack |
250 | self.source = source |
251 | - |
252 | - def _is_dev_branch(self): |
253 | - """Determine if branch being tested is a dev (i.e. next) branch.""" |
254 | - branch = Branch.open(os.getcwd()) |
255 | - parent = branch.get_parent() |
256 | - pattern = re.compile("^.*/next/$") |
257 | - if (pattern.match(parent)): |
258 | - return True |
259 | - else: |
260 | - return False |
261 | + self.stable = stable |
262 | + # Note(coreycb): this needs to be changed when new next branches come |
263 | + # out. |
264 | + self.current_next = "trusty" |
265 | |
266 | def _determine_branch_locations(self, other_services): |
267 | """Determine the branch locations for the other services. |
268 | |
269 | - If the branch being tested is a dev branch, then determine the |
270 | - development branch locations for the other services. Otherwise, |
271 | - the default charm store branches will be used.""" |
272 | - name = 0 |
273 | - if self._is_dev_branch(): |
274 | - updated_services = [] |
275 | - for svc in other_services: |
276 | - if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: |
277 | - location = 'lp:charms/{}'.format(svc[name]) |
278 | + Determine if the local branch being tested is derived from its |
279 | + stable or next (dev) branch, and based on this, use the corresponding |
280 | + stable or next branches for the other_services.""" |
281 | + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] |
282 | + |
283 | + if self.stable: |
284 | + for svc in other_services: |
285 | + temp = 'lp:charms/{}' |
286 | + svc['location'] = temp.format(svc['name']) |
287 | + else: |
288 | + for svc in other_services: |
289 | + if svc['name'] in base_charms: |
290 | + temp = 'lp:charms/{}' |
291 | + svc['location'] = temp.format(svc['name']) |
292 | else: |
293 | - temp = 'lp:~openstack-charmers/charms/trusty/{}/next' |
294 | - location = temp.format(svc[name]) |
295 | - updated_services.append(svc + (location,)) |
296 | - other_services = updated_services |
297 | + temp = 'lp:~openstack-charmers/charms/{}/{}/next' |
298 | + svc['location'] = temp.format(self.current_next, |
299 | + svc['name']) |
300 | return other_services |
301 | |
302 | def _add_services(self, this_service, other_services): |
303 | """Add services to the deployment and set openstack-origin/source.""" |
304 | - name = 0 |
305 | other_services = self._determine_branch_locations(other_services) |
306 | + |
307 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
308 | other_services) |
309 | + |
310 | services = other_services |
311 | services.append(this_service) |
312 | - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] |
313 | + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
314 | + 'ceph-osd', 'ceph-radosgw'] |
315 | |
316 | if self.openstack: |
317 | for svc in services: |
318 | - if svc[name] not in use_source: |
319 | + if svc['name'] not in use_source: |
320 | config = {'openstack-origin': self.openstack} |
321 | - self.d.configure(svc[name], config) |
322 | + self.d.configure(svc['name'], config) |
323 | |
324 | if self.source: |
325 | for svc in services: |
326 | - if svc[name] in use_source: |
327 | + if svc['name'] in use_source: |
328 | config = {'source': self.source} |
329 | - self.d.configure(svc[name], config) |
330 | + self.d.configure(svc['name'], config) |
331 | |
332 | def _configure_services(self, configs): |
333 | """Configure all of the services.""" |
334 | |
335 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' |
336 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-09-25 15:37:05 +0000 |
337 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-10-07 21:21:43 +0000 |
338 | @@ -8,7 +8,6 @@ |
339 | check_call |
340 | ) |
341 | |
342 | - |
343 | from charmhelpers.fetch import ( |
344 | apt_install, |
345 | filter_installed_packages, |
346 | @@ -28,6 +27,11 @@ |
347 | INFO |
348 | ) |
349 | |
350 | +from charmhelpers.core.host import ( |
351 | + mkdir, |
352 | + write_file |
353 | +) |
354 | + |
355 | from charmhelpers.contrib.hahelpers.cluster import ( |
356 | determine_apache_port, |
357 | determine_api_port, |
358 | @@ -38,6 +42,7 @@ |
359 | from charmhelpers.contrib.hahelpers.apache import ( |
360 | get_cert, |
361 | get_ca_cert, |
362 | + install_ca_cert, |
363 | ) |
364 | |
365 | from charmhelpers.contrib.openstack.neutron import ( |
366 | @@ -47,8 +52,13 @@ |
367 | from charmhelpers.contrib.network.ip import ( |
368 | get_address_in_network, |
369 | get_ipv6_addr, |
370 | + get_netmask_for_address, |
371 | + format_ipv6_addr, |
372 | + is_address_in_network |
373 | ) |
374 | |
375 | +from charmhelpers.contrib.openstack.utils import get_host_ip |
376 | + |
377 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
378 | |
379 | |
380 | @@ -168,8 +178,10 @@ |
381 | for rid in relation_ids('shared-db'): |
382 | for unit in related_units(rid): |
383 | rdata = relation_get(rid=rid, unit=unit) |
384 | + host = rdata.get('db_host') |
385 | + host = format_ipv6_addr(host) or host |
386 | ctxt = { |
387 | - 'database_host': rdata.get('db_host'), |
388 | + 'database_host': host, |
389 | 'database': self.database, |
390 | 'database_user': self.user, |
391 | 'database_password': rdata.get(password_setting), |
392 | @@ -245,10 +257,15 @@ |
393 | for rid in relation_ids('identity-service'): |
394 | for unit in related_units(rid): |
395 | rdata = relation_get(rid=rid, unit=unit) |
396 | + serv_host = rdata.get('service_host') |
397 | + serv_host = format_ipv6_addr(serv_host) or serv_host |
398 | + auth_host = rdata.get('auth_host') |
399 | + auth_host = format_ipv6_addr(auth_host) or auth_host |
400 | + |
401 | ctxt = { |
402 | 'service_port': rdata.get('service_port'), |
403 | - 'service_host': rdata.get('service_host'), |
404 | - 'auth_host': rdata.get('auth_host'), |
405 | + 'service_host': serv_host, |
406 | + 'auth_host': auth_host, |
407 | 'auth_port': rdata.get('auth_port'), |
408 | 'admin_tenant_name': rdata.get('service_tenant'), |
409 | 'admin_user': rdata.get('service_username'), |
410 | @@ -297,11 +314,13 @@ |
411 | for unit in related_units(rid): |
412 | if relation_get('clustered', rid=rid, unit=unit): |
413 | ctxt['clustered'] = True |
414 | - ctxt['rabbitmq_host'] = relation_get('vip', rid=rid, |
415 | - unit=unit) |
416 | + vip = relation_get('vip', rid=rid, unit=unit) |
417 | + vip = format_ipv6_addr(vip) or vip |
418 | + ctxt['rabbitmq_host'] = vip |
419 | else: |
420 | - ctxt['rabbitmq_host'] = relation_get('private-address', |
421 | - rid=rid, unit=unit) |
422 | + host = relation_get('private-address', rid=rid, unit=unit) |
423 | + host = format_ipv6_addr(host) or host |
424 | + ctxt['rabbitmq_host'] = host |
425 | ctxt.update({ |
426 | 'rabbitmq_user': username, |
427 | 'rabbitmq_password': relation_get('password', rid=rid, |
428 | @@ -340,8 +359,9 @@ |
429 | and len(related_units(rid)) > 1: |
430 | rabbitmq_hosts = [] |
431 | for unit in related_units(rid): |
432 | - rabbitmq_hosts.append(relation_get('private-address', |
433 | - rid=rid, unit=unit)) |
434 | + host = relation_get('private-address', rid=rid, unit=unit) |
435 | + host = format_ipv6_addr(host) or host |
436 | + rabbitmq_hosts.append(host) |
437 | ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) |
438 | if not context_complete(ctxt): |
439 | return {} |
440 | @@ -370,6 +390,7 @@ |
441 | ceph_addr = \ |
442 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ |
443 | relation_get('private-address', rid=rid, unit=unit) |
444 | + ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr |
445 | mon_hosts.append(ceph_addr) |
446 | |
447 | ctxt = { |
448 | @@ -390,6 +411,9 @@ |
449 | return ctxt |
450 | |
451 | |
452 | +ADDRESS_TYPES = ['admin', 'internal', 'public'] |
453 | + |
454 | + |
455 | class HAProxyContext(OSContextGenerator): |
456 | interfaces = ['cluster'] |
457 | |
458 | @@ -402,29 +426,62 @@ |
459 | if not relation_ids('cluster'): |
460 | return {} |
461 | |
462 | + l_unit = local_unit().replace('/', '-') |
463 | + |
464 | + if config('prefer-ipv6'): |
465 | + addr = get_ipv6_addr(exc_list=[config('vip')])[0] |
466 | + else: |
467 | + addr = get_host_ip(unit_get('private-address')) |
468 | + |
469 | cluster_hosts = {} |
470 | - l_unit = local_unit().replace('/', '-') |
471 | - if config('prefer-ipv6'): |
472 | - addr = get_ipv6_addr() |
473 | - else: |
474 | - addr = unit_get('private-address') |
475 | - cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'), |
476 | - addr) |
477 | - |
478 | - for rid in relation_ids('cluster'): |
479 | - for unit in related_units(rid): |
480 | - _unit = unit.replace('/', '-') |
481 | - addr = relation_get('private-address', rid=rid, unit=unit) |
482 | - cluster_hosts[_unit] = addr |
483 | + |
484 | + # NOTE(jamespage): build out map of configured network endpoints |
485 | + # and associated backends |
486 | + for addr_type in ADDRESS_TYPES: |
487 | + laddr = get_address_in_network( |
488 | + config('os-{}-network'.format(addr_type))) |
489 | + if laddr: |
490 | + cluster_hosts[laddr] = {} |
491 | + cluster_hosts[laddr]['network'] = "{}/{}".format( |
492 | + laddr, |
493 | + get_netmask_for_address(laddr) |
494 | + ) |
495 | + cluster_hosts[laddr]['backends'] = {} |
496 | + cluster_hosts[laddr]['backends'][l_unit] = laddr |
497 | + for rid in relation_ids('cluster'): |
498 | + for unit in related_units(rid): |
499 | + _unit = unit.replace('/', '-') |
500 | + _laddr = relation_get('{}-address'.format(addr_type), |
501 | + rid=rid, unit=unit) |
502 | + if _laddr: |
503 | + cluster_hosts[laddr]['backends'][_unit] = _laddr |
504 | + |
505 | + # NOTE(jamespage) no split configurations found, just use |
506 | + # private addresses |
507 | + if not cluster_hosts: |
508 | + cluster_hosts[addr] = {} |
509 | + cluster_hosts[addr]['network'] = "{}/{}".format( |
510 | + addr, |
511 | + get_netmask_for_address(addr) |
512 | + ) |
513 | + cluster_hosts[addr]['backends'] = {} |
514 | + cluster_hosts[addr]['backends'][l_unit] = addr |
515 | + for rid in relation_ids('cluster'): |
516 | + for unit in related_units(rid): |
517 | + _unit = unit.replace('/', '-') |
518 | + _laddr = relation_get('private-address', |
519 | + rid=rid, unit=unit) |
520 | + if _laddr: |
521 | + cluster_hosts[addr]['backends'][_unit] = _laddr |
522 | |
523 | ctxt = { |
524 | - 'units': cluster_hosts, |
525 | + 'frontends': cluster_hosts, |
526 | } |
527 | |
528 | if config('haproxy-server-timeout'): |
529 | - ctxt['haproxy-server-timeout'] = config('haproxy-server-timeout') |
530 | + ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') |
531 | if config('haproxy-client-timeout'): |
532 | - ctxt['haproxy-client-timeout'] = config('haproxy-client-timeout') |
533 | + ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') |
534 | |
535 | if config('prefer-ipv6'): |
536 | ctxt['local_host'] = 'ip6-localhost' |
537 | @@ -435,12 +492,13 @@ |
538 | ctxt['haproxy_host'] = '0.0.0.0' |
539 | ctxt['stat_port'] = ':8888' |
540 | |
541 | - if len(cluster_hosts.keys()) > 1: |
542 | - # Enable haproxy when we have enough peers. |
543 | - log('Ensuring haproxy enabled in /etc/default/haproxy.') |
544 | - with open('/etc/default/haproxy', 'w') as out: |
545 | - out.write('ENABLED=1\n') |
546 | - return ctxt |
547 | + for frontend in cluster_hosts: |
548 | + if len(cluster_hosts[frontend]['backends']) > 1: |
549 | + # Enable haproxy when we have enough peers. |
550 | + log('Ensuring haproxy enabled in /etc/default/haproxy.') |
551 | + with open('/etc/default/haproxy', 'w') as out: |
552 | + out.write('ENABLED=1\n') |
553 | + return ctxt |
554 | log('HAProxy context is incomplete, this unit has no peers.') |
555 | return {} |
556 | |
557 | @@ -495,22 +553,36 @@ |
558 | cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] |
559 | check_call(cmd) |
560 | |
561 | - def configure_cert(self): |
562 | - if not os.path.isdir('/etc/apache2/ssl'): |
563 | - os.mkdir('/etc/apache2/ssl') |
564 | + def configure_cert(self, cn=None): |
565 | ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) |
566 | - if not os.path.isdir(ssl_dir): |
567 | - os.mkdir(ssl_dir) |
568 | - cert, key = get_cert() |
569 | - with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: |
570 | - cert_out.write(b64decode(cert)) |
571 | - with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: |
572 | - key_out.write(b64decode(key)) |
573 | + mkdir(path=ssl_dir) |
574 | + cert, key = get_cert(cn) |
575 | + if cn: |
576 | + cert_filename = 'cert_{}'.format(cn) |
577 | + key_filename = 'key_{}'.format(cn) |
578 | + else: |
579 | + cert_filename = 'cert' |
580 | + key_filename = 'key' |
581 | + write_file(path=os.path.join(ssl_dir, cert_filename), |
582 | + content=b64decode(cert)) |
583 | + write_file(path=os.path.join(ssl_dir, key_filename), |
584 | + content=b64decode(key)) |
585 | + |
586 | + def configure_ca(self): |
587 | ca_cert = get_ca_cert() |
588 | if ca_cert: |
589 | - with open(CA_CERT_PATH, 'w') as ca_out: |
590 | - ca_out.write(b64decode(ca_cert)) |
591 | - check_call(['update-ca-certificates']) |
592 | + install_ca_cert(b64decode(ca_cert)) |
593 | + |
594 | + def canonical_names(self): |
595 | + '''Figure out which canonical names clients will access this service''' |
596 | + cns = [] |
597 | + for r_id in relation_ids('identity-service'): |
598 | + for unit in related_units(r_id): |
599 | + rdata = relation_get(rid=r_id, unit=unit) |
600 | + for k in rdata: |
601 | + if k.startswith('ssl_key_'): |
602 | + cns.append(k.lstrip('ssl_key_')) |
603 | + return list(set(cns)) |
604 | |
605 | def __call__(self): |
606 | if isinstance(self.external_ports, basestring): |
607 | @@ -518,21 +590,47 @@ |
608 | if (not self.external_ports or not https()): |
609 | return {} |
610 | |
611 | - self.configure_cert() |
612 | + self.configure_ca() |
613 | self.enable_modules() |
614 | |
615 | ctxt = { |
616 | 'namespace': self.service_namespace, |
617 | - 'private_address': unit_get('private-address'), |
618 | - 'endpoints': [] |
619 | + 'endpoints': [], |
620 | + 'ext_ports': [] |
621 | } |
622 | - if is_clustered(): |
623 | - ctxt['private_address'] = config('vip') |
624 | - for api_port in self.external_ports: |
625 | - ext_port = determine_apache_port(api_port) |
626 | - int_port = determine_api_port(api_port) |
627 | - portmap = (int(ext_port), int(int_port)) |
628 | - ctxt['endpoints'].append(portmap) |
629 | + |
630 | + for cn in self.canonical_names(): |
631 | + self.configure_cert(cn) |
632 | + |
633 | + addresses = [] |
634 | + vips = [] |
635 | + if config('vip'): |
636 | + vips = config('vip').split() |
637 | + |
638 | + for network_type in ['os-internal-network', |
639 | + 'os-admin-network', |
640 | + 'os-public-network']: |
641 | + address = get_address_in_network(config(network_type), |
642 | + unit_get('private-address')) |
643 | + if len(vips) > 0 and is_clustered(): |
644 | + for vip in vips: |
645 | + if is_address_in_network(config(network_type), |
646 | + vip): |
647 | + addresses.append((address, vip)) |
648 | + break |
649 | + elif is_clustered(): |
650 | + addresses.append((address, config('vip'))) |
651 | + else: |
652 | + addresses.append((address, address)) |
653 | + |
654 | + for address, endpoint in set(addresses): |
655 | + for api_port in self.external_ports: |
656 | + ext_port = determine_apache_port(api_port) |
657 | + int_port = determine_api_port(api_port) |
658 | + portmap = (address, endpoint, int(ext_port), int(int_port)) |
659 | + ctxt['endpoints'].append(portmap) |
660 | + ctxt['ext_ports'].append(int(ext_port)) |
661 | + ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) |
662 | return ctxt |
663 | |
664 | |
665 | @@ -662,22 +760,22 @@ |
666 | |
667 | class OSConfigFlagContext(OSContextGenerator): |
668 | |
669 | - """ |
670 | - Responsible for adding user-defined config-flags in charm config to a |
671 | - template context. |
672 | - |
673 | - NOTE: the value of config-flags may be a comma-separated list of |
674 | - key=value pairs and some Openstack config files support |
675 | - comma-separated lists as values. |
676 | - """ |
677 | - |
678 | - def __call__(self): |
679 | - config_flags = config('config-flags') |
680 | - if not config_flags: |
681 | - return {} |
682 | - |
683 | - flags = config_flags_parser(config_flags) |
684 | - return {'user_config_flags': flags} |
685 | + """ |
686 | + Responsible for adding user-defined config-flags in charm config to a |
687 | + template context. |
688 | + |
689 | + NOTE: the value of config-flags may be a comma-separated list of |
690 | + key=value pairs and some Openstack config files support |
691 | + comma-separated lists as values. |
692 | + """ |
693 | + |
694 | + def __call__(self): |
695 | + config_flags = config('config-flags') |
696 | + if not config_flags: |
697 | + return {} |
698 | + |
699 | + flags = config_flags_parser(config_flags) |
700 | + return {'user_config_flags': flags} |
701 | |
702 | |
703 | class SubordinateConfigContext(OSContextGenerator): |
704 | @@ -792,3 +890,35 @@ |
705 | 'use_syslog': config('use-syslog') |
706 | } |
707 | return ctxt |
708 | + |
709 | + |
710 | +class BindHostContext(OSContextGenerator): |
711 | + |
712 | + def __call__(self): |
713 | + if config('prefer-ipv6'): |
714 | + return { |
715 | + 'bind_host': '::' |
716 | + } |
717 | + else: |
718 | + return { |
719 | + 'bind_host': '0.0.0.0' |
720 | + } |
721 | + |
722 | + |
723 | +class WorkerConfigContext(OSContextGenerator): |
724 | + |
725 | + @property |
726 | + def num_cpus(self): |
727 | + try: |
728 | + from psutil import NUM_CPUS |
729 | + except ImportError: |
730 | + apt_install('python-psutil', fatal=True) |
731 | + from psutil import NUM_CPUS |
732 | + return NUM_CPUS |
733 | + |
734 | + def __call__(self): |
735 | + multiplier = config('worker-multiplier') or 1 |
736 | + ctxt = { |
737 | + "workers": self.num_cpus * multiplier |
738 | + } |
739 | + return ctxt |
740 | |
741 | === modified file 'hooks/charmhelpers/contrib/openstack/ip.py' |
742 | --- hooks/charmhelpers/contrib/openstack/ip.py 2014-08-13 13:12:47 +0000 |
743 | +++ hooks/charmhelpers/contrib/openstack/ip.py 2014-10-07 21:21:43 +0000 |
744 | @@ -66,7 +66,7 @@ |
745 | resolved_address = vip |
746 | else: |
747 | if config('prefer-ipv6'): |
748 | - fallback_addr = get_ipv6_addr() |
749 | + fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0] |
750 | else: |
751 | fallback_addr = unit_get(_address_map[endpoint_type]['fallback']) |
752 | resolved_address = get_address_in_network( |
753 | |
754 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' |
755 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-09-17 10:33:02 +0000 |
756 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-10-07 21:21:43 +0000 |
757 | @@ -4,6 +4,7 @@ |
758 | from collections import OrderedDict |
759 | |
760 | import subprocess |
761 | +import json |
762 | import os |
763 | import socket |
764 | import sys |
765 | @@ -13,7 +14,9 @@ |
766 | log as juju_log, |
767 | charm_dir, |
768 | ERROR, |
769 | - INFO |
770 | + INFO, |
771 | + relation_ids, |
772 | + relation_set |
773 | ) |
774 | |
775 | from charmhelpers.contrib.storage.linux.lvm import ( |
776 | @@ -22,6 +25,10 @@ |
777 | remove_lvm_physical_volume, |
778 | ) |
779 | |
780 | +from charmhelpers.contrib.network.ip import ( |
781 | + get_ipv6_addr |
782 | +) |
783 | + |
784 | from charmhelpers.core.host import lsb_release, mounts, umount |
785 | from charmhelpers.fetch import apt_install, apt_cache |
786 | from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk |
787 | @@ -71,6 +78,8 @@ |
788 | ('1.12.0', 'icehouse'), |
789 | ('1.11.0', 'icehouse'), |
790 | ('2.0.0', 'juno'), |
791 | + ('2.1.0', 'juno'), |
792 | + ('2.2.0', 'juno'), |
793 | ]) |
794 | |
795 | DEFAULT_LOOPBACK_SIZE = '5G' |
796 | @@ -457,3 +466,21 @@ |
797 | return result |
798 | else: |
799 | return result.split('.')[0] |
800 | + |
801 | + |
802 | +def sync_db_with_multi_ipv6_addresses(database, database_user, |
803 | + relation_prefix=None): |
804 | + hosts = get_ipv6_addr(dynamic_only=False) |
805 | + |
806 | + kwargs = {'database': database, |
807 | + 'username': database_user, |
808 | + 'hostname': json.dumps(hosts)} |
809 | + |
810 | + if relation_prefix: |
811 | + keys = kwargs.keys() |
812 | + for key in keys: |
813 | + kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] |
814 | + del kwargs[key] |
815 | + |
816 | + for rid in relation_ids('shared-db'): |
817 | + relation_set(relation_id=rid, **kwargs) |
818 | |
819 | === added file 'hooks/charmhelpers/core/sysctl.py' |
820 | --- hooks/charmhelpers/core/sysctl.py 1970-01-01 00:00:00 +0000 |
821 | +++ hooks/charmhelpers/core/sysctl.py 2014-10-07 21:21:43 +0000 |
822 | @@ -0,0 +1,34 @@ |
823 | +#!/usr/bin/env python |
824 | +# -*- coding: utf-8 -*- |
825 | + |
826 | +__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>' |
827 | + |
828 | +import yaml |
829 | + |
830 | +from subprocess import check_call |
831 | + |
832 | +from charmhelpers.core.hookenv import ( |
833 | + log, |
834 | + DEBUG, |
835 | +) |
836 | + |
837 | + |
838 | +def create(sysctl_dict, sysctl_file): |
839 | + """Creates a sysctl.conf file from a YAML associative array |
840 | + |
841 |  +    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
842 |  +    :type sysctl_dict: str or unicode
843 | + :param sysctl_file: path to the sysctl file to be saved |
844 | + :type sysctl_file: str or unicode |
845 | + :returns: None |
846 | + """ |
847 | + sysctl_dict = yaml.load(sysctl_dict) |
848 | + |
849 | + with open(sysctl_file, "w") as fd: |
850 | + for key, value in sysctl_dict.items(): |
851 | + fd.write("{}={}\n".format(key, value)) |
852 | + |
853 | + log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict), |
854 | + level=DEBUG) |
855 | + |
856 | + check_call(["sysctl", "-p", sysctl_file]) |
857 | |
858 | === modified file 'tests/00-setup' |
859 | --- tests/00-setup 2014-07-17 15:16:21 +0000 |
860 | +++ tests/00-setup 2014-10-07 21:21:43 +0000 |
861 | @@ -4,7 +4,8 @@ |
862 | |
863 | sudo add-apt-repository --yes ppa:juju/stable |
864 | sudo apt-get update --yes |
865 | -sudo apt-get install --yes python-amulet |
866 | -sudo apt-get install --yes python-neutronclient |
867 | -sudo apt-get install --yes python-keystoneclient |
868 | -sudo apt-get install --yes python-novaclient |
869 | +sudo apt-get install --yes python-amulet \ |
870 | + python-neutronclient \ |
871 | + python-keystoneclient \ |
872 | + python-novaclient \ |
873 | + python-glanceclient |
874 | |
875 | === modified file 'tests/README' |
876 | --- tests/README 2014-07-17 15:16:21 +0000 |
877 | +++ tests/README 2014-10-07 21:21:43 +0000 |
878 | @@ -1,6 +1,12 @@ |
879 | This directory provides Amulet tests that focus on verification of |
880 | quantum-gateway deployments. |
881 | |
882 | +In order to run tests, you'll need charm-tools installed (in addition to |
883 | +juju, of course): |
884 | + sudo add-apt-repository ppa:juju/stable |
885 | + sudo apt-get update |
886 | + sudo apt-get install charm-tools |
887 | + |
888 | If you use a web proxy server to access the web, you'll need to set the |
889 | AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. |
890 | |
891 | |
892 | === modified file 'tests/basic_deployment.py' |
893 | --- tests/basic_deployment.py 2014-09-08 19:02:06 +0000 |
894 | +++ tests/basic_deployment.py 2014-10-07 21:21:43 +0000 |
895 | @@ -1,6 +1,7 @@ |
896 | #!/usr/bin/python |
897 | |
898 | import amulet |
899 | +import time |
900 | try: |
901 | from quantumclient.v2_0 import client as neutronclient |
902 | except ImportError: |
903 | @@ -23,10 +24,10 @@ |
904 | class QuantumGatewayBasicDeployment(OpenStackAmuletDeployment): |
905 | """Amulet tests on a basic quantum-gateway deployment.""" |
906 | |
907 | - def __init__(self, series, openstack=None, source=None): |
908 | + def __init__(self, series, openstack=None, source=None, stable=False): |
909 | """Deploy the entire test environment.""" |
910 | super(QuantumGatewayBasicDeployment, self).__init__(series, openstack, |
911 | - source) |
912 | + source, stable) |
913 | self._add_services() |
914 | self._add_relations() |
915 | self._configure_services() |
916 | @@ -34,13 +35,16 @@ |
917 | self._initialize_tests() |
918 | |
919 | def _add_services(self): |
920 | - """Add the service that we're testing, including the number of units, |
921 | - where quantum-gateway is local, and the other charms are from |
922 | - the charm store.""" |
923 | - this_service = ('quantum-gateway', 1) |
924 | - other_services = [('mysql', 1), |
925 | - ('rabbitmq-server', 1), ('keystone', 1), |
926 | - ('nova-cloud-controller', 1)] |
927 | + """Add services |
928 | + |
929 | + Add the services that we're testing, where quantum-gateway is local, |
930 |  +        and the rest of the services are from lp branches that are
931 | + compatible with the local charm (e.g. stable or next). |
932 | + """ |
933 | + this_service = {'name': 'quantum-gateway'} |
934 | + other_services = [{'name': 'mysql'}, |
935 | + {'name': 'rabbitmq-server'}, {'name': 'keystone'}, |
936 | + {'name': 'nova-cloud-controller'}] |
937 | super(QuantumGatewayBasicDeployment, self)._add_services(this_service, |
938 | other_services) |
939 | |
940 | @@ -77,6 +81,9 @@ |
941 | self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0'] |
942 | self.quantum_gateway_sentry = self.d.sentry.unit['quantum-gateway/0'] |
943 | |
944 | + # Let things settle a bit before moving forward |
945 | + time.sleep(30) |
946 | + |
947 | # Authenticate admin with keystone |
948 | self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, |
949 | user='admin', |
950 | @@ -238,9 +245,14 @@ |
951 | message = u.relation_error('nova-cc network-service', ret) |
952 | amulet.raise_status(amulet.FAIL, msg=message) |
953 | |
954 | - def test_restart_on_config_change(self): |
955 | + def test_z_restart_on_config_change(self): |
956 | """Verify that the specified services are restarted when the config |
957 | - is changed.""" |
958 | + is changed. |
959 | + |
960 | + Note(coreycb): The method name with the _z_ is a little odd |
961 | + but it forces the test to run last. It just makes things |
962 | + easier because restarting services requires re-authorization. |
963 | + """ |
964 | if self._get_openstack_release() >= self.precise_havana: |
965 | conf = '/etc/neutron/neutron.conf' |
966 | services = ['neutron-dhcp-agent', 'neutron-openvswitch-agent', |
967 | @@ -261,6 +273,7 @@ |
968 | for s in services: |
969 | if not u.service_restarted(self.quantum_gateway_sentry, s, conf, |
970 | pgrep_full=True, sleep_time=time): |
971 | + self.d.configure('quantum-gateway', {'debug': 'False'}) |
972 | msg = "service {} didn't restart after config change".format(s) |
973 | amulet.raise_status(amulet.FAIL, msg=msg) |
974 | time = 0 |
975 | @@ -347,7 +360,7 @@ |
976 | 'ml2': { |
977 | 'type_drivers': 'gre,vxlan', |
978 | 'tenant_network_types': 'gre,vxlan', |
979 | - 'mechanism_drivers': 'openvswitch' |
980 | + 'mechanism_drivers': 'openvswitch,l2population' |
981 | }, |
982 | 'ml2_type_gre': { |
983 | 'tunnel_id_ranges': '1:1000' |
984 | @@ -629,7 +642,7 @@ |
985 | 'nova_metadata_port': '8775' |
986 | } |
987 | if self._get_openstack_release() >= self.precise_icehouse: |
988 | - expected['cache_url'] = 'memory://?default_ttl=5' |
989 | + expected['cache_url'] = 'memory://?default_ttl=5' |
990 | |
991 | ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) |
992 | if ret: |
993 | |
994 | === modified file 'tests/charmhelpers/contrib/amulet/deployment.py' |
995 | --- tests/charmhelpers/contrib/amulet/deployment.py 2014-09-25 15:37:05 +0000 |
996 | +++ tests/charmhelpers/contrib/amulet/deployment.py 2014-10-07 21:21:43 +0000 |
997 | @@ -25,25 +25,30 @@ |
998 | |
999 | Add services to the deployment where this_service is the local charm |
1000 | that we're testing and other_services are the other services that |
1001 | - are being used in the amulet tests. |
1002 | + are being used in the local amulet tests. |
1003 | """ |
1004 | - name, units, location = range(3) |
1005 | - |
1006 | - if this_service[name] != os.path.basename(os.getcwd()): |
1007 | - s = this_service[name] |
1008 | + if this_service['name'] != os.path.basename(os.getcwd()): |
1009 | + s = this_service['name'] |
1010 | msg = "The charm's root directory name needs to be {}".format(s) |
1011 | amulet.raise_status(amulet.FAIL, msg=msg) |
1012 | |
1013 | - self.d.add(this_service[name], units=this_service[units]) |
1014 | + if 'units' not in this_service: |
1015 | + this_service['units'] = 1 |
1016 | + |
1017 | + self.d.add(this_service['name'], units=this_service['units']) |
1018 | |
1019 | for svc in other_services: |
1020 | - if len(svc) > 2: |
1021 | - branch_location = svc[location] |
1022 | + if 'location' in svc: |
1023 | + branch_location = svc['location'] |
1024 | elif self.series: |
1025 | - branch_location = 'cs:{}/{}'.format(self.series, svc[name]), |
1026 | + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), |
1027 | else: |
1028 | branch_location = None |
1029 | - self.d.add(svc[name], charm=branch_location, units=svc[units]) |
1030 | + |
1031 | + if 'units' not in svc: |
1032 | + svc['units'] = 1 |
1033 | + |
1034 | + self.d.add(svc['name'], charm=branch_location, units=svc['units']) |
1035 | |
1036 | def _add_relations(self, relations): |
1037 | """Add all of the relations for the services.""" |
1038 | |
1039 | === modified file 'tests/charmhelpers/contrib/openstack/amulet/deployment.py' |
1040 | --- tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-09-25 15:37:05 +0000 |
1041 | +++ tests/charmhelpers/contrib/openstack/amulet/deployment.py 2014-10-07 21:21:43 +0000 |
1042 | @@ -1,6 +1,3 @@ |
1043 | -from bzrlib.branch import Branch |
1044 | -import os |
1045 | -import re |
1046 | from charmhelpers.contrib.amulet.deployment import ( |
1047 | AmuletDeployment |
1048 | ) |
1049 | @@ -13,62 +10,62 @@ |
1050 | that is specifically for use by OpenStack charms. |
1051 | """ |
1052 | |
1053 | - def __init__(self, series=None, openstack=None, source=None): |
1054 | + def __init__(self, series=None, openstack=None, source=None, stable=True): |
1055 | """Initialize the deployment environment.""" |
1056 | super(OpenStackAmuletDeployment, self).__init__(series) |
1057 | self.openstack = openstack |
1058 | self.source = source |
1059 | - |
1060 | - def _is_dev_branch(self): |
1061 | - """Determine if branch being tested is a dev (i.e. next) branch.""" |
1062 | - branch = Branch.open(os.getcwd()) |
1063 | - parent = branch.get_parent() |
1064 | - pattern = re.compile("^.*/next/$") |
1065 | - if (pattern.match(parent)): |
1066 | - return True |
1067 | - else: |
1068 | - return False |
1069 | + self.stable = stable |
1070 | + # Note(coreycb): this needs to be changed when new next branches come |
1071 | + # out. |
1072 | + self.current_next = "trusty" |
1073 | |
1074 | def _determine_branch_locations(self, other_services): |
1075 | """Determine the branch locations for the other services. |
1076 | |
1077 | - If the branch being tested is a dev branch, then determine the |
1078 | - development branch locations for the other services. Otherwise, |
1079 | - the default charm store branches will be used.""" |
1080 | - name = 0 |
1081 | - if self._is_dev_branch(): |
1082 | - updated_services = [] |
1083 | - for svc in other_services: |
1084 | - if svc[name] in ['mysql', 'mongodb', 'rabbitmq-server']: |
1085 | - location = 'lp:charms/{}'.format(svc[name]) |
1086 | + Determine if the local branch being tested is derived from its |
1087 |  +        stable or next (dev) branch, and based on this, use the corresponding
1088 | + stable or next branches for the other_services.""" |
1089 | + base_charms = ['mysql', 'mongodb', 'rabbitmq-server'] |
1090 | + |
1091 | + if self.stable: |
1092 | + for svc in other_services: |
1093 | + temp = 'lp:charms/{}' |
1094 | + svc['location'] = temp.format(svc['name']) |
1095 | + else: |
1096 | + for svc in other_services: |
1097 | + if svc['name'] in base_charms: |
1098 | + temp = 'lp:charms/{}' |
1099 | + svc['location'] = temp.format(svc['name']) |
1100 | else: |
1101 | - temp = 'lp:~openstack-charmers/charms/trusty/{}/next' |
1102 | - location = temp.format(svc[name]) |
1103 | - updated_services.append(svc + (location,)) |
1104 | - other_services = updated_services |
1105 | + temp = 'lp:~openstack-charmers/charms/{}/{}/next' |
1106 | + svc['location'] = temp.format(self.current_next, |
1107 | + svc['name']) |
1108 | return other_services |
1109 | |
1110 | def _add_services(self, this_service, other_services): |
1111 | """Add services to the deployment and set openstack-origin/source.""" |
1112 | - name = 0 |
1113 | other_services = self._determine_branch_locations(other_services) |
1114 | + |
1115 | super(OpenStackAmuletDeployment, self)._add_services(this_service, |
1116 | other_services) |
1117 | + |
1118 | services = other_services |
1119 | services.append(this_service) |
1120 | - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] |
1121 | + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', |
1122 | + 'ceph-osd', 'ceph-radosgw'] |
1123 | |
1124 | if self.openstack: |
1125 | for svc in services: |
1126 | - if svc[name] not in use_source: |
1127 | + if svc['name'] not in use_source: |
1128 | config = {'openstack-origin': self.openstack} |
1129 | - self.d.configure(svc[name], config) |
1130 | + self.d.configure(svc['name'], config) |
1131 | |
1132 | if self.source: |
1133 | for svc in services: |
1134 | - if svc[name] in use_source: |
1135 | + if svc['name'] in use_source: |
1136 | config = {'source': self.source} |
1137 | - self.d.configure(svc[name], config) |
1138 | + self.d.configure(svc['name'], config) |
1139 | |
1140 | def _configure_services(self, configs): |
1141 | """Configure all of the services.""" |