Merge lp:~springfield-team/charms/trusty/nova-cloud-controller/n1kv into lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next
- Trusty Tahr (14.04)
- n1kv
- Merge into next
Status: | Superseded |
---|---|
Proposed branch: | lp:~springfield-team/charms/trusty/nova-cloud-controller/n1kv |
Merge into: | lp:~openstack-charmers-archive/charms/trusty/nova-cloud-controller/next |
Diff against target: |
1976 lines (+597/-368) 13 files modified
hooks/charmhelpers/contrib/network/ip.py (+50/-48) hooks/charmhelpers/contrib/openstack/context.py (+303/-215) hooks/charmhelpers/contrib/openstack/neutron.py (+18/-2) hooks/charmhelpers/contrib/openstack/utils.py (+24/-0) hooks/charmhelpers/contrib/storage/linux/ceph.py (+81/-97) hooks/charmhelpers/core/hookenv.py (+6/-0) hooks/charmhelpers/core/host.py (+8/-2) hooks/charmhelpers/core/services/__init__.py (+2/-2) hooks/charmhelpers/fetch/__init__.py (+5/-1) hooks/charmhelpers/fetch/giturl.py (+44/-0) templates/icehouse/cisco_plugins.ini (+43/-0) templates/icehouse/nova.conf (+12/-0) tests/basic_deployment.py (+1/-1) |
To merge this branch: | bzr merge lp:~springfield-team/charms/trusty/nova-cloud-controller/n1kv |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Edward Hope-Morley | Pending | ||
Review via email: mp+242447@code.launchpad.net |
This proposal supersedes a proposal from 2014-11-20.
Commit message
Description of the change
uosci-testing-bot (uosci-testing-bot) wrote : | # |
uosci-testing-bot (uosci-testing-bot) wrote : | # |
UOSCI bot says:
charm_unit_test #1000 nova-cloud-
UNIT OK: passed
UNIT Results (max last 5 lines):
hooks/
hooks/
TOTAL 1038 366 65%
Ran 96 tests in 8.559s
OK
Full unit test output: http://
Build: http://
uosci-testing-bot (uosci-testing-bot) wrote : | # |
UOSCI bot says:
charm_amulet_test #508 nova-cloud-
AMULET FAIL: amulet-test failed
AMULET Results (max last 5 lines):
WARNING cannot delete security group "juju-osci-sv05-0". Used by another environment?
ERROR
juju-test INFO : Results: 0 passed, 2 failed, 1 errored
ERROR subprocess encountered error code 2
make: *** [test] Error 2
Full amulet test output: http://
Build: http://
- 128. By Jorge Niedbalski
-
[all] resync with /next && charm helpers "make sync"
uosci-testing-bot (uosci-testing-bot) wrote : | # |
UOSCI bot says:
charm_lint_check #1168 nova-cloud-
LINT OK: passed
LINT Results (max last 5 lines):
I: config.yaml: option os-admin-network has no default value
I: config.yaml: option haproxy-
I: config.yaml: option ssl_cert has no default value
I: config.yaml: option nvp-l3-uuid has no default value
I: config.yaml: option os-internal-network has no default value
Full lint test output: http://
Build: http://
uosci-testing-bot (uosci-testing-bot) wrote : | # |
UOSCI bot says:
charm_unit_test #1002 nova-cloud-
UNIT OK: passed
UNIT Results (max last 5 lines):
hooks/
hooks/
TOTAL 1038 366 65%
Ran 96 tests in 8.973s
OK
Full unit test output: http://
Build: http://
uosci-testing-bot (uosci-testing-bot) wrote : | # |
UOSCI bot says:
charm_amulet_test #510 nova-cloud-
AMULET FAIL: amulet-test failed
AMULET Results (max last 5 lines):
ERROR waited for 10m0s without being able to connect: ssh: connect to host 10.215.3.235 port 22: No route to host
juju-
juju-test INFO : Results: 0 passed, 0 failed, 3 errored
ERROR subprocess encountered error code 124
make: *** [test] Error 124
Full amulet test output: http://
Build: http://
- 129. By Shiv Prasad Rao
-
Changes for n1kv
- 130. By Edward Hope-Morley
-
[hopem] synced /next
Unmerged revisions
- 130. By Edward Hope-Morley
-
[hopem] synced /next
- 129. By Shiv Prasad Rao
-
Changes for n1kv
- 128. By Jorge Niedbalski
-
[all] resync with /next && charm helpers "make sync"
- 127. By Shiv Prasad Rao
-
Additional changes for n1kv
- 126. By Shiv Prasad Rao
-
Cisco Nexus 1000V changes
Preview Diff
1 | === modified file 'hooks/charmhelpers/contrib/network/ip.py' | |||
2 | --- hooks/charmhelpers/contrib/network/ip.py 2014-10-09 10:31:45 +0000 | |||
3 | +++ hooks/charmhelpers/contrib/network/ip.py 2014-11-21 15:33:38 +0000 | |||
4 | @@ -1,15 +1,12 @@ | |||
5 | 1 | import glob | 1 | import glob |
6 | 2 | import re | 2 | import re |
7 | 3 | import subprocess | 3 | import subprocess |
8 | 4 | import sys | ||
9 | 5 | 4 | ||
10 | 6 | from functools import partial | 5 | from functools import partial |
11 | 7 | 6 | ||
12 | 8 | from charmhelpers.core.hookenv import unit_get | 7 | from charmhelpers.core.hookenv import unit_get |
13 | 9 | from charmhelpers.fetch import apt_install | 8 | from charmhelpers.fetch import apt_install |
14 | 10 | from charmhelpers.core.hookenv import ( | 9 | from charmhelpers.core.hookenv import ( |
15 | 11 | WARNING, | ||
16 | 12 | ERROR, | ||
17 | 13 | log | 10 | log |
18 | 14 | ) | 11 | ) |
19 | 15 | 12 | ||
20 | @@ -34,31 +31,28 @@ | |||
21 | 34 | network) | 31 | network) |
22 | 35 | 32 | ||
23 | 36 | 33 | ||
24 | 34 | def no_ip_found_error_out(network): | ||
25 | 35 | errmsg = ("No IP address found in network: %s" % network) | ||
26 | 36 | raise ValueError(errmsg) | ||
27 | 37 | |||
28 | 38 | |||
29 | 37 | def get_address_in_network(network, fallback=None, fatal=False): | 39 | def get_address_in_network(network, fallback=None, fatal=False): |
32 | 38 | """ | 40 | """Get an IPv4 or IPv6 address within the network from the host. |
31 | 39 | Get an IPv4 or IPv6 address within the network from the host. | ||
33 | 40 | 41 | ||
34 | 41 | :param network (str): CIDR presentation format. For example, | 42 | :param network (str): CIDR presentation format. For example, |
35 | 42 | '192.168.1.0/24'. | 43 | '192.168.1.0/24'. |
36 | 43 | :param fallback (str): If no address is found, return fallback. | 44 | :param fallback (str): If no address is found, return fallback. |
37 | 44 | :param fatal (boolean): If no address is found, fallback is not | 45 | :param fatal (boolean): If no address is found, fallback is not |
38 | 45 | set and fatal is True then exit(1). | 46 | set and fatal is True then exit(1). |
39 | 46 | |||
40 | 47 | """ | 47 | """ |
41 | 48 | |||
42 | 49 | def not_found_error_out(): | ||
43 | 50 | log("No IP address found in network: %s" % network, | ||
44 | 51 | level=ERROR) | ||
45 | 52 | sys.exit(1) | ||
46 | 53 | |||
47 | 54 | if network is None: | 48 | if network is None: |
48 | 55 | if fallback is not None: | 49 | if fallback is not None: |
49 | 56 | return fallback | 50 | return fallback |
50 | 51 | |||
51 | 52 | if fatal: | ||
52 | 53 | no_ip_found_error_out(network) | ||
53 | 57 | else: | 54 | else: |
58 | 58 | if fatal: | 55 | return None |
55 | 59 | not_found_error_out() | ||
56 | 60 | else: | ||
57 | 61 | return None | ||
59 | 62 | 56 | ||
60 | 63 | _validate_cidr(network) | 57 | _validate_cidr(network) |
61 | 64 | network = netaddr.IPNetwork(network) | 58 | network = netaddr.IPNetwork(network) |
62 | @@ -70,6 +64,7 @@ | |||
63 | 70 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) | 64 | cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) |
64 | 71 | if cidr in network: | 65 | if cidr in network: |
65 | 72 | return str(cidr.ip) | 66 | return str(cidr.ip) |
66 | 67 | |||
67 | 73 | if network.version == 6 and netifaces.AF_INET6 in addresses: | 68 | if network.version == 6 and netifaces.AF_INET6 in addresses: |
68 | 74 | for addr in addresses[netifaces.AF_INET6]: | 69 | for addr in addresses[netifaces.AF_INET6]: |
69 | 75 | if not addr['addr'].startswith('fe80'): | 70 | if not addr['addr'].startswith('fe80'): |
70 | @@ -82,20 +77,20 @@ | |||
71 | 82 | return fallback | 77 | return fallback |
72 | 83 | 78 | ||
73 | 84 | if fatal: | 79 | if fatal: |
75 | 85 | not_found_error_out() | 80 | no_ip_found_error_out(network) |
76 | 86 | 81 | ||
77 | 87 | return None | 82 | return None |
78 | 88 | 83 | ||
79 | 89 | 84 | ||
80 | 90 | def is_ipv6(address): | 85 | def is_ipv6(address): |
82 | 91 | '''Determine whether provided address is IPv6 or not''' | 86 | """Determine whether provided address is IPv6 or not.""" |
83 | 92 | try: | 87 | try: |
84 | 93 | address = netaddr.IPAddress(address) | 88 | address = netaddr.IPAddress(address) |
85 | 94 | except netaddr.AddrFormatError: | 89 | except netaddr.AddrFormatError: |
86 | 95 | # probably a hostname - so not an address at all! | 90 | # probably a hostname - so not an address at all! |
87 | 96 | return False | 91 | return False |
90 | 97 | else: | 92 | |
91 | 98 | return address.version == 6 | 93 | return address.version == 6 |
92 | 99 | 94 | ||
93 | 100 | 95 | ||
94 | 101 | def is_address_in_network(network, address): | 96 | def is_address_in_network(network, address): |
95 | @@ -113,11 +108,13 @@ | |||
96 | 113 | except (netaddr.core.AddrFormatError, ValueError): | 108 | except (netaddr.core.AddrFormatError, ValueError): |
97 | 114 | raise ValueError("Network (%s) is not in CIDR presentation format" % | 109 | raise ValueError("Network (%s) is not in CIDR presentation format" % |
98 | 115 | network) | 110 | network) |
99 | 111 | |||
100 | 116 | try: | 112 | try: |
101 | 117 | address = netaddr.IPAddress(address) | 113 | address = netaddr.IPAddress(address) |
102 | 118 | except (netaddr.core.AddrFormatError, ValueError): | 114 | except (netaddr.core.AddrFormatError, ValueError): |
103 | 119 | raise ValueError("Address (%s) is not in correct presentation format" % | 115 | raise ValueError("Address (%s) is not in correct presentation format" % |
104 | 120 | address) | 116 | address) |
105 | 117 | |||
106 | 121 | if address in network: | 118 | if address in network: |
107 | 122 | return True | 119 | return True |
108 | 123 | else: | 120 | else: |
109 | @@ -147,6 +144,7 @@ | |||
110 | 147 | return iface | 144 | return iface |
111 | 148 | else: | 145 | else: |
112 | 149 | return addresses[netifaces.AF_INET][0][key] | 146 | return addresses[netifaces.AF_INET][0][key] |
113 | 147 | |||
114 | 150 | if address.version == 6 and netifaces.AF_INET6 in addresses: | 148 | if address.version == 6 and netifaces.AF_INET6 in addresses: |
115 | 151 | for addr in addresses[netifaces.AF_INET6]: | 149 | for addr in addresses[netifaces.AF_INET6]: |
116 | 152 | if not addr['addr'].startswith('fe80'): | 150 | if not addr['addr'].startswith('fe80'): |
117 | @@ -160,41 +158,42 @@ | |||
118 | 160 | return str(cidr).split('/')[1] | 158 | return str(cidr).split('/')[1] |
119 | 161 | else: | 159 | else: |
120 | 162 | return addr[key] | 160 | return addr[key] |
121 | 161 | |||
122 | 163 | return None | 162 | return None |
123 | 164 | 163 | ||
124 | 165 | 164 | ||
125 | 166 | get_iface_for_address = partial(_get_for_address, key='iface') | 165 | get_iface_for_address = partial(_get_for_address, key='iface') |
126 | 167 | 166 | ||
127 | 167 | |||
128 | 168 | get_netmask_for_address = partial(_get_for_address, key='netmask') | 168 | get_netmask_for_address = partial(_get_for_address, key='netmask') |
129 | 169 | 169 | ||
130 | 170 | 170 | ||
131 | 171 | def format_ipv6_addr(address): | 171 | def format_ipv6_addr(address): |
134 | 172 | """ | 172 | """If address is IPv6, wrap it in '[]' otherwise return None. |
135 | 173 | IPv6 needs to be wrapped with [] in url link to parse correctly. | 173 | |
136 | 174 | This is required by most configuration files when specifying IPv6 | ||
137 | 175 | addresses. | ||
138 | 174 | """ | 176 | """ |
139 | 175 | if is_ipv6(address): | 177 | if is_ipv6(address): |
144 | 176 | address = "[%s]" % address | 178 | return "[%s]" % address |
141 | 177 | else: | ||
142 | 178 | log("Not a valid ipv6 address: %s" % address, level=WARNING) | ||
143 | 179 | address = None | ||
145 | 180 | 179 | ||
147 | 181 | return address | 180 | return None |
148 | 182 | 181 | ||
149 | 183 | 182 | ||
150 | 184 | def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, | 183 | def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, |
151 | 185 | fatal=True, exc_list=None): | 184 | fatal=True, exc_list=None): |
155 | 186 | """ | 185 | """Return the assigned IP address for a given interface, if any.""" |
153 | 187 | Return the assigned IP address for a given interface, if any, or []. | ||
154 | 188 | """ | ||
156 | 189 | # Extract nic if passed /dev/ethX | 186 | # Extract nic if passed /dev/ethX |
157 | 190 | if '/' in iface: | 187 | if '/' in iface: |
158 | 191 | iface = iface.split('/')[-1] | 188 | iface = iface.split('/')[-1] |
159 | 189 | |||
160 | 192 | if not exc_list: | 190 | if not exc_list: |
161 | 193 | exc_list = [] | 191 | exc_list = [] |
162 | 192 | |||
163 | 194 | try: | 193 | try: |
164 | 195 | inet_num = getattr(netifaces, inet_type) | 194 | inet_num = getattr(netifaces, inet_type) |
165 | 196 | except AttributeError: | 195 | except AttributeError: |
167 | 197 | raise Exception('Unknown inet type ' + str(inet_type)) | 196 | raise Exception("Unknown inet type '%s'" % str(inet_type)) |
168 | 198 | 197 | ||
169 | 199 | interfaces = netifaces.interfaces() | 198 | interfaces = netifaces.interfaces() |
170 | 200 | if inc_aliases: | 199 | if inc_aliases: |
171 | @@ -202,15 +201,18 @@ | |||
172 | 202 | for _iface in interfaces: | 201 | for _iface in interfaces: |
173 | 203 | if iface == _iface or _iface.split(':')[0] == iface: | 202 | if iface == _iface or _iface.split(':')[0] == iface: |
174 | 204 | ifaces.append(_iface) | 203 | ifaces.append(_iface) |
175 | 204 | |||
176 | 205 | if fatal and not ifaces: | 205 | if fatal and not ifaces: |
177 | 206 | raise Exception("Invalid interface '%s'" % iface) | 206 | raise Exception("Invalid interface '%s'" % iface) |
178 | 207 | |||
179 | 207 | ifaces.sort() | 208 | ifaces.sort() |
180 | 208 | else: | 209 | else: |
181 | 209 | if iface not in interfaces: | 210 | if iface not in interfaces: |
182 | 210 | if fatal: | 211 | if fatal: |
184 | 211 | raise Exception("%s not found " % (iface)) | 212 | raise Exception("Interface '%s' not found " % (iface)) |
185 | 212 | else: | 213 | else: |
186 | 213 | return [] | 214 | return [] |
187 | 215 | |||
188 | 214 | else: | 216 | else: |
189 | 215 | ifaces = [iface] | 217 | ifaces = [iface] |
190 | 216 | 218 | ||
191 | @@ -221,11 +223,14 @@ | |||
192 | 221 | for entry in net_info[inet_num]: | 223 | for entry in net_info[inet_num]: |
193 | 222 | if 'addr' in entry and entry['addr'] not in exc_list: | 224 | if 'addr' in entry and entry['addr'] not in exc_list: |
194 | 223 | addresses.append(entry['addr']) | 225 | addresses.append(entry['addr']) |
195 | 226 | |||
196 | 224 | if fatal and not addresses: | 227 | if fatal and not addresses: |
197 | 225 | raise Exception("Interface '%s' doesn't have any %s addresses." % | 228 | raise Exception("Interface '%s' doesn't have any %s addresses." % |
198 | 226 | (iface, inet_type)) | 229 | (iface, inet_type)) |
199 | 230 | |||
200 | 227 | return addresses | 231 | return addresses |
201 | 228 | 232 | ||
202 | 233 | |||
203 | 229 | get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') | 234 | get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') |
204 | 230 | 235 | ||
205 | 231 | 236 | ||
206 | @@ -241,6 +246,7 @@ | |||
207 | 241 | raw = re.match(ll_key, _addr) | 246 | raw = re.match(ll_key, _addr) |
208 | 242 | if raw: | 247 | if raw: |
209 | 243 | _addr = raw.group(1) | 248 | _addr = raw.group(1) |
210 | 249 | |||
211 | 244 | if _addr == addr: | 250 | if _addr == addr: |
212 | 245 | log("Address '%s' is configured on iface '%s'" % | 251 | log("Address '%s' is configured on iface '%s'" % |
213 | 246 | (addr, iface)) | 252 | (addr, iface)) |
214 | @@ -251,8 +257,9 @@ | |||
215 | 251 | 257 | ||
216 | 252 | 258 | ||
217 | 253 | def sniff_iface(f): | 259 | def sniff_iface(f): |
220 | 254 | """If no iface provided, inject net iface inferred from unit private | 260 | """Ensure decorated function is called with a value for iface. |
221 | 255 | address. | 261 | |
222 | 262 | If no iface provided, inject net iface inferred from unit private address. | ||
223 | 256 | """ | 263 | """ |
224 | 257 | def iface_sniffer(*args, **kwargs): | 264 | def iface_sniffer(*args, **kwargs): |
225 | 258 | if not kwargs.get('iface', None): | 265 | if not kwargs.get('iface', None): |
226 | @@ -317,33 +324,28 @@ | |||
227 | 317 | return addrs | 324 | return addrs |
228 | 318 | 325 | ||
229 | 319 | if fatal: | 326 | if fatal: |
231 | 320 | raise Exception("Interface '%s' doesn't have a scope global " | 327 | raise Exception("Interface '%s' does not have a scope global " |
232 | 321 | "non-temporary ipv6 address." % iface) | 328 | "non-temporary ipv6 address." % iface) |
233 | 322 | 329 | ||
234 | 323 | return [] | 330 | return [] |
235 | 324 | 331 | ||
236 | 325 | 332 | ||
237 | 326 | def get_bridges(vnic_dir='/sys/devices/virtual/net'): | 333 | def get_bridges(vnic_dir='/sys/devices/virtual/net'): |
243 | 327 | """ | 334 | """Return a list of bridges on the system.""" |
244 | 328 | Return a list of bridges on the system or [] | 335 | b_regex = "%s/*/bridge" % vnic_dir |
245 | 329 | """ | 336 | return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] |
241 | 330 | b_rgex = vnic_dir + '/*/bridge' | ||
242 | 331 | return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)] | ||
246 | 332 | 337 | ||
247 | 333 | 338 | ||
248 | 334 | def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): | 339 | def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): |
254 | 335 | """ | 340 | """Return a list of nics comprising a given bridge on the system.""" |
255 | 336 | Return a list of nics comprising a given bridge on the system or [] | 341 | brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) |
256 | 337 | """ | 342 | return [x.split('/')[-1] for x in glob.glob(brif_regex)] |
252 | 338 | brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge) | ||
253 | 339 | return [x.split('/')[-1] for x in glob.glob(brif_rgex)] | ||
257 | 340 | 343 | ||
258 | 341 | 344 | ||
259 | 342 | def is_bridge_member(nic): | 345 | def is_bridge_member(nic): |
263 | 343 | """ | 346 | """Check if a given nic is a member of a bridge.""" |
261 | 344 | Check if a given nic is a member of a bridge | ||
262 | 345 | """ | ||
264 | 346 | for bridge in get_bridges(): | 347 | for bridge in get_bridges(): |
265 | 347 | if nic in get_bridge_nics(bridge): | 348 | if nic in get_bridge_nics(bridge): |
266 | 348 | return True | 349 | return True |
267 | 350 | |||
268 | 349 | return False | 351 | return False |
269 | 350 | 352 | ||
270 | === modified file 'hooks/charmhelpers/contrib/openstack/context.py' | |||
271 | --- hooks/charmhelpers/contrib/openstack/context.py 2014-10-13 16:18:58 +0000 | |||
272 | +++ hooks/charmhelpers/contrib/openstack/context.py 2014-11-21 15:33:38 +0000 | |||
273 | @@ -3,18 +3,15 @@ | |||
274 | 3 | import time | 3 | import time |
275 | 4 | 4 | ||
276 | 5 | from base64 import b64decode | 5 | from base64 import b64decode |
281 | 6 | 6 | from subprocess import check_call | |
278 | 7 | from subprocess import ( | ||
279 | 8 | check_call | ||
280 | 9 | ) | ||
282 | 10 | 7 | ||
283 | 11 | from charmhelpers.fetch import ( | 8 | from charmhelpers.fetch import ( |
284 | 12 | apt_install, | 9 | apt_install, |
285 | 13 | filter_installed_packages, | 10 | filter_installed_packages, |
286 | 14 | ) | 11 | ) |
287 | 15 | |||
288 | 16 | from charmhelpers.core.hookenv import ( | 12 | from charmhelpers.core.hookenv import ( |
289 | 17 | config, | 13 | config, |
290 | 14 | is_relation_made, | ||
291 | 18 | local_unit, | 15 | local_unit, |
292 | 19 | log, | 16 | log, |
293 | 20 | relation_get, | 17 | relation_get, |
294 | @@ -23,43 +20,40 @@ | |||
295 | 23 | relation_set, | 20 | relation_set, |
296 | 24 | unit_get, | 21 | unit_get, |
297 | 25 | unit_private_ip, | 22 | unit_private_ip, |
298 | 23 | DEBUG, | ||
299 | 24 | INFO, | ||
300 | 25 | WARNING, | ||
301 | 26 | ERROR, | 26 | ERROR, |
302 | 27 | INFO | ||
303 | 28 | ) | 27 | ) |
304 | 29 | |||
305 | 30 | from charmhelpers.core.host import ( | 28 | from charmhelpers.core.host import ( |
306 | 31 | mkdir, | 29 | mkdir, |
308 | 32 | write_file | 30 | write_file, |
309 | 33 | ) | 31 | ) |
310 | 34 | |||
311 | 35 | from charmhelpers.contrib.hahelpers.cluster import ( | 32 | from charmhelpers.contrib.hahelpers.cluster import ( |
312 | 36 | determine_apache_port, | 33 | determine_apache_port, |
313 | 37 | determine_api_port, | 34 | determine_api_port, |
314 | 38 | https, | 35 | https, |
316 | 39 | is_clustered | 36 | is_clustered, |
317 | 40 | ) | 37 | ) |
318 | 41 | |||
319 | 42 | from charmhelpers.contrib.hahelpers.apache import ( | 38 | from charmhelpers.contrib.hahelpers.apache import ( |
320 | 43 | get_cert, | 39 | get_cert, |
321 | 44 | get_ca_cert, | 40 | get_ca_cert, |
322 | 45 | install_ca_cert, | 41 | install_ca_cert, |
323 | 46 | ) | 42 | ) |
324 | 47 | |||
325 | 48 | from charmhelpers.contrib.openstack.neutron import ( | 43 | from charmhelpers.contrib.openstack.neutron import ( |
326 | 49 | neutron_plugin_attribute, | 44 | neutron_plugin_attribute, |
327 | 50 | ) | 45 | ) |
328 | 51 | |||
329 | 52 | from charmhelpers.contrib.network.ip import ( | 46 | from charmhelpers.contrib.network.ip import ( |
330 | 53 | get_address_in_network, | 47 | get_address_in_network, |
331 | 54 | get_ipv6_addr, | 48 | get_ipv6_addr, |
332 | 55 | get_netmask_for_address, | 49 | get_netmask_for_address, |
333 | 56 | format_ipv6_addr, | 50 | format_ipv6_addr, |
335 | 57 | is_address_in_network | 51 | is_address_in_network, |
336 | 58 | ) | 52 | ) |
337 | 59 | |||
338 | 60 | from charmhelpers.contrib.openstack.utils import get_host_ip | 53 | from charmhelpers.contrib.openstack.utils import get_host_ip |
339 | 61 | 54 | ||
340 | 62 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' | 55 | CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' |
341 | 56 | ADDRESS_TYPES = ['admin', 'internal', 'public'] | ||
342 | 63 | 57 | ||
343 | 64 | 58 | ||
344 | 65 | class OSContextError(Exception): | 59 | class OSContextError(Exception): |
345 | @@ -67,7 +61,7 @@ | |||
346 | 67 | 61 | ||
347 | 68 | 62 | ||
348 | 69 | def ensure_packages(packages): | 63 | def ensure_packages(packages): |
350 | 70 | '''Install but do not upgrade required plugin packages''' | 64 | """Install but do not upgrade required plugin packages.""" |
351 | 71 | required = filter_installed_packages(packages) | 65 | required = filter_installed_packages(packages) |
352 | 72 | if required: | 66 | if required: |
353 | 73 | apt_install(required, fatal=True) | 67 | apt_install(required, fatal=True) |
354 | @@ -78,17 +72,24 @@ | |||
355 | 78 | for k, v in ctxt.iteritems(): | 72 | for k, v in ctxt.iteritems(): |
356 | 79 | if v is None or v == '': | 73 | if v is None or v == '': |
357 | 80 | _missing.append(k) | 74 | _missing.append(k) |
358 | 75 | |||
359 | 81 | if _missing: | 76 | if _missing: |
361 | 82 | log('Missing required data: %s' % ' '.join(_missing), level='INFO') | 77 | log('Missing required data: %s' % ' '.join(_missing), level=INFO) |
362 | 83 | return False | 78 | return False |
363 | 79 | |||
364 | 84 | return True | 80 | return True |
365 | 85 | 81 | ||
366 | 86 | 82 | ||
367 | 87 | def config_flags_parser(config_flags): | 83 | def config_flags_parser(config_flags): |
368 | 84 | """Parses config flags string into dict. | ||
369 | 85 | |||
370 | 86 | The provided config_flags string may be a list of comma-separated values | ||
371 | 87 | which themselves may be comma-separated list of values. | ||
372 | 88 | """ | ||
373 | 88 | if config_flags.find('==') >= 0: | 89 | if config_flags.find('==') >= 0: |
376 | 89 | log("config_flags is not in expected format (key=value)", | 90 | log("config_flags is not in expected format (key=value)", level=ERROR) |
375 | 90 | level=ERROR) | ||
377 | 91 | raise OSContextError | 91 | raise OSContextError |
378 | 92 | |||
379 | 92 | # strip the following from each value. | 93 | # strip the following from each value. |
380 | 93 | post_strippers = ' ,' | 94 | post_strippers = ' ,' |
381 | 94 | # we strip any leading/trailing '=' or ' ' from the string then | 95 | # we strip any leading/trailing '=' or ' ' from the string then |
382 | @@ -111,17 +112,18 @@ | |||
383 | 111 | # if this not the first entry, expect an embedded key. | 112 | # if this not the first entry, expect an embedded key. |
384 | 112 | index = current.rfind(',') | 113 | index = current.rfind(',') |
385 | 113 | if index < 0: | 114 | if index < 0: |
388 | 114 | log("invalid config value(s) at index %s" % (i), | 115 | log("Invalid config value(s) at index %s" % (i), level=ERROR) |
387 | 115 | level=ERROR) | ||
389 | 116 | raise OSContextError | 116 | raise OSContextError |
390 | 117 | key = current[index + 1:] | 117 | key = current[index + 1:] |
391 | 118 | 118 | ||
392 | 119 | # Add to collection. | 119 | # Add to collection. |
393 | 120 | flags[key.strip(post_strippers)] = value.rstrip(post_strippers) | 120 | flags[key.strip(post_strippers)] = value.rstrip(post_strippers) |
394 | 121 | |||
395 | 121 | return flags | 122 | return flags |
396 | 122 | 123 | ||
397 | 123 | 124 | ||
398 | 124 | class OSContextGenerator(object): | 125 | class OSContextGenerator(object): |
399 | 126 | """Base class for all context generators.""" | ||
400 | 125 | interfaces = [] | 127 | interfaces = [] |
401 | 126 | 128 | ||
402 | 127 | def __call__(self): | 129 | def __call__(self): |
403 | @@ -133,11 +135,11 @@ | |||
404 | 133 | 135 | ||
405 | 134 | def __init__(self, | 136 | def __init__(self, |
406 | 135 | database=None, user=None, relation_prefix=None, ssl_dir=None): | 137 | database=None, user=None, relation_prefix=None, ssl_dir=None): |
412 | 136 | ''' | 138 | """Allows inspecting relation for settings prefixed with |
413 | 137 | Allows inspecting relation for settings prefixed with relation_prefix. | 139 | relation_prefix. This is useful for parsing access for multiple |
414 | 138 | This is useful for parsing access for multiple databases returned via | 140 | databases returned via the shared-db interface (eg, nova_password, |
415 | 139 | the shared-db interface (eg, nova_password, quantum_password) | 141 | quantum_password) |
416 | 140 | ''' | 142 | """ |
417 | 141 | self.relation_prefix = relation_prefix | 143 | self.relation_prefix = relation_prefix |
418 | 142 | self.database = database | 144 | self.database = database |
419 | 143 | self.user = user | 145 | self.user = user |
420 | @@ -147,9 +149,8 @@ | |||
421 | 147 | self.database = self.database or config('database') | 149 | self.database = self.database or config('database') |
422 | 148 | self.user = self.user or config('database-user') | 150 | self.user = self.user or config('database-user') |
423 | 149 | if None in [self.database, self.user]: | 151 | if None in [self.database, self.user]: |
427 | 150 | log('Could not generate shared_db context. ' | 152 | log("Could not generate shared_db context. Missing required charm " |
428 | 151 | 'Missing required charm config options. ' | 153 | "config options. (database name and user)", level=ERROR) |
426 | 152 | '(database name and user)') | ||
429 | 153 | raise OSContextError | 154 | raise OSContextError |
430 | 154 | 155 | ||
431 | 155 | ctxt = {} | 156 | ctxt = {} |
432 | @@ -202,23 +203,24 @@ | |||
433 | 202 | def __call__(self): | 203 | def __call__(self): |
434 | 203 | self.database = self.database or config('database') | 204 | self.database = self.database or config('database') |
435 | 204 | if self.database is None: | 205 | if self.database is None: |
439 | 205 | log('Could not generate postgresql_db context. ' | 206 | log('Could not generate postgresql_db context. Missing required ' |
440 | 206 | 'Missing required charm config options. ' | 207 | 'charm config options. (database name)', level=ERROR) |
438 | 207 | '(database name)') | ||
441 | 208 | raise OSContextError | 208 | raise OSContextError |
442 | 209 | |||
443 | 209 | ctxt = {} | 210 | ctxt = {} |
444 | 210 | |||
445 | 211 | for rid in relation_ids(self.interfaces[0]): | 211 | for rid in relation_ids(self.interfaces[0]): |
446 | 212 | for unit in related_units(rid): | 212 | for unit in related_units(rid): |
454 | 213 | ctxt = { | 213 | rel_host = relation_get('host', rid=rid, unit=unit) |
455 | 214 | 'database_host': relation_get('host', rid=rid, unit=unit), | 214 | rel_user = relation_get('user', rid=rid, unit=unit) |
456 | 215 | 'database': self.database, | 215 | rel_passwd = relation_get('password', rid=rid, unit=unit) |
457 | 216 | 'database_user': relation_get('user', rid=rid, unit=unit), | 216 | ctxt = {'database_host': rel_host, |
458 | 217 | 'database_password': relation_get('password', rid=rid, unit=unit), | 217 | 'database': self.database, |
459 | 218 | 'database_type': 'postgresql', | 218 | 'database_user': rel_user, |
460 | 219 | } | 219 | 'database_password': rel_passwd, |
461 | 220 | 'database_type': 'postgresql'} | ||
462 | 220 | if context_complete(ctxt): | 221 | if context_complete(ctxt): |
463 | 221 | return ctxt | 222 | return ctxt |
464 | 223 | |||
465 | 222 | return {} | 224 | return {} |
466 | 223 | 225 | ||
467 | 224 | 226 | ||
468 | @@ -227,23 +229,29 @@ | |||
469 | 227 | ca_path = os.path.join(ssl_dir, 'db-client.ca') | 229 | ca_path = os.path.join(ssl_dir, 'db-client.ca') |
470 | 228 | with open(ca_path, 'w') as fh: | 230 | with open(ca_path, 'w') as fh: |
471 | 229 | fh.write(b64decode(rdata['ssl_ca'])) | 231 | fh.write(b64decode(rdata['ssl_ca'])) |
472 | 232 | |||
473 | 230 | ctxt['database_ssl_ca'] = ca_path | 233 | ctxt['database_ssl_ca'] = ca_path |
474 | 231 | elif 'ssl_ca' in rdata: | 234 | elif 'ssl_ca' in rdata: |
476 | 232 | log("Charm not setup for ssl support but ssl ca found") | 235 | log("Charm not setup for ssl support but ssl ca found", level=INFO) |
477 | 233 | return ctxt | 236 | return ctxt |
478 | 237 | |||
479 | 234 | if 'ssl_cert' in rdata: | 238 | if 'ssl_cert' in rdata: |
480 | 235 | cert_path = os.path.join( | 239 | cert_path = os.path.join( |
481 | 236 | ssl_dir, 'db-client.cert') | 240 | ssl_dir, 'db-client.cert') |
482 | 237 | if not os.path.exists(cert_path): | 241 | if not os.path.exists(cert_path): |
484 | 238 | log("Waiting 1m for ssl client cert validity") | 242 | log("Waiting 1m for ssl client cert validity", level=INFO) |
485 | 239 | time.sleep(60) | 243 | time.sleep(60) |
486 | 244 | |||
487 | 240 | with open(cert_path, 'w') as fh: | 245 | with open(cert_path, 'w') as fh: |
488 | 241 | fh.write(b64decode(rdata['ssl_cert'])) | 246 | fh.write(b64decode(rdata['ssl_cert'])) |
489 | 247 | |||
490 | 242 | ctxt['database_ssl_cert'] = cert_path | 248 | ctxt['database_ssl_cert'] = cert_path |
491 | 243 | key_path = os.path.join(ssl_dir, 'db-client.key') | 249 | key_path = os.path.join(ssl_dir, 'db-client.key') |
492 | 244 | with open(key_path, 'w') as fh: | 250 | with open(key_path, 'w') as fh: |
493 | 245 | fh.write(b64decode(rdata['ssl_key'])) | 251 | fh.write(b64decode(rdata['ssl_key'])) |
494 | 252 | |||
495 | 246 | ctxt['database_ssl_key'] = key_path | 253 | ctxt['database_ssl_key'] = key_path |
496 | 254 | |||
497 | 247 | return ctxt | 255 | return ctxt |
498 | 248 | 256 | ||
499 | 249 | 257 | ||
500 | @@ -251,9 +259,8 @@ | |||
501 | 251 | interfaces = ['identity-service'] | 259 | interfaces = ['identity-service'] |
502 | 252 | 260 | ||
503 | 253 | def __call__(self): | 261 | def __call__(self): |
505 | 254 | log('Generating template context for identity-service') | 262 | log('Generating template context for identity-service', level=DEBUG) |
506 | 255 | ctxt = {} | 263 | ctxt = {} |
507 | 256 | |||
508 | 257 | for rid in relation_ids('identity-service'): | 264 | for rid in relation_ids('identity-service'): |
509 | 258 | for unit in related_units(rid): | 265 | for unit in related_units(rid): |
510 | 259 | rdata = relation_get(rid=rid, unit=unit) | 266 | rdata = relation_get(rid=rid, unit=unit) |
511 | @@ -261,26 +268,24 @@ | |||
512 | 261 | serv_host = format_ipv6_addr(serv_host) or serv_host | 268 | serv_host = format_ipv6_addr(serv_host) or serv_host |
513 | 262 | auth_host = rdata.get('auth_host') | 269 | auth_host = rdata.get('auth_host') |
514 | 263 | auth_host = format_ipv6_addr(auth_host) or auth_host | 270 | auth_host = format_ipv6_addr(auth_host) or auth_host |
529 | 264 | 271 | svc_protocol = rdata.get('service_protocol') or 'http' | |
530 | 265 | ctxt = { | 272 | auth_protocol = rdata.get('auth_protocol') or 'http' |
531 | 266 | 'service_port': rdata.get('service_port'), | 273 | ctxt = {'service_port': rdata.get('service_port'), |
532 | 267 | 'service_host': serv_host, | 274 | 'service_host': serv_host, |
533 | 268 | 'auth_host': auth_host, | 275 | 'auth_host': auth_host, |
534 | 269 | 'auth_port': rdata.get('auth_port'), | 276 | 'auth_port': rdata.get('auth_port'), |
535 | 270 | 'admin_tenant_name': rdata.get('service_tenant'), | 277 | 'admin_tenant_name': rdata.get('service_tenant'), |
536 | 271 | 'admin_user': rdata.get('service_username'), | 278 | 'admin_user': rdata.get('service_username'), |
537 | 272 | 'admin_password': rdata.get('service_password'), | 279 | 'admin_password': rdata.get('service_password'), |
538 | 273 | 'service_protocol': | 280 | 'service_protocol': svc_protocol, |
539 | 274 | rdata.get('service_protocol') or 'http', | 281 | 'auth_protocol': auth_protocol} |
526 | 275 | 'auth_protocol': | ||
527 | 276 | rdata.get('auth_protocol') or 'http', | ||
528 | 277 | } | ||
540 | 278 | if context_complete(ctxt): | 282 | if context_complete(ctxt): |
541 | 279 | # NOTE(jamespage) this is required for >= icehouse | 283 | # NOTE(jamespage) this is required for >= icehouse |
542 | 280 | # so a missing value just indicates keystone needs | 284 | # so a missing value just indicates keystone needs |
543 | 281 | # upgrading | 285 | # upgrading |
544 | 282 | ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') | 286 | ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') |
545 | 283 | return ctxt | 287 | return ctxt |
546 | 288 | |||
547 | 284 | return {} | 289 | return {} |
548 | 285 | 290 | ||
549 | 286 | 291 | ||
550 | @@ -293,21 +298,23 @@ | |||
551 | 293 | self.interfaces = [rel_name] | 298 | self.interfaces = [rel_name] |
552 | 294 | 299 | ||
553 | 295 | def __call__(self): | 300 | def __call__(self): |
555 | 296 | log('Generating template context for amqp') | 301 | log('Generating template context for amqp', level=DEBUG) |
556 | 297 | conf = config() | 302 | conf = config() |
557 | 298 | user_setting = 'rabbit-user' | ||
558 | 299 | vhost_setting = 'rabbit-vhost' | ||
559 | 300 | if self.relation_prefix: | 303 | if self.relation_prefix: |
562 | 301 | user_setting = self.relation_prefix + '-rabbit-user' | 304 | user_setting = '%s-rabbit-user' % (self.relation_prefix) |
563 | 302 | vhost_setting = self.relation_prefix + '-rabbit-vhost' | 305 | vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) |
564 | 306 | else: | ||
565 | 307 | user_setting = 'rabbit-user' | ||
566 | 308 | vhost_setting = 'rabbit-vhost' | ||
567 | 303 | 309 | ||
568 | 304 | try: | 310 | try: |
569 | 305 | username = conf[user_setting] | 311 | username = conf[user_setting] |
570 | 306 | vhost = conf[vhost_setting] | 312 | vhost = conf[vhost_setting] |
571 | 307 | except KeyError as e: | 313 | except KeyError as e: |
574 | 308 | log('Could not generate shared_db context. ' | 314 | log('Could not generate shared_db context. Missing required charm ' |
575 | 309 | 'Missing required charm config options: %s.' % e) | 315 | 'config options: %s.' % e, level=ERROR) |
576 | 310 | raise OSContextError | 316 | raise OSContextError |
577 | 317 | |||
578 | 311 | ctxt = {} | 318 | ctxt = {} |
579 | 312 | for rid in relation_ids(self.rel_name): | 319 | for rid in relation_ids(self.rel_name): |
580 | 313 | ha_vip_only = False | 320 | ha_vip_only = False |
581 | @@ -321,6 +328,7 @@ | |||
582 | 321 | host = relation_get('private-address', rid=rid, unit=unit) | 328 | host = relation_get('private-address', rid=rid, unit=unit) |
583 | 322 | host = format_ipv6_addr(host) or host | 329 | host = format_ipv6_addr(host) or host |
584 | 323 | ctxt['rabbitmq_host'] = host | 330 | ctxt['rabbitmq_host'] = host |
585 | 331 | |||
586 | 324 | ctxt.update({ | 332 | ctxt.update({ |
587 | 325 | 'rabbitmq_user': username, | 333 | 'rabbitmq_user': username, |
588 | 326 | 'rabbitmq_password': relation_get('password', rid=rid, | 334 | 'rabbitmq_password': relation_get('password', rid=rid, |
589 | @@ -331,6 +339,7 @@ | |||
590 | 331 | ssl_port = relation_get('ssl_port', rid=rid, unit=unit) | 339 | ssl_port = relation_get('ssl_port', rid=rid, unit=unit) |
591 | 332 | if ssl_port: | 340 | if ssl_port: |
592 | 333 | ctxt['rabbit_ssl_port'] = ssl_port | 341 | ctxt['rabbit_ssl_port'] = ssl_port |
593 | 342 | |||
594 | 334 | ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) | 343 | ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) |
595 | 335 | if ssl_ca: | 344 | if ssl_ca: |
596 | 336 | ctxt['rabbit_ssl_ca'] = ssl_ca | 345 | ctxt['rabbit_ssl_ca'] = ssl_ca |
597 | @@ -344,41 +353,45 @@ | |||
598 | 344 | if context_complete(ctxt): | 353 | if context_complete(ctxt): |
599 | 345 | if 'rabbit_ssl_ca' in ctxt: | 354 | if 'rabbit_ssl_ca' in ctxt: |
600 | 346 | if not self.ssl_dir: | 355 | if not self.ssl_dir: |
603 | 347 | log(("Charm not setup for ssl support " | 356 | log("Charm not setup for ssl support but ssl ca " |
604 | 348 | "but ssl ca found")) | 357 | "found", level=INFO) |
605 | 349 | break | 358 | break |
606 | 359 | |||
607 | 350 | ca_path = os.path.join( | 360 | ca_path = os.path.join( |
608 | 351 | self.ssl_dir, 'rabbit-client-ca.pem') | 361 | self.ssl_dir, 'rabbit-client-ca.pem') |
609 | 352 | with open(ca_path, 'w') as fh: | 362 | with open(ca_path, 'w') as fh: |
610 | 353 | fh.write(b64decode(ctxt['rabbit_ssl_ca'])) | 363 | fh.write(b64decode(ctxt['rabbit_ssl_ca'])) |
611 | 354 | ctxt['rabbit_ssl_ca'] = ca_path | 364 | ctxt['rabbit_ssl_ca'] = ca_path |
612 | 365 | |||
613 | 355 | # Sufficient information found = break out! | 366 | # Sufficient information found = break out! |
614 | 356 | break | 367 | break |
615 | 368 | |||
616 | 357 | # Used for active/active rabbitmq >= grizzly | 369 | # Used for active/active rabbitmq >= grizzly |
619 | 358 | if ('clustered' not in ctxt or ha_vip_only) \ | 370 | if (('clustered' not in ctxt or ha_vip_only) and |
620 | 359 | and len(related_units(rid)) > 1: | 371 | len(related_units(rid)) > 1): |
621 | 360 | rabbitmq_hosts = [] | 372 | rabbitmq_hosts = [] |
622 | 361 | for unit in related_units(rid): | 373 | for unit in related_units(rid): |
623 | 362 | host = relation_get('private-address', rid=rid, unit=unit) | 374 | host = relation_get('private-address', rid=rid, unit=unit) |
624 | 363 | host = format_ipv6_addr(host) or host | 375 | host = format_ipv6_addr(host) or host |
625 | 364 | rabbitmq_hosts.append(host) | 376 | rabbitmq_hosts.append(host) |
626 | 377 | |||
627 | 365 | ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) | 378 | ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts) |
628 | 379 | |||
629 | 366 | if not context_complete(ctxt): | 380 | if not context_complete(ctxt): |
630 | 367 | return {} | 381 | return {} |
633 | 368 | else: | 382 | |
634 | 369 | return ctxt | 383 | return ctxt |
635 | 370 | 384 | ||
636 | 371 | 385 | ||
637 | 372 | class CephContext(OSContextGenerator): | 386 | class CephContext(OSContextGenerator): |
638 | 387 | """Generates context for /etc/ceph/ceph.conf templates.""" | ||
639 | 373 | interfaces = ['ceph'] | 388 | interfaces = ['ceph'] |
640 | 374 | 389 | ||
641 | 375 | def __call__(self): | 390 | def __call__(self): |
642 | 376 | '''This generates context for /etc/ceph/ceph.conf templates''' | ||
643 | 377 | if not relation_ids('ceph'): | 391 | if not relation_ids('ceph'): |
644 | 378 | return {} | 392 | return {} |
645 | 379 | 393 | ||
648 | 380 | log('Generating template context for ceph') | 394 | log('Generating template context for ceph', level=DEBUG) |
647 | 381 | |||
649 | 382 | mon_hosts = [] | 395 | mon_hosts = [] |
650 | 383 | auth = None | 396 | auth = None |
651 | 384 | key = None | 397 | key = None |
652 | @@ -387,18 +400,18 @@ | |||
653 | 387 | for unit in related_units(rid): | 400 | for unit in related_units(rid): |
654 | 388 | auth = relation_get('auth', rid=rid, unit=unit) | 401 | auth = relation_get('auth', rid=rid, unit=unit) |
655 | 389 | key = relation_get('key', rid=rid, unit=unit) | 402 | key = relation_get('key', rid=rid, unit=unit) |
659 | 390 | ceph_addr = \ | 403 | ceph_pub_addr = relation_get('ceph-public-address', rid=rid, |
660 | 391 | relation_get('ceph-public-address', rid=rid, unit=unit) or \ | 404 | unit=unit) |
661 | 392 | relation_get('private-address', rid=rid, unit=unit) | 405 | unit_priv_addr = relation_get('private-address', rid=rid, |
662 | 406 | unit=unit) | ||
663 | 407 | ceph_addr = ceph_pub_addr or unit_priv_addr | ||
664 | 393 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr | 408 | ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr |
665 | 394 | mon_hosts.append(ceph_addr) | 409 | mon_hosts.append(ceph_addr) |
666 | 395 | 410 | ||
673 | 396 | ctxt = { | 411 | ctxt = {'mon_hosts': ' '.join(mon_hosts), |
674 | 397 | 'mon_hosts': ' '.join(mon_hosts), | 412 | 'auth': auth, |
675 | 398 | 'auth': auth, | 413 | 'key': key, |
676 | 399 | 'key': key, | 414 | 'use_syslog': use_syslog} |
671 | 400 | 'use_syslog': use_syslog | ||
672 | 401 | } | ||
677 | 402 | 415 | ||
678 | 403 | if not os.path.isdir('/etc/ceph'): | 416 | if not os.path.isdir('/etc/ceph'): |
679 | 404 | os.mkdir('/etc/ceph') | 417 | os.mkdir('/etc/ceph') |
680 | @@ -407,79 +420,65 @@ | |||
681 | 407 | return {} | 420 | return {} |
682 | 408 | 421 | ||
683 | 409 | ensure_packages(['ceph-common']) | 422 | ensure_packages(['ceph-common']) |
684 | 410 | |||
685 | 411 | return ctxt | 423 | return ctxt |
686 | 412 | 424 | ||
687 | 413 | 425 | ||
688 | 414 | ADDRESS_TYPES = ['admin', 'internal', 'public'] | ||
689 | 415 | |||
690 | 416 | |||
691 | 417 | class HAProxyContext(OSContextGenerator): | 426 | class HAProxyContext(OSContextGenerator): |
692 | 427 | """Provides half a context for the haproxy template, which describes | ||
693 | 428 | all peers to be included in the cluster. Each charm needs to include | ||
694 | 429 | its own context generator that describes the port mapping. | ||
695 | 430 | """ | ||
696 | 418 | interfaces = ['cluster'] | 431 | interfaces = ['cluster'] |
697 | 419 | 432 | ||
698 | 420 | def __call__(self): | 433 | def __call__(self): |
699 | 421 | ''' | ||
700 | 422 | Builds half a context for the haproxy template, which describes | ||
701 | 423 | all peers to be included in the cluster. Each charm needs to include | ||
702 | 424 | its own context generator that describes the port mapping. | ||
703 | 425 | ''' | ||
704 | 426 | if not relation_ids('cluster'): | 434 | if not relation_ids('cluster'): |
705 | 427 | return {} | 435 | return {} |
706 | 428 | 436 | ||
707 | 429 | l_unit = local_unit().replace('/', '-') | ||
708 | 430 | |||
709 | 431 | if config('prefer-ipv6'): | 437 | if config('prefer-ipv6'): |
710 | 432 | addr = get_ipv6_addr(exc_list=[config('vip')])[0] | 438 | addr = get_ipv6_addr(exc_list=[config('vip')])[0] |
711 | 433 | else: | 439 | else: |
712 | 434 | addr = get_host_ip(unit_get('private-address')) | 440 | addr = get_host_ip(unit_get('private-address')) |
713 | 435 | 441 | ||
714 | 442 | l_unit = local_unit().replace('/', '-') | ||
715 | 436 | cluster_hosts = {} | 443 | cluster_hosts = {} |
716 | 437 | 444 | ||
717 | 438 | # NOTE(jamespage): build out map of configured network endpoints | 445 | # NOTE(jamespage): build out map of configured network endpoints |
718 | 439 | # and associated backends | 446 | # and associated backends |
719 | 440 | for addr_type in ADDRESS_TYPES: | 447 | for addr_type in ADDRESS_TYPES: |
722 | 441 | laddr = get_address_in_network( | 448 | cfg_opt = 'os-{}-network'.format(addr_type) |
723 | 442 | config('os-{}-network'.format(addr_type))) | 449 | laddr = get_address_in_network(config(cfg_opt)) |
724 | 443 | if laddr: | 450 | if laddr: |
732 | 444 | cluster_hosts[laddr] = {} | 451 | netmask = get_netmask_for_address(laddr) |
733 | 445 | cluster_hosts[laddr]['network'] = "{}/{}".format( | 452 | cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, |
734 | 446 | laddr, | 453 | netmask), |
735 | 447 | get_netmask_for_address(laddr) | 454 | 'backends': {l_unit: laddr}} |
729 | 448 | ) | ||
730 | 449 | cluster_hosts[laddr]['backends'] = {} | ||
731 | 450 | cluster_hosts[laddr]['backends'][l_unit] = laddr | ||
736 | 451 | for rid in relation_ids('cluster'): | 455 | for rid in relation_ids('cluster'): |
737 | 452 | for unit in related_units(rid): | 456 | for unit in related_units(rid): |
738 | 453 | _unit = unit.replace('/', '-') | ||
739 | 454 | _laddr = relation_get('{}-address'.format(addr_type), | 457 | _laddr = relation_get('{}-address'.format(addr_type), |
740 | 455 | rid=rid, unit=unit) | 458 | rid=rid, unit=unit) |
741 | 456 | if _laddr: | 459 | if _laddr: |
742 | 460 | _unit = unit.replace('/', '-') | ||
743 | 457 | cluster_hosts[laddr]['backends'][_unit] = _laddr | 461 | cluster_hosts[laddr]['backends'][_unit] = _laddr |
744 | 458 | 462 | ||
745 | 459 | # NOTE(jamespage) no split configurations found, just use | 463 | # NOTE(jamespage) no split configurations found, just use |
746 | 460 | # private addresses | 464 | # private addresses |
747 | 461 | if not cluster_hosts: | 465 | if not cluster_hosts: |
755 | 462 | cluster_hosts[addr] = {} | 466 | netmask = get_netmask_for_address(addr) |
756 | 463 | cluster_hosts[addr]['network'] = "{}/{}".format( | 467 | cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), |
757 | 464 | addr, | 468 | 'backends': {l_unit: addr}} |
751 | 465 | get_netmask_for_address(addr) | ||
752 | 466 | ) | ||
753 | 467 | cluster_hosts[addr]['backends'] = {} | ||
754 | 468 | cluster_hosts[addr]['backends'][l_unit] = addr | ||
758 | 469 | for rid in relation_ids('cluster'): | 469 | for rid in relation_ids('cluster'): |
759 | 470 | for unit in related_units(rid): | 470 | for unit in related_units(rid): |
760 | 471 | _unit = unit.replace('/', '-') | ||
761 | 472 | _laddr = relation_get('private-address', | 471 | _laddr = relation_get('private-address', |
762 | 473 | rid=rid, unit=unit) | 472 | rid=rid, unit=unit) |
763 | 474 | if _laddr: | 473 | if _laddr: |
764 | 474 | _unit = unit.replace('/', '-') | ||
765 | 475 | cluster_hosts[addr]['backends'][_unit] = _laddr | 475 | cluster_hosts[addr]['backends'][_unit] = _laddr |
766 | 476 | 476 | ||
770 | 477 | ctxt = { | 477 | ctxt = {'frontends': cluster_hosts} |
768 | 478 | 'frontends': cluster_hosts, | ||
769 | 479 | } | ||
771 | 480 | 478 | ||
772 | 481 | if config('haproxy-server-timeout'): | 479 | if config('haproxy-server-timeout'): |
773 | 482 | ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') | 480 | ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') |
774 | 481 | |||
775 | 483 | if config('haproxy-client-timeout'): | 482 | if config('haproxy-client-timeout'): |
776 | 484 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') | 483 | ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') |
777 | 485 | 484 | ||
778 | @@ -495,11 +494,15 @@ | |||
779 | 495 | for frontend in cluster_hosts: | 494 | for frontend in cluster_hosts: |
780 | 496 | if len(cluster_hosts[frontend]['backends']) > 1: | 495 | if len(cluster_hosts[frontend]['backends']) > 1: |
781 | 497 | # Enable haproxy when we have enough peers. | 496 | # Enable haproxy when we have enough peers. |
783 | 498 | log('Ensuring haproxy enabled in /etc/default/haproxy.') | 497 | log('Ensuring haproxy enabled in /etc/default/haproxy.', |
784 | 498 | level=DEBUG) | ||
785 | 499 | with open('/etc/default/haproxy', 'w') as out: | 499 | with open('/etc/default/haproxy', 'w') as out: |
786 | 500 | out.write('ENABLED=1\n') | 500 | out.write('ENABLED=1\n') |
787 | 501 | |||
788 | 501 | return ctxt | 502 | return ctxt |
790 | 502 | log('HAProxy context is incomplete, this unit has no peers.') | 503 | |
791 | 504 | log('HAProxy context is incomplete, this unit has no peers.', | ||
792 | 505 | level=INFO) | ||
793 | 503 | return {} | 506 | return {} |
794 | 504 | 507 | ||
795 | 505 | 508 | ||
796 | @@ -507,29 +510,28 @@ | |||
797 | 507 | interfaces = ['image-service'] | 510 | interfaces = ['image-service'] |
798 | 508 | 511 | ||
799 | 509 | def __call__(self): | 512 | def __call__(self): |
805 | 510 | ''' | 513 | """Obtains the glance API server from the image-service relation. |
806 | 511 | Obtains the glance API server from the image-service relation. Useful | 514 | Useful in nova and cinder (currently). |
807 | 512 | in nova and cinder (currently). | 515 | """ |
808 | 513 | ''' | 516 | log('Generating template context for image-service.', level=DEBUG) |
804 | 514 | log('Generating template context for image-service.') | ||
809 | 515 | rids = relation_ids('image-service') | 517 | rids = relation_ids('image-service') |
810 | 516 | if not rids: | 518 | if not rids: |
811 | 517 | return {} | 519 | return {} |
812 | 520 | |||
813 | 518 | for rid in rids: | 521 | for rid in rids: |
814 | 519 | for unit in related_units(rid): | 522 | for unit in related_units(rid): |
815 | 520 | api_server = relation_get('glance-api-server', | 523 | api_server = relation_get('glance-api-server', |
816 | 521 | rid=rid, unit=unit) | 524 | rid=rid, unit=unit) |
817 | 522 | if api_server: | 525 | if api_server: |
818 | 523 | return {'glance_api_servers': api_server} | 526 | return {'glance_api_servers': api_server} |
821 | 524 | log('ImageService context is incomplete. ' | 527 | |
822 | 525 | 'Missing required relation data.') | 528 | log("ImageService context is incomplete. Missing required relation " |
823 | 529 | "data.", level=INFO) | ||
824 | 526 | return {} | 530 | return {} |
825 | 527 | 531 | ||
826 | 528 | 532 | ||
827 | 529 | class ApacheSSLContext(OSContextGenerator): | 533 | class ApacheSSLContext(OSContextGenerator): |
831 | 530 | 534 | """Generates a context for an apache vhost configuration that configures | |
829 | 531 | """ | ||
830 | 532 | Generates a context for an apache vhost configuration that configures | ||
832 | 533 | HTTPS reverse proxying for one or many endpoints. Generated context | 535 | HTTPS reverse proxying for one or many endpoints. Generated context |
833 | 534 | looks something like:: | 536 | looks something like:: |
834 | 535 | 537 | ||
835 | @@ -563,6 +565,7 @@ | |||
836 | 563 | else: | 565 | else: |
837 | 564 | cert_filename = 'cert' | 566 | cert_filename = 'cert' |
838 | 565 | key_filename = 'key' | 567 | key_filename = 'key' |
839 | 568 | |||
840 | 566 | write_file(path=os.path.join(ssl_dir, cert_filename), | 569 | write_file(path=os.path.join(ssl_dir, cert_filename), |
841 | 567 | content=b64decode(cert)) | 570 | content=b64decode(cert)) |
842 | 568 | write_file(path=os.path.join(ssl_dir, key_filename), | 571 | write_file(path=os.path.join(ssl_dir, key_filename), |
843 | @@ -574,7 +577,8 @@ | |||
844 | 574 | install_ca_cert(b64decode(ca_cert)) | 577 | install_ca_cert(b64decode(ca_cert)) |
845 | 575 | 578 | ||
846 | 576 | def canonical_names(self): | 579 | def canonical_names(self): |
848 | 577 | '''Figure out which canonical names clients will access this service''' | 580 | """Figure out which canonical names clients will access this service. |
849 | 581 | """ | ||
850 | 578 | cns = [] | 582 | cns = [] |
851 | 579 | for r_id in relation_ids('identity-service'): | 583 | for r_id in relation_ids('identity-service'): |
852 | 580 | for unit in related_units(r_id): | 584 | for unit in related_units(r_id): |
853 | @@ -582,47 +586,71 @@ | |||
854 | 582 | for k in rdata: | 586 | for k in rdata: |
855 | 583 | if k.startswith('ssl_key_'): | 587 | if k.startswith('ssl_key_'): |
856 | 584 | cns.append(k.lstrip('ssl_key_')) | 588 | cns.append(k.lstrip('ssl_key_')) |
857 | 589 | |||
858 | 585 | return list(set(cns)) | 590 | return list(set(cns)) |
859 | 586 | 591 | ||
860 | 592 | def get_network_addresses(self): | ||
861 | 593 | """For each network configured, return corresponding address and vip | ||
862 | 594 | (if available). | ||
863 | 595 | |||
864 | 596 | Returns a list of tuples of the form: | ||
865 | 597 | |||
866 | 598 | [(address_in_net_a, vip_in_net_a), | ||
867 | 599 | (address_in_net_b, vip_in_net_b), | ||
868 | 600 | ...] | ||
869 | 601 | |||
870 | 602 | or, if no vip(s) available: | ||
871 | 603 | |||
872 | 604 | [(address_in_net_a, address_in_net_a), | ||
873 | 605 | (address_in_net_b, address_in_net_b), | ||
874 | 606 | ...] | ||
875 | 607 | """ | ||
876 | 608 | addresses = [] | ||
877 | 609 | if config('vip'): | ||
878 | 610 | vips = config('vip').split() | ||
879 | 611 | else: | ||
880 | 612 | vips = [] | ||
881 | 613 | |||
882 | 614 | for net_type in ['os-internal-network', 'os-admin-network', | ||
883 | 615 | 'os-public-network']: | ||
884 | 616 | addr = get_address_in_network(config(net_type), | ||
885 | 617 | unit_get('private-address')) | ||
886 | 618 | if len(vips) > 1 and is_clustered(): | ||
887 | 619 | if not config(net_type): | ||
888 | 620 | log("Multiple networks configured but net_type " | ||
889 | 621 | "is None (%s)." % net_type, level=WARNING) | ||
890 | 622 | continue | ||
891 | 623 | |||
892 | 624 | for vip in vips: | ||
893 | 625 | if is_address_in_network(config(net_type), vip): | ||
894 | 626 | addresses.append((addr, vip)) | ||
895 | 627 | break | ||
896 | 628 | |||
897 | 629 | elif is_clustered() and config('vip'): | ||
898 | 630 | addresses.append((addr, config('vip'))) | ||
899 | 631 | else: | ||
900 | 632 | addresses.append((addr, addr)) | ||
901 | 633 | |||
902 | 634 | return addresses | ||
903 | 635 | |||
904 | 587 | def __call__(self): | 636 | def __call__(self): |
905 | 588 | if isinstance(self.external_ports, basestring): | 637 | if isinstance(self.external_ports, basestring): |
906 | 589 | self.external_ports = [self.external_ports] | 638 | self.external_ports = [self.external_ports] |
908 | 590 | if (not self.external_ports or not https()): | 639 | |
909 | 640 | if not self.external_ports or not https(): | ||
910 | 591 | return {} | 641 | return {} |
911 | 592 | 642 | ||
912 | 593 | self.configure_ca() | 643 | self.configure_ca() |
913 | 594 | self.enable_modules() | 644 | self.enable_modules() |
914 | 595 | 645 | ||
920 | 596 | ctxt = { | 646 | ctxt = {'namespace': self.service_namespace, |
921 | 597 | 'namespace': self.service_namespace, | 647 | 'endpoints': [], |
922 | 598 | 'endpoints': [], | 648 | 'ext_ports': []} |
918 | 599 | 'ext_ports': [] | ||
919 | 600 | } | ||
923 | 601 | 649 | ||
924 | 602 | for cn in self.canonical_names(): | 650 | for cn in self.canonical_names(): |
925 | 603 | self.configure_cert(cn) | 651 | self.configure_cert(cn) |
926 | 604 | 652 | ||
948 | 605 | addresses = [] | 653 | addresses = self.get_network_addresses() |
928 | 606 | vips = [] | ||
929 | 607 | if config('vip'): | ||
930 | 608 | vips = config('vip').split() | ||
931 | 609 | |||
932 | 610 | for network_type in ['os-internal-network', | ||
933 | 611 | 'os-admin-network', | ||
934 | 612 | 'os-public-network']: | ||
935 | 613 | address = get_address_in_network(config(network_type), | ||
936 | 614 | unit_get('private-address')) | ||
937 | 615 | if len(vips) > 0 and is_clustered(): | ||
938 | 616 | for vip in vips: | ||
939 | 617 | if is_address_in_network(config(network_type), | ||
940 | 618 | vip): | ||
941 | 619 | addresses.append((address, vip)) | ||
942 | 620 | break | ||
943 | 621 | elif is_clustered(): | ||
944 | 622 | addresses.append((address, config('vip'))) | ||
945 | 623 | else: | ||
946 | 624 | addresses.append((address, address)) | ||
947 | 625 | |||
949 | 626 | for address, endpoint in set(addresses): | 654 | for address, endpoint in set(addresses): |
950 | 627 | for api_port in self.external_ports: | 655 | for api_port in self.external_ports: |
951 | 628 | ext_port = determine_apache_port(api_port) | 656 | ext_port = determine_apache_port(api_port) |
952 | @@ -630,6 +658,7 @@ | |||
953 | 630 | portmap = (address, endpoint, int(ext_port), int(int_port)) | 658 | portmap = (address, endpoint, int(ext_port), int(int_port)) |
954 | 631 | ctxt['endpoints'].append(portmap) | 659 | ctxt['endpoints'].append(portmap) |
955 | 632 | ctxt['ext_ports'].append(int(ext_port)) | 660 | ctxt['ext_ports'].append(int(ext_port)) |
956 | 661 | |||
957 | 633 | ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) | 662 | ctxt['ext_ports'] = list(set(ctxt['ext_ports'])) |
958 | 634 | return ctxt | 663 | return ctxt |
959 | 635 | 664 | ||
960 | @@ -647,21 +676,23 @@ | |||
961 | 647 | 676 | ||
962 | 648 | @property | 677 | @property |
963 | 649 | def packages(self): | 678 | def packages(self): |
966 | 650 | return neutron_plugin_attribute( | 679 | return neutron_plugin_attribute(self.plugin, 'packages', |
967 | 651 | self.plugin, 'packages', self.network_manager) | 680 | self.network_manager) |
968 | 652 | 681 | ||
969 | 653 | @property | 682 | @property |
970 | 654 | def neutron_security_groups(self): | 683 | def neutron_security_groups(self): |
971 | 655 | return None | 684 | return None |
972 | 656 | 685 | ||
973 | 657 | def _ensure_packages(self): | 686 | def _ensure_packages(self): |
975 | 658 | [ensure_packages(pkgs) for pkgs in self.packages] | 687 | for pkgs in self.packages: |
976 | 688 | ensure_packages(pkgs) | ||
977 | 659 | 689 | ||
978 | 660 | def _save_flag_file(self): | 690 | def _save_flag_file(self): |
979 | 661 | if self.network_manager == 'quantum': | 691 | if self.network_manager == 'quantum': |
980 | 662 | _file = '/etc/nova/quantum_plugin.conf' | 692 | _file = '/etc/nova/quantum_plugin.conf' |
981 | 663 | else: | 693 | else: |
982 | 664 | _file = '/etc/nova/neutron_plugin.conf' | 694 | _file = '/etc/nova/neutron_plugin.conf' |
983 | 695 | |||
984 | 665 | with open(_file, 'wb') as out: | 696 | with open(_file, 'wb') as out: |
985 | 666 | out.write(self.plugin + '\n') | 697 | out.write(self.plugin + '\n') |
986 | 667 | 698 | ||
987 | @@ -670,13 +701,11 @@ | |||
988 | 670 | self.network_manager) | 701 | self.network_manager) |
989 | 671 | config = neutron_plugin_attribute(self.plugin, 'config', | 702 | config = neutron_plugin_attribute(self.plugin, 'config', |
990 | 672 | self.network_manager) | 703 | self.network_manager) |
998 | 673 | ovs_ctxt = { | 704 | ovs_ctxt = {'core_plugin': driver, |
999 | 674 | 'core_plugin': driver, | 705 | 'neutron_plugin': 'ovs', |
1000 | 675 | 'neutron_plugin': 'ovs', | 706 | 'neutron_security_groups': self.neutron_security_groups, |
1001 | 676 | 'neutron_security_groups': self.neutron_security_groups, | 707 | 'local_ip': unit_private_ip(), |
1002 | 677 | 'local_ip': unit_private_ip(), | 708 | 'config': config} |
996 | 678 | 'config': config | ||
997 | 679 | } | ||
1003 | 680 | 709 | ||
1004 | 681 | return ovs_ctxt | 710 | return ovs_ctxt |
1005 | 682 | 711 | ||
1006 | @@ -685,13 +714,11 @@ | |||
1007 | 685 | self.network_manager) | 714 | self.network_manager) |
1008 | 686 | config = neutron_plugin_attribute(self.plugin, 'config', | 715 | config = neutron_plugin_attribute(self.plugin, 'config', |
1009 | 687 | self.network_manager) | 716 | self.network_manager) |
1017 | 688 | nvp_ctxt = { | 717 | nvp_ctxt = {'core_plugin': driver, |
1018 | 689 | 'core_plugin': driver, | 718 | 'neutron_plugin': 'nvp', |
1019 | 690 | 'neutron_plugin': 'nvp', | 719 | 'neutron_security_groups': self.neutron_security_groups, |
1020 | 691 | 'neutron_security_groups': self.neutron_security_groups, | 720 | 'local_ip': unit_private_ip(), |
1021 | 692 | 'local_ip': unit_private_ip(), | 721 | 'config': config} |
1015 | 693 | 'config': config | ||
1016 | 694 | } | ||
1022 | 695 | 722 | ||
1023 | 696 | return nvp_ctxt | 723 | return nvp_ctxt |
1024 | 697 | 724 | ||
1025 | @@ -700,35 +727,50 @@ | |||
1026 | 700 | self.network_manager) | 727 | self.network_manager) |
1027 | 701 | n1kv_config = neutron_plugin_attribute(self.plugin, 'config', | 728 | n1kv_config = neutron_plugin_attribute(self.plugin, 'config', |
1028 | 702 | self.network_manager) | 729 | self.network_manager) |
1041 | 703 | n1kv_ctxt = { | 730 | n1kv_user_config_flags = config('n1kv-config-flags') |
1042 | 704 | 'core_plugin': driver, | 731 | restrict_policy_profiles = config('n1kv-restrict-policy-profiles') |
1043 | 705 | 'neutron_plugin': 'n1kv', | 732 | n1kv_ctxt = {'core_plugin': driver, |
1044 | 706 | 'neutron_security_groups': self.neutron_security_groups, | 733 | 'neutron_plugin': 'n1kv', |
1045 | 707 | 'local_ip': unit_private_ip(), | 734 | 'neutron_security_groups': self.neutron_security_groups, |
1046 | 708 | 'config': n1kv_config, | 735 | 'local_ip': unit_private_ip(), |
1047 | 709 | 'vsm_ip': config('n1kv-vsm-ip'), | 736 | 'config': n1kv_config, |
1048 | 710 | 'vsm_username': config('n1kv-vsm-username'), | 737 | 'vsm_ip': config('n1kv-vsm-ip'), |
1049 | 711 | 'vsm_password': config('n1kv-vsm-password'), | 738 | 'vsm_username': config('n1kv-vsm-username'), |
1050 | 712 | 'restrict_policy_profiles': config( | 739 | 'vsm_password': config('n1kv-vsm-password'), |
1051 | 713 | 'n1kv_restrict_policy_profiles'), | 740 | 'restrict_policy_profiles': restrict_policy_profiles} |
1052 | 714 | } | 741 | |
1053 | 742 | if n1kv_user_config_flags: | ||
1054 | 743 | flags = config_flags_parser(n1kv_user_config_flags) | ||
1055 | 744 | n1kv_ctxt['user_config_flags'] = flags | ||
1056 | 715 | 745 | ||
1057 | 716 | return n1kv_ctxt | 746 | return n1kv_ctxt |
1058 | 717 | 747 | ||
1059 | 748 | def calico_ctxt(self): | ||
1060 | 749 | driver = neutron_plugin_attribute(self.plugin, 'driver', | ||
1061 | 750 | self.network_manager) | ||
1062 | 751 | config = neutron_plugin_attribute(self.plugin, 'config', | ||
1063 | 752 | self.network_manager) | ||
1064 | 753 | calico_ctxt = {'core_plugin': driver, | ||
1065 | 754 | 'neutron_plugin': 'Calico', | ||
1066 | 755 | 'neutron_security_groups': self.neutron_security_groups, | ||
1067 | 756 | 'local_ip': unit_private_ip(), | ||
1068 | 757 | 'config': config} | ||
1069 | 758 | |||
1070 | 759 | return calico_ctxt | ||
1071 | 760 | |||
1072 | 718 | def neutron_ctxt(self): | 761 | def neutron_ctxt(self): |
1073 | 719 | if https(): | 762 | if https(): |
1074 | 720 | proto = 'https' | 763 | proto = 'https' |
1075 | 721 | else: | 764 | else: |
1076 | 722 | proto = 'http' | 765 | proto = 'http' |
1077 | 766 | |||
1078 | 723 | if is_clustered(): | 767 | if is_clustered(): |
1079 | 724 | host = config('vip') | 768 | host = config('vip') |
1080 | 725 | else: | 769 | else: |
1081 | 726 | host = unit_get('private-address') | 770 | host = unit_get('private-address') |
1087 | 727 | url = '%s://%s:%s' % (proto, host, '9696') | 771 | |
1088 | 728 | ctxt = { | 772 | ctxt = {'network_manager': self.network_manager, |
1089 | 729 | 'network_manager': self.network_manager, | 773 | 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} |
1085 | 730 | 'neutron_url': url, | ||
1086 | 731 | } | ||
1090 | 732 | return ctxt | 774 | return ctxt |
1091 | 733 | 775 | ||
1092 | 734 | def __call__(self): | 776 | def __call__(self): |
1093 | @@ -748,6 +790,8 @@ | |||
1094 | 748 | ctxt.update(self.nvp_ctxt()) | 790 | ctxt.update(self.nvp_ctxt()) |
1095 | 749 | elif self.plugin == 'n1kv': | 791 | elif self.plugin == 'n1kv': |
1096 | 750 | ctxt.update(self.n1kv_ctxt()) | 792 | ctxt.update(self.n1kv_ctxt()) |
1097 | 793 | elif self.plugin == 'Calico': | ||
1098 | 794 | ctxt.update(self.calico_ctxt()) | ||
1099 | 751 | 795 | ||
1100 | 752 | alchemy_flags = config('neutron-alchemy-flags') | 796 | alchemy_flags = config('neutron-alchemy-flags') |
1101 | 753 | if alchemy_flags: | 797 | if alchemy_flags: |
1102 | @@ -759,23 +803,40 @@ | |||
1103 | 759 | 803 | ||
1104 | 760 | 804 | ||
1105 | 761 | class OSConfigFlagContext(OSContextGenerator): | 805 | class OSConfigFlagContext(OSContextGenerator): |
1110 | 762 | 806 | """Provides support for user-defined config flags. | |
1111 | 763 | """ | 807 | |
1112 | 764 | Responsible for adding user-defined config-flags in charm config to a | 808 | Users can define a comma-seperated list of key=value pairs |
1113 | 765 | template context. | 809 | in the charm configuration and apply them at any point in |
1114 | 810 | any file by using a template flag. | ||
1115 | 811 | |||
1116 | 812 | Sometimes users might want config flags inserted within a | ||
1117 | 813 | specific section so this class allows users to specify the | ||
1118 | 814 | template flag name, allowing for multiple template flags | ||
1119 | 815 | (sections) within the same context. | ||
1120 | 766 | 816 | ||
1121 | 767 | NOTE: the value of config-flags may be a comma-separated list of | 817 | NOTE: the value of config-flags may be a comma-separated list of |
1122 | 768 | key=value pairs and some Openstack config files support | 818 | key=value pairs and some Openstack config files support |
1123 | 769 | comma-separated lists as values. | 819 | comma-separated lists as values. |
1124 | 770 | """ | 820 | """ |
1125 | 771 | 821 | ||
1126 | 822 | def __init__(self, charm_flag='config-flags', | ||
1127 | 823 | template_flag='user_config_flags'): | ||
1128 | 824 | """ | ||
1129 | 825 | :param charm_flag: config flags in charm configuration. | ||
1130 | 826 | :param template_flag: insert point for user-defined flags in template | ||
1131 | 827 | file. | ||
1132 | 828 | """ | ||
1133 | 829 | super(OSConfigFlagContext, self).__init__() | ||
1134 | 830 | self._charm_flag = charm_flag | ||
1135 | 831 | self._template_flag = template_flag | ||
1136 | 832 | |||
1137 | 772 | def __call__(self): | 833 | def __call__(self): |
1139 | 773 | config_flags = config('config-flags') | 834 | config_flags = config(self._charm_flag) |
1140 | 774 | if not config_flags: | 835 | if not config_flags: |
1141 | 775 | return {} | 836 | return {} |
1142 | 776 | 837 | ||
1145 | 777 | flags = config_flags_parser(config_flags) | 838 | return {self._template_flag: |
1146 | 778 | return {'user_config_flags': flags} | 839 | config_flags_parser(config_flags)} |
1147 | 779 | 840 | ||
1148 | 780 | 841 | ||
1149 | 781 | class SubordinateConfigContext(OSContextGenerator): | 842 | class SubordinateConfigContext(OSContextGenerator): |
1150 | @@ -819,7 +880,6 @@ | |||
1151 | 819 | }, | 880 | }, |
1152 | 820 | } | 881 | } |
1153 | 821 | } | 882 | } |
1154 | 822 | |||
1155 | 823 | """ | 883 | """ |
1156 | 824 | 884 | ||
1157 | 825 | def __init__(self, service, config_file, interface): | 885 | def __init__(self, service, config_file, interface): |
1158 | @@ -849,26 +909,28 @@ | |||
1159 | 849 | 909 | ||
1160 | 850 | if self.service not in sub_config: | 910 | if self.service not in sub_config: |
1161 | 851 | log('Found subordinate_config on %s but it contained' | 911 | log('Found subordinate_config on %s but it contained' |
1163 | 852 | 'nothing for %s service' % (rid, self.service)) | 912 | 'nothing for %s service' % (rid, self.service), |
1164 | 913 | level=INFO) | ||
1165 | 853 | continue | 914 | continue |
1166 | 854 | 915 | ||
1167 | 855 | sub_config = sub_config[self.service] | 916 | sub_config = sub_config[self.service] |
1168 | 856 | if self.config_file not in sub_config: | 917 | if self.config_file not in sub_config: |
1169 | 857 | log('Found subordinate_config on %s but it contained' | 918 | log('Found subordinate_config on %s but it contained' |
1171 | 858 | 'nothing for %s' % (rid, self.config_file)) | 919 | 'nothing for %s' % (rid, self.config_file), |
1172 | 920 | level=INFO) | ||
1173 | 859 | continue | 921 | continue |
1174 | 860 | 922 | ||
1175 | 861 | sub_config = sub_config[self.config_file] | 923 | sub_config = sub_config[self.config_file] |
1176 | 862 | for k, v in sub_config.iteritems(): | 924 | for k, v in sub_config.iteritems(): |
1177 | 863 | if k == 'sections': | 925 | if k == 'sections': |
1178 | 864 | for section, config_dict in v.iteritems(): | 926 | for section, config_dict in v.iteritems(): |
1180 | 865 | log("adding section '%s'" % (section)) | 927 | log("Adding section '%s'" % (section), |
1181 | 928 | level=DEBUG) | ||
1182 | 866 | ctxt[k][section] = config_dict | 929 | ctxt[k][section] = config_dict |
1183 | 867 | else: | 930 | else: |
1184 | 868 | ctxt[k] = v | 931 | ctxt[k] = v |
1185 | 869 | 932 | ||
1188 | 870 | log("%d section(s) found" % (len(ctxt['sections'])), level=INFO) | 933 | log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) |
1187 | 871 | |||
1189 | 872 | return ctxt | 934 | return ctxt |
1190 | 873 | 935 | ||
1191 | 874 | 936 | ||
1192 | @@ -880,15 +942,14 @@ | |||
1193 | 880 | False if config('debug') is None else config('debug') | 942 | False if config('debug') is None else config('debug') |
1194 | 881 | ctxt['verbose'] = \ | 943 | ctxt['verbose'] = \ |
1195 | 882 | False if config('verbose') is None else config('verbose') | 944 | False if config('verbose') is None else config('verbose') |
1196 | 945 | |||
1197 | 883 | return ctxt | 946 | return ctxt |
1198 | 884 | 947 | ||
1199 | 885 | 948 | ||
1200 | 886 | class SyslogContext(OSContextGenerator): | 949 | class SyslogContext(OSContextGenerator): |
1201 | 887 | 950 | ||
1202 | 888 | def __call__(self): | 951 | def __call__(self): |
1206 | 889 | ctxt = { | 952 | ctxt = {'use_syslog': config('use-syslog')} |
1204 | 890 | 'use_syslog': config('use-syslog') | ||
1205 | 891 | } | ||
1207 | 892 | return ctxt | 953 | return ctxt |
1208 | 893 | 954 | ||
1209 | 894 | 955 | ||
1210 | @@ -896,13 +957,9 @@ | |||
1211 | 896 | 957 | ||
1212 | 897 | def __call__(self): | 958 | def __call__(self): |
1213 | 898 | if config('prefer-ipv6'): | 959 | if config('prefer-ipv6'): |
1217 | 899 | return { | 960 | return {'bind_host': '::'} |
1215 | 900 | 'bind_host': '::' | ||
1216 | 901 | } | ||
1218 | 902 | else: | 961 | else: |
1222 | 903 | return { | 962 | return {'bind_host': '0.0.0.0'} |
1220 | 904 | 'bind_host': '0.0.0.0' | ||
1221 | 905 | } | ||
1223 | 906 | 963 | ||
1224 | 907 | 964 | ||
1225 | 908 | class WorkerConfigContext(OSContextGenerator): | 965 | class WorkerConfigContext(OSContextGenerator): |
1226 | @@ -914,11 +971,42 @@ | |||
1227 | 914 | except ImportError: | 971 | except ImportError: |
1228 | 915 | apt_install('python-psutil', fatal=True) | 972 | apt_install('python-psutil', fatal=True) |
1229 | 916 | from psutil import NUM_CPUS | 973 | from psutil import NUM_CPUS |
1230 | 974 | |||
1231 | 917 | return NUM_CPUS | 975 | return NUM_CPUS |
1232 | 918 | 976 | ||
1233 | 919 | def __call__(self): | 977 | def __call__(self): |
1238 | 920 | multiplier = config('worker-multiplier') or 1 | 978 | multiplier = config('worker-multiplier') or 0 |
1239 | 921 | ctxt = { | 979 | ctxt = {"workers": self.num_cpus * multiplier} |
1240 | 922 | "workers": self.num_cpus * multiplier | 980 | return ctxt |
1241 | 923 | } | 981 | |
1242 | 982 | |||
1243 | 983 | class ZeroMQContext(OSContextGenerator): | ||
1244 | 984 | interfaces = ['zeromq-configuration'] | ||
1245 | 985 | |||
1246 | 986 | def __call__(self): | ||
1247 | 987 | ctxt = {} | ||
1248 | 988 | if is_relation_made('zeromq-configuration', 'host'): | ||
1249 | 989 | for rid in relation_ids('zeromq-configuration'): | ||
1250 | 990 | for unit in related_units(rid): | ||
1251 | 991 | ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) | ||
1252 | 992 | ctxt['zmq_host'] = relation_get('host', unit, rid) | ||
1253 | 993 | |||
1254 | 994 | return ctxt | ||
1255 | 995 | |||
1256 | 996 | |||
1257 | 997 | class NotificationDriverContext(OSContextGenerator): | ||
1258 | 998 | |||
1259 | 999 | def __init__(self, zmq_relation='zeromq-configuration', | ||
1260 | 1000 | amqp_relation='amqp'): | ||
1261 | 1001 | """ | ||
1262 | 1002 | :param zmq_relation: Name of Zeromq relation to check | ||
1263 | 1003 | """ | ||
1264 | 1004 | self.zmq_relation = zmq_relation | ||
1265 | 1005 | self.amqp_relation = amqp_relation | ||
1266 | 1006 | |||
1267 | 1007 | def __call__(self): | ||
1268 | 1008 | ctxt = {'notifications': 'False'} | ||
1269 | 1009 | if is_relation_made(self.amqp_relation): | ||
1270 | 1010 | ctxt['notifications'] = "True" | ||
1271 | 1011 | |||
1272 | 924 | return ctxt | 1012 | return ctxt |
1273 | 925 | 1013 | ||
1274 | === modified file 'hooks/charmhelpers/contrib/openstack/neutron.py' | |||
1275 | --- hooks/charmhelpers/contrib/openstack/neutron.py 2014-07-28 14:41:41 +0000 | |||
1276 | +++ hooks/charmhelpers/contrib/openstack/neutron.py 2014-11-21 15:33:38 +0000 | |||
1277 | @@ -138,10 +138,25 @@ | |||
1278 | 138 | relation_prefix='neutron', | 138 | relation_prefix='neutron', |
1279 | 139 | ssl_dir=NEUTRON_CONF_DIR)], | 139 | ssl_dir=NEUTRON_CONF_DIR)], |
1280 | 140 | 'services': [], | 140 | 'services': [], |
1282 | 141 | 'packages': [['neutron-plugin-cisco']], | 141 | 'packages': [[headers_package()] + determine_dkms_package(), |
1283 | 142 | ['neutron-plugin-cisco']], | ||
1284 | 142 | 'server_packages': ['neutron-server', | 143 | 'server_packages': ['neutron-server', |
1285 | 143 | 'neutron-plugin-cisco'], | 144 | 'neutron-plugin-cisco'], |
1286 | 144 | 'server_services': ['neutron-server'] | 145 | 'server_services': ['neutron-server'] |
1287 | 146 | }, | ||
1288 | 147 | 'Calico': { | ||
1289 | 148 | 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', | ||
1290 | 149 | 'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin', | ||
1291 | 150 | 'contexts': [ | ||
1292 | 151 | context.SharedDBContext(user=config('neutron-database-user'), | ||
1293 | 152 | database=config('neutron-database'), | ||
1294 | 153 | relation_prefix='neutron', | ||
1295 | 154 | ssl_dir=NEUTRON_CONF_DIR)], | ||
1296 | 155 | 'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'], | ||
1297 | 156 | 'packages': [[headers_package()] + determine_dkms_package(), | ||
1298 | 157 | ['calico-compute', 'bird', 'neutron-dhcp-agent']], | ||
1299 | 158 | 'server_packages': ['neutron-server', 'calico-control'], | ||
1300 | 159 | 'server_services': ['neutron-server'] | ||
1301 | 145 | } | 160 | } |
1302 | 146 | } | 161 | } |
1303 | 147 | if release >= 'icehouse': | 162 | if release >= 'icehouse': |
1304 | @@ -162,7 +177,8 @@ | |||
1305 | 162 | elif manager == 'neutron': | 177 | elif manager == 'neutron': |
1306 | 163 | plugins = neutron_plugins() | 178 | plugins = neutron_plugins() |
1307 | 164 | else: | 179 | else: |
1309 | 165 | log('Error: Network manager does not support plugins.') | 180 | log("Network manager '%s' does not support plugins." % (manager), |
1310 | 181 | level=ERROR) | ||
1311 | 166 | raise Exception | 182 | raise Exception |
1312 | 167 | 183 | ||
1313 | 168 | try: | 184 | try: |
1314 | 169 | 185 | ||
1315 | === modified file 'hooks/charmhelpers/contrib/openstack/utils.py' | |||
1316 | --- hooks/charmhelpers/contrib/openstack/utils.py 2014-10-06 21:03:50 +0000 | |||
1317 | +++ hooks/charmhelpers/contrib/openstack/utils.py 2014-11-21 15:33:38 +0000 | |||
1318 | @@ -2,6 +2,7 @@ | |||
1319 | 2 | 2 | ||
1320 | 3 | # Common python helper functions used for OpenStack charms. | 3 | # Common python helper functions used for OpenStack charms. |
1321 | 4 | from collections import OrderedDict | 4 | from collections import OrderedDict |
1322 | 5 | from functools import wraps | ||
1323 | 5 | 6 | ||
1324 | 6 | import subprocess | 7 | import subprocess |
1325 | 7 | import json | 8 | import json |
1326 | @@ -468,6 +469,14 @@ | |||
1327 | 468 | return result.split('.')[0] | 469 | return result.split('.')[0] |
1328 | 469 | 470 | ||
1329 | 470 | 471 | ||
1330 | 472 | def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): | ||
1331 | 473 | mm_map = {} | ||
1332 | 474 | if os.path.isfile(mm_file): | ||
1333 | 475 | with open(mm_file, 'r') as f: | ||
1334 | 476 | mm_map = json.load(f) | ||
1335 | 477 | return mm_map | ||
1336 | 478 | |||
1337 | 479 | |||
1338 | 471 | def sync_db_with_multi_ipv6_addresses(database, database_user, | 480 | def sync_db_with_multi_ipv6_addresses(database, database_user, |
1339 | 472 | relation_prefix=None): | 481 | relation_prefix=None): |
1340 | 473 | hosts = get_ipv6_addr(dynamic_only=False) | 482 | hosts = get_ipv6_addr(dynamic_only=False) |
1341 | @@ -484,3 +493,18 @@ | |||
1342 | 484 | 493 | ||
1343 | 485 | for rid in relation_ids('shared-db'): | 494 | for rid in relation_ids('shared-db'): |
1344 | 486 | relation_set(relation_id=rid, **kwargs) | 495 | relation_set(relation_id=rid, **kwargs) |
1345 | 496 | |||
1346 | 497 | |||
1347 | 498 | def os_requires_version(ostack_release, pkg): | ||
1348 | 499 | """ | ||
1349 | 500 | Decorator for hook to specify minimum supported release | ||
1350 | 501 | """ | ||
1351 | 502 | def wrap(f): | ||
1352 | 503 | @wraps(f) | ||
1353 | 504 | def wrapped_f(*args): | ||
1354 | 505 | if os_release(pkg) < ostack_release: | ||
1355 | 506 | raise Exception("This hook is not supported on releases" | ||
1356 | 507 | " before %s" % ostack_release) | ||
1357 | 508 | f(*args) | ||
1358 | 509 | return wrapped_f | ||
1359 | 510 | return wrap | ||
1360 | 487 | 511 | ||
1361 | === modified file 'hooks/charmhelpers/contrib/storage/linux/ceph.py' | |||
1362 | --- hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-07-28 14:41:41 +0000 | |||
1363 | +++ hooks/charmhelpers/contrib/storage/linux/ceph.py 2014-11-21 15:33:38 +0000 | |||
1364 | @@ -16,19 +16,18 @@ | |||
1365 | 16 | from subprocess import ( | 16 | from subprocess import ( |
1366 | 17 | check_call, | 17 | check_call, |
1367 | 18 | check_output, | 18 | check_output, |
1369 | 19 | CalledProcessError | 19 | CalledProcessError, |
1370 | 20 | ) | 20 | ) |
1371 | 21 | |||
1372 | 22 | from charmhelpers.core.hookenv import ( | 21 | from charmhelpers.core.hookenv import ( |
1373 | 23 | relation_get, | 22 | relation_get, |
1374 | 24 | relation_ids, | 23 | relation_ids, |
1375 | 25 | related_units, | 24 | related_units, |
1376 | 26 | log, | 25 | log, |
1377 | 26 | DEBUG, | ||
1378 | 27 | INFO, | 27 | INFO, |
1379 | 28 | WARNING, | 28 | WARNING, |
1381 | 29 | ERROR | 29 | ERROR, |
1382 | 30 | ) | 30 | ) |
1383 | 31 | |||
1384 | 32 | from charmhelpers.core.host import ( | 31 | from charmhelpers.core.host import ( |
1385 | 33 | mount, | 32 | mount, |
1386 | 34 | mounts, | 33 | mounts, |
1387 | @@ -37,7 +36,6 @@ | |||
1388 | 37 | service_running, | 36 | service_running, |
1389 | 38 | umount, | 37 | umount, |
1390 | 39 | ) | 38 | ) |
1391 | 40 | |||
1392 | 41 | from charmhelpers.fetch import ( | 39 | from charmhelpers.fetch import ( |
1393 | 42 | apt_install, | 40 | apt_install, |
1394 | 43 | ) | 41 | ) |
1395 | @@ -56,69 +54,60 @@ | |||
1396 | 56 | 54 | ||
1397 | 57 | 55 | ||
1398 | 58 | def install(): | 56 | def install(): |
1400 | 59 | ''' Basic Ceph client installation ''' | 57 | """Basic Ceph client installation.""" |
1401 | 60 | ceph_dir = "/etc/ceph" | 58 | ceph_dir = "/etc/ceph" |
1402 | 61 | if not os.path.exists(ceph_dir): | 59 | if not os.path.exists(ceph_dir): |
1403 | 62 | os.mkdir(ceph_dir) | 60 | os.mkdir(ceph_dir) |
1404 | 61 | |||
1405 | 63 | apt_install('ceph-common', fatal=True) | 62 | apt_install('ceph-common', fatal=True) |
1406 | 64 | 63 | ||
1407 | 65 | 64 | ||
1408 | 66 | def rbd_exists(service, pool, rbd_img): | 65 | def rbd_exists(service, pool, rbd_img): |
1410 | 67 | ''' Check to see if a RADOS block device exists ''' | 66 | """Check to see if a RADOS block device exists.""" |
1411 | 68 | try: | 67 | try: |
1414 | 69 | out = check_output(['rbd', 'list', '--id', service, | 68 | out = check_output(['rbd', 'list', '--id', service, '--pool', pool]) |
1413 | 70 | '--pool', pool]) | ||
1415 | 71 | except CalledProcessError: | 69 | except CalledProcessError: |
1416 | 72 | return False | 70 | return False |
1419 | 73 | else: | 71 | |
1420 | 74 | return rbd_img in out | 72 | return rbd_img in out |
1421 | 75 | 73 | ||
1422 | 76 | 74 | ||
1423 | 77 | def create_rbd_image(service, pool, image, sizemb): | 75 | def create_rbd_image(service, pool, image, sizemb): |
1436 | 78 | ''' Create a new RADOS block device ''' | 76 | """Create a new RADOS block device.""" |
1437 | 79 | cmd = [ | 77 | cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, |
1438 | 80 | 'rbd', | 78 | '--pool', pool] |
1427 | 81 | 'create', | ||
1428 | 82 | image, | ||
1429 | 83 | '--size', | ||
1430 | 84 | str(sizemb), | ||
1431 | 85 | '--id', | ||
1432 | 86 | service, | ||
1433 | 87 | '--pool', | ||
1434 | 88 | pool | ||
1435 | 89 | ] | ||
1439 | 90 | check_call(cmd) | 79 | check_call(cmd) |
1440 | 91 | 80 | ||
1441 | 92 | 81 | ||
1442 | 93 | def pool_exists(service, name): | 82 | def pool_exists(service, name): |
1444 | 94 | ''' Check to see if a RADOS pool already exists ''' | 83 | """Check to see if a RADOS pool already exists.""" |
1445 | 95 | try: | 84 | try: |
1446 | 96 | out = check_output(['rados', '--id', service, 'lspools']) | 85 | out = check_output(['rados', '--id', service, 'lspools']) |
1447 | 97 | except CalledProcessError: | 86 | except CalledProcessError: |
1448 | 98 | return False | 87 | return False |
1451 | 99 | else: | 88 | |
1452 | 100 | return name in out | 89 | return name in out |
1453 | 101 | 90 | ||
1454 | 102 | 91 | ||
1455 | 103 | def get_osds(service): | 92 | def get_osds(service): |
1460 | 104 | ''' | 93 | """Return a list of all Ceph Object Storage Daemons currently in the |
1461 | 105 | Return a list of all Ceph Object Storage Daemons | 94 | cluster. |
1462 | 106 | currently in the cluster | 95 | """ |
1459 | 107 | ''' | ||
1463 | 108 | version = ceph_version() | 96 | version = ceph_version() |
1464 | 109 | if version and version >= '0.56': | 97 | if version and version >= '0.56': |
1465 | 110 | return json.loads(check_output(['ceph', '--id', service, | 98 | return json.loads(check_output(['ceph', '--id', service, |
1466 | 111 | 'osd', 'ls', '--format=json'])) | 99 | 'osd', 'ls', '--format=json'])) |
1473 | 112 | else: | 100 | |
1474 | 113 | return None | 101 | return None |
1475 | 114 | 102 | ||
1476 | 115 | 103 | ||
1477 | 116 | def create_pool(service, name, replicas=2): | 104 | def create_pool(service, name, replicas=3): |
1478 | 117 | ''' Create a new RADOS pool ''' | 105 | """Create a new RADOS pool.""" |
1479 | 118 | if pool_exists(service, name): | 106 | if pool_exists(service, name): |
1480 | 119 | log("Ceph pool {} already exists, skipping creation".format(name), | 107 | log("Ceph pool {} already exists, skipping creation".format(name), |
1481 | 120 | level=WARNING) | 108 | level=WARNING) |
1482 | 121 | return | 109 | return |
1483 | 110 | |||
1484 | 122 | # Calculate the number of placement groups based | 111 | # Calculate the number of placement groups based |
1485 | 123 | # on upstream recommended best practices. | 112 | # on upstream recommended best practices. |
1486 | 124 | osds = get_osds(service) | 113 | osds = get_osds(service) |
1487 | @@ -128,27 +117,19 @@ | |||
1488 | 128 | # NOTE(james-page): Default to 200 for older ceph versions | 117 | # NOTE(james-page): Default to 200 for older ceph versions |
1489 | 129 | # which don't support OSD query from cli | 118 | # which don't support OSD query from cli |
1490 | 130 | pgnum = 200 | 119 | pgnum = 200 |
1496 | 131 | cmd = [ | 120 | |
1497 | 132 | 'ceph', '--id', service, | 121 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] |
1493 | 133 | 'osd', 'pool', 'create', | ||
1494 | 134 | name, str(pgnum) | ||
1495 | 135 | ] | ||
1498 | 136 | check_call(cmd) | 122 | check_call(cmd) |
1504 | 137 | cmd = [ | 123 | |
1505 | 138 | 'ceph', '--id', service, | 124 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', |
1506 | 139 | 'osd', 'pool', 'set', name, | 125 | str(replicas)] |
1502 | 140 | 'size', str(replicas) | ||
1503 | 141 | ] | ||
1507 | 142 | check_call(cmd) | 126 | check_call(cmd) |
1508 | 143 | 127 | ||
1509 | 144 | 128 | ||
1510 | 145 | def delete_pool(service, name): | 129 | def delete_pool(service, name): |
1517 | 146 | ''' Delete a RADOS pool from ceph ''' | 130 | """Delete a RADOS pool from ceph.""" |
1518 | 147 | cmd = [ | 131 | cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, |
1519 | 148 | 'ceph', '--id', service, | 132 | '--yes-i-really-really-mean-it'] |
1514 | 149 | 'osd', 'pool', 'delete', | ||
1515 | 150 | name, '--yes-i-really-really-mean-it' | ||
1516 | 151 | ] | ||
1520 | 152 | check_call(cmd) | 133 | check_call(cmd) |
1521 | 153 | 134 | ||
1522 | 154 | 135 | ||
1523 | @@ -161,44 +142,43 @@ | |||
1524 | 161 | 142 | ||
1525 | 162 | 143 | ||
1526 | 163 | def create_keyring(service, key): | 144 | def create_keyring(service, key): |
1528 | 164 | ''' Create a new Ceph keyring containing key''' | 145 | """Create a new Ceph keyring containing key.""" |
1529 | 165 | keyring = _keyring_path(service) | 146 | keyring = _keyring_path(service) |
1530 | 166 | if os.path.exists(keyring): | 147 | if os.path.exists(keyring): |
1532 | 167 | log('ceph: Keyring exists at %s.' % keyring, level=WARNING) | 148 | log('Ceph keyring exists at %s.' % keyring, level=WARNING) |
1533 | 168 | return | 149 | return |
1541 | 169 | cmd = [ | 150 | |
1542 | 170 | 'ceph-authtool', | 151 | cmd = ['ceph-authtool', keyring, '--create-keyring', |
1543 | 171 | keyring, | 152 | '--name=client.{}'.format(service), '--add-key={}'.format(key)] |
1537 | 172 | '--create-keyring', | ||
1538 | 173 | '--name=client.{}'.format(service), | ||
1539 | 174 | '--add-key={}'.format(key) | ||
1540 | 175 | ] | ||
1544 | 176 | check_call(cmd) | 153 | check_call(cmd) |
1546 | 177 | log('ceph: Created new ring at %s.' % keyring, level=INFO) | 154 | log('Created new ceph keyring at %s.' % keyring, level=DEBUG) |
1547 | 178 | 155 | ||
1548 | 179 | 156 | ||
1549 | 180 | def create_key_file(service, key): | 157 | def create_key_file(service, key): |
1551 | 181 | ''' Create a file containing key ''' | 158 | """Create a file containing key.""" |
1552 | 182 | keyfile = _keyfile_path(service) | 159 | keyfile = _keyfile_path(service) |
1553 | 183 | if os.path.exists(keyfile): | 160 | if os.path.exists(keyfile): |
1555 | 184 | log('ceph: Keyfile exists at %s.' % keyfile, level=WARNING) | 161 | log('Keyfile exists at %s.' % keyfile, level=WARNING) |
1556 | 185 | return | 162 | return |
1557 | 163 | |||
1558 | 186 | with open(keyfile, 'w') as fd: | 164 | with open(keyfile, 'w') as fd: |
1559 | 187 | fd.write(key) | 165 | fd.write(key) |
1561 | 188 | log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) | 166 | |
1562 | 167 | log('Created new keyfile at %s.' % keyfile, level=INFO) | ||
1563 | 189 | 168 | ||
1564 | 190 | 169 | ||
1565 | 191 | def get_ceph_nodes(): | 170 | def get_ceph_nodes(): |
1567 | 192 | ''' Query named relation 'ceph' to detemine current nodes ''' | 171 | """Query named relation 'ceph' to determine current nodes.""" |
1568 | 193 | hosts = [] | 172 | hosts = [] |
1569 | 194 | for r_id in relation_ids('ceph'): | 173 | for r_id in relation_ids('ceph'): |
1570 | 195 | for unit in related_units(r_id): | 174 | for unit in related_units(r_id): |
1571 | 196 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) | 175 | hosts.append(relation_get('private-address', unit=unit, rid=r_id)) |
1572 | 176 | |||
1573 | 197 | return hosts | 177 | return hosts |
1574 | 198 | 178 | ||
1575 | 199 | 179 | ||
1576 | 200 | def configure(service, key, auth, use_syslog): | 180 | def configure(service, key, auth, use_syslog): |
1578 | 201 | ''' Perform basic configuration of Ceph ''' | 181 | """Perform basic configuration of Ceph.""" |
1579 | 202 | create_keyring(service, key) | 182 | create_keyring(service, key) |
1580 | 203 | create_key_file(service, key) | 183 | create_key_file(service, key) |
1581 | 204 | hosts = get_ceph_nodes() | 184 | hosts = get_ceph_nodes() |
1582 | @@ -211,17 +191,17 @@ | |||
1583 | 211 | 191 | ||
1584 | 212 | 192 | ||
1585 | 213 | def image_mapped(name): | 193 | def image_mapped(name): |
1587 | 214 | ''' Determine whether a RADOS block device is mapped locally ''' | 194 | """Determine whether a RADOS block device is mapped locally.""" |
1588 | 215 | try: | 195 | try: |
1589 | 216 | out = check_output(['rbd', 'showmapped']) | 196 | out = check_output(['rbd', 'showmapped']) |
1590 | 217 | except CalledProcessError: | 197 | except CalledProcessError: |
1591 | 218 | return False | 198 | return False |
1594 | 219 | else: | 199 | |
1595 | 220 | return name in out | 200 | return name in out |
1596 | 221 | 201 | ||
1597 | 222 | 202 | ||
1598 | 223 | def map_block_storage(service, pool, image): | 203 | def map_block_storage(service, pool, image): |
1600 | 224 | ''' Map a RADOS block device for local use ''' | 204 | """Map a RADOS block device for local use.""" |
1601 | 225 | cmd = [ | 205 | cmd = [ |
1602 | 226 | 'rbd', | 206 | 'rbd', |
1603 | 227 | 'map', | 207 | 'map', |
1604 | @@ -235,31 +215,32 @@ | |||
1605 | 235 | 215 | ||
1606 | 236 | 216 | ||
1607 | 237 | def filesystem_mounted(fs): | 217 | def filesystem_mounted(fs): |
1609 | 238 | ''' Determine whether a filesytems is already mounted ''' | 218 | """Determine whether a filesytems is already mounted.""" |
1610 | 239 | return fs in [f for f, m in mounts()] | 219 | return fs in [f for f, m in mounts()] |
1611 | 240 | 220 | ||
1612 | 241 | 221 | ||
1613 | 242 | def make_filesystem(blk_device, fstype='ext4', timeout=10): | 222 | def make_filesystem(blk_device, fstype='ext4', timeout=10): |
1615 | 243 | ''' Make a new filesystem on the specified block device ''' | 223 | """Make a new filesystem on the specified block device.""" |
1616 | 244 | count = 0 | 224 | count = 0 |
1617 | 245 | e_noent = os.errno.ENOENT | 225 | e_noent = os.errno.ENOENT |
1618 | 246 | while not os.path.exists(blk_device): | 226 | while not os.path.exists(blk_device): |
1619 | 247 | if count >= timeout: | 227 | if count >= timeout: |
1621 | 248 | log('ceph: gave up waiting on block device %s' % blk_device, | 228 | log('Gave up waiting on block device %s' % blk_device, |
1622 | 249 | level=ERROR) | 229 | level=ERROR) |
1623 | 250 | raise IOError(e_noent, os.strerror(e_noent), blk_device) | 230 | raise IOError(e_noent, os.strerror(e_noent), blk_device) |
1626 | 251 | log('ceph: waiting for block device %s to appear' % blk_device, | 231 | |
1627 | 252 | level=INFO) | 232 | log('Waiting for block device %s to appear' % blk_device, |
1628 | 233 | level=DEBUG) | ||
1629 | 253 | count += 1 | 234 | count += 1 |
1630 | 254 | time.sleep(1) | 235 | time.sleep(1) |
1631 | 255 | else: | 236 | else: |
1633 | 256 | log('ceph: Formatting block device %s as filesystem %s.' % | 237 | log('Formatting block device %s as filesystem %s.' % |
1634 | 257 | (blk_device, fstype), level=INFO) | 238 | (blk_device, fstype), level=INFO) |
1635 | 258 | check_call(['mkfs', '-t', fstype, blk_device]) | 239 | check_call(['mkfs', '-t', fstype, blk_device]) |
1636 | 259 | 240 | ||
1637 | 260 | 241 | ||
1638 | 261 | def place_data_on_block_device(blk_device, data_src_dst): | 242 | def place_data_on_block_device(blk_device, data_src_dst): |
1640 | 262 | ''' Migrate data in data_src_dst to blk_device and then remount ''' | 243 | """Migrate data in data_src_dst to blk_device and then remount.""" |
1641 | 263 | # mount block device into /mnt | 244 | # mount block device into /mnt |
1642 | 264 | mount(blk_device, '/mnt') | 245 | mount(blk_device, '/mnt') |
1643 | 265 | # copy data to /mnt | 246 | # copy data to /mnt |
1644 | @@ -279,8 +260,8 @@ | |||
1645 | 279 | 260 | ||
1646 | 280 | # TODO: re-use | 261 | # TODO: re-use |
1647 | 281 | def modprobe(module): | 262 | def modprobe(module): |
1650 | 282 | ''' Load a kernel module and configure for auto-load on reboot ''' | 263 | """Load a kernel module and configure for auto-load on reboot.""" |
1651 | 283 | log('ceph: Loading kernel module', level=INFO) | 264 | log('Loading kernel module', level=INFO) |
1652 | 284 | cmd = ['modprobe', module] | 265 | cmd = ['modprobe', module] |
1653 | 285 | check_call(cmd) | 266 | check_call(cmd) |
1654 | 286 | with open('/etc/modules', 'r+') as modules: | 267 | with open('/etc/modules', 'r+') as modules: |
1655 | @@ -289,7 +270,7 @@ | |||
1656 | 289 | 270 | ||
1657 | 290 | 271 | ||
1658 | 291 | def copy_files(src, dst, symlinks=False, ignore=None): | 272 | def copy_files(src, dst, symlinks=False, ignore=None): |
1660 | 292 | ''' Copy files from src to dst ''' | 273 | """Copy files from src to dst.""" |
1661 | 293 | for item in os.listdir(src): | 274 | for item in os.listdir(src): |
1662 | 294 | s = os.path.join(src, item) | 275 | s = os.path.join(src, item) |
1663 | 295 | d = os.path.join(dst, item) | 276 | d = os.path.join(dst, item) |
1664 | @@ -300,9 +281,9 @@ | |||
1665 | 300 | 281 | ||
1666 | 301 | 282 | ||
1667 | 302 | def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, | 283 | def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, |
1671 | 303 | blk_device, fstype, system_services=[]): | 284 | blk_device, fstype, system_services=[], |
1672 | 304 | """ | 285 | replicas=3): |
1673 | 305 | NOTE: This function must only be called from a single service unit for | 286 | """NOTE: This function must only be called from a single service unit for |
1674 | 306 | the same rbd_img otherwise data loss will occur. | 287 | the same rbd_img otherwise data loss will occur. |
1675 | 307 | 288 | ||
1676 | 308 | Ensures given pool and RBD image exists, is mapped to a block device, | 289 | Ensures given pool and RBD image exists, is mapped to a block device, |
1677 | @@ -316,15 +297,16 @@ | |||
1678 | 316 | """ | 297 | """ |
1679 | 317 | # Ensure pool, RBD image, RBD mappings are in place. | 298 | # Ensure pool, RBD image, RBD mappings are in place. |
1680 | 318 | if not pool_exists(service, pool): | 299 | if not pool_exists(service, pool): |
1683 | 319 | log('ceph: Creating new pool {}.'.format(pool)) | 300 | log('Creating new pool {}.'.format(pool), level=INFO) |
1684 | 320 | create_pool(service, pool) | 301 | create_pool(service, pool, replicas=replicas) |
1685 | 321 | 302 | ||
1686 | 322 | if not rbd_exists(service, pool, rbd_img): | 303 | if not rbd_exists(service, pool, rbd_img): |
1688 | 323 | log('ceph: Creating RBD image ({}).'.format(rbd_img)) | 304 | log('Creating RBD image ({}).'.format(rbd_img), level=INFO) |
1689 | 324 | create_rbd_image(service, pool, rbd_img, sizemb) | 305 | create_rbd_image(service, pool, rbd_img, sizemb) |
1690 | 325 | 306 | ||
1691 | 326 | if not image_mapped(rbd_img): | 307 | if not image_mapped(rbd_img): |
1693 | 327 | log('ceph: Mapping RBD Image {} as a Block Device.'.format(rbd_img)) | 308 | log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), |
1694 | 309 | level=INFO) | ||
1695 | 328 | map_block_storage(service, pool, rbd_img) | 310 | map_block_storage(service, pool, rbd_img) |
1696 | 329 | 311 | ||
1697 | 330 | # make file system | 312 | # make file system |
1698 | @@ -339,42 +321,44 @@ | |||
1699 | 339 | 321 | ||
1700 | 340 | for svc in system_services: | 322 | for svc in system_services: |
1701 | 341 | if service_running(svc): | 323 | if service_running(svc): |
1704 | 342 | log('ceph: Stopping services {} prior to migrating data.' | 324 | log('Stopping services {} prior to migrating data.' |
1705 | 343 | .format(svc)) | 325 | .format(svc), level=DEBUG) |
1706 | 344 | service_stop(svc) | 326 | service_stop(svc) |
1707 | 345 | 327 | ||
1708 | 346 | place_data_on_block_device(blk_device, mount_point) | 328 | place_data_on_block_device(blk_device, mount_point) |
1709 | 347 | 329 | ||
1710 | 348 | for svc in system_services: | 330 | for svc in system_services: |
1713 | 349 | log('ceph: Starting service {} after migrating data.' | 331 | log('Starting service {} after migrating data.' |
1714 | 350 | .format(svc)) | 332 | .format(svc), level=DEBUG) |
1715 | 351 | service_start(svc) | 333 | service_start(svc) |
1716 | 352 | 334 | ||
1717 | 353 | 335 | ||
1718 | 354 | def ensure_ceph_keyring(service, user=None, group=None): | 336 | def ensure_ceph_keyring(service, user=None, group=None): |
1722 | 355 | ''' | 337 | """Ensures a ceph keyring is created for a named service and optionally |
1723 | 356 | Ensures a ceph keyring is created for a named service | 338 | ensures user and group ownership. |
1721 | 357 | and optionally ensures user and group ownership. | ||
1724 | 358 | 339 | ||
1725 | 359 | Returns False if no ceph key is available in relation state. | 340 | Returns False if no ceph key is available in relation state. |
1727 | 360 | ''' | 341 | """ |
1728 | 361 | key = None | 342 | key = None |
1729 | 362 | for rid in relation_ids('ceph'): | 343 | for rid in relation_ids('ceph'): |
1730 | 363 | for unit in related_units(rid): | 344 | for unit in related_units(rid): |
1731 | 364 | key = relation_get('key', rid=rid, unit=unit) | 345 | key = relation_get('key', rid=rid, unit=unit) |
1732 | 365 | if key: | 346 | if key: |
1733 | 366 | break | 347 | break |
1734 | 348 | |||
1735 | 367 | if not key: | 349 | if not key: |
1736 | 368 | return False | 350 | return False |
1737 | 351 | |||
1738 | 369 | create_keyring(service=service, key=key) | 352 | create_keyring(service=service, key=key) |
1739 | 370 | keyring = _keyring_path(service) | 353 | keyring = _keyring_path(service) |
1740 | 371 | if user and group: | 354 | if user and group: |
1741 | 372 | check_call(['chown', '%s.%s' % (user, group), keyring]) | 355 | check_call(['chown', '%s.%s' % (user, group), keyring]) |
1742 | 356 | |||
1743 | 373 | return True | 357 | return True |
1744 | 374 | 358 | ||
1745 | 375 | 359 | ||
1746 | 376 | def ceph_version(): | 360 | def ceph_version(): |
1748 | 377 | ''' Retrieve the local version of ceph ''' | 361 | """Retrieve the local version of ceph.""" |
1749 | 378 | if os.path.exists('/usr/bin/ceph'): | 362 | if os.path.exists('/usr/bin/ceph'): |
1750 | 379 | cmd = ['ceph', '-v'] | 363 | cmd = ['ceph', '-v'] |
1751 | 380 | output = check_output(cmd) | 364 | output = check_output(cmd) |
1752 | 381 | 365 | ||
1753 | === modified file 'hooks/charmhelpers/core/hookenv.py' | |||
1754 | --- hooks/charmhelpers/core/hookenv.py 2014-10-06 21:03:50 +0000 | |||
1755 | +++ hooks/charmhelpers/core/hookenv.py 2014-11-21 15:33:38 +0000 | |||
1756 | @@ -214,6 +214,12 @@ | |||
1757 | 214 | except KeyError: | 214 | except KeyError: |
1758 | 215 | return (self._prev_dict or {})[key] | 215 | return (self._prev_dict or {})[key] |
1759 | 216 | 216 | ||
1760 | 217 | def keys(self): | ||
1761 | 218 | prev_keys = [] | ||
1762 | 219 | if self._prev_dict is not None: | ||
1763 | 220 | prev_keys = self._prev_dict.keys() | ||
1764 | 221 | return list(set(prev_keys + dict.keys(self))) | ||
1765 | 222 | |||
1766 | 217 | def load_previous(self, path=None): | 223 | def load_previous(self, path=None): |
1767 | 218 | """Load previous copy of config from disk. | 224 | """Load previous copy of config from disk. |
1768 | 219 | 225 | ||
1769 | 220 | 226 | ||
1770 | === modified file 'hooks/charmhelpers/core/host.py' | |||
1771 | --- hooks/charmhelpers/core/host.py 2014-10-06 21:03:50 +0000 | |||
1772 | +++ hooks/charmhelpers/core/host.py 2014-11-21 15:33:38 +0000 | |||
1773 | @@ -6,13 +6,13 @@ | |||
1774 | 6 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> | 6 | # Matthew Wedgwood <matthew.wedgwood@canonical.com> |
1775 | 7 | 7 | ||
1776 | 8 | import os | 8 | import os |
1777 | 9 | import re | ||
1778 | 9 | import pwd | 10 | import pwd |
1779 | 10 | import grp | 11 | import grp |
1780 | 11 | import random | 12 | import random |
1781 | 12 | import string | 13 | import string |
1782 | 13 | import subprocess | 14 | import subprocess |
1783 | 14 | import hashlib | 15 | import hashlib |
1784 | 15 | import shutil | ||
1785 | 16 | from contextlib import contextmanager | 16 | from contextlib import contextmanager |
1786 | 17 | 17 | ||
1787 | 18 | from collections import OrderedDict | 18 | from collections import OrderedDict |
1788 | @@ -317,7 +317,13 @@ | |||
1789 | 317 | ip_output = (line for line in ip_output if line) | 317 | ip_output = (line for line in ip_output if line) |
1790 | 318 | for line in ip_output: | 318 | for line in ip_output: |
1791 | 319 | if line.split()[1].startswith(int_type): | 319 | if line.split()[1].startswith(int_type): |
1793 | 320 | interfaces.append(line.split()[1].replace(":", "")) | 320 | matched = re.search('.*: (bond[0-9]+\.[0-9]+)@.*', line) |
1794 | 321 | if matched: | ||
1795 | 322 | interface = matched.groups()[0] | ||
1796 | 323 | else: | ||
1797 | 324 | interface = line.split()[1].replace(":", "") | ||
1798 | 325 | interfaces.append(interface) | ||
1799 | 326 | |||
1800 | 321 | return interfaces | 327 | return interfaces |
1801 | 322 | 328 | ||
1802 | 323 | 329 | ||
1803 | 324 | 330 | ||
1804 | === modified file 'hooks/charmhelpers/core/services/__init__.py' | |||
1805 | --- hooks/charmhelpers/core/services/__init__.py 2014-08-13 13:12:02 +0000 | |||
1806 | +++ hooks/charmhelpers/core/services/__init__.py 2014-11-21 15:33:38 +0000 | |||
1807 | @@ -1,2 +1,2 @@ | |||
1810 | 1 | from .base import * | 1 | from .base import * # NOQA |
1811 | 2 | from .helpers import * | 2 | from .helpers import * # NOQA |
1812 | 3 | 3 | ||
1813 | === modified file 'hooks/charmhelpers/fetch/__init__.py' | |||
1814 | --- hooks/charmhelpers/fetch/__init__.py 2014-10-06 21:03:50 +0000 | |||
1815 | +++ hooks/charmhelpers/fetch/__init__.py 2014-11-21 15:33:38 +0000 | |||
1816 | @@ -72,6 +72,7 @@ | |||
1817 | 72 | FETCH_HANDLERS = ( | 72 | FETCH_HANDLERS = ( |
1818 | 73 | 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', | 73 | 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', |
1819 | 74 | 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', | 74 | 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', |
1820 | 75 | 'charmhelpers.fetch.giturl.GitUrlFetchHandler', | ||
1821 | 75 | ) | 76 | ) |
1822 | 76 | 77 | ||
1823 | 77 | APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. | 78 | APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. |
1824 | @@ -218,6 +219,7 @@ | |||
1825 | 218 | pocket for the release. | 219 | pocket for the release. |
1826 | 219 | 'cloud:' may be used to activate official cloud archive pockets, | 220 | 'cloud:' may be used to activate official cloud archive pockets, |
1827 | 220 | such as 'cloud:icehouse' | 221 | such as 'cloud:icehouse' |
1828 | 222 | 'distro' may be used as a noop | ||
1829 | 221 | 223 | ||
1830 | 222 | @param key: A key to be added to the system's APT keyring and used | 224 | @param key: A key to be added to the system's APT keyring and used |
1831 | 223 | to verify the signatures on packages. Ideally, this should be an | 225 | to verify the signatures on packages. Ideally, this should be an |
1832 | @@ -251,8 +253,10 @@ | |||
1833 | 251 | release = lsb_release()['DISTRIB_CODENAME'] | 253 | release = lsb_release()['DISTRIB_CODENAME'] |
1834 | 252 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: | 254 | with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: |
1835 | 253 | apt.write(PROPOSED_POCKET.format(release)) | 255 | apt.write(PROPOSED_POCKET.format(release)) |
1836 | 256 | elif source == 'distro': | ||
1837 | 257 | pass | ||
1838 | 254 | else: | 258 | else: |
1840 | 255 | raise SourceConfigError("Unknown source: {!r}".format(source)) | 259 | log("Unknown source: {!r}".format(source)) |
1841 | 256 | 260 | ||
1842 | 257 | if key: | 261 | if key: |
1843 | 258 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: | 262 | if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: |
1844 | 259 | 263 | ||
1845 | === added file 'hooks/charmhelpers/fetch/giturl.py' | |||
1846 | --- hooks/charmhelpers/fetch/giturl.py 1970-01-01 00:00:00 +0000 | |||
1847 | +++ hooks/charmhelpers/fetch/giturl.py 2014-11-21 15:33:38 +0000 | |||
1848 | @@ -0,0 +1,44 @@ | |||
1849 | 1 | import os | ||
1850 | 2 | from charmhelpers.fetch import ( | ||
1851 | 3 | BaseFetchHandler, | ||
1852 | 4 | UnhandledSource | ||
1853 | 5 | ) | ||
1854 | 6 | from charmhelpers.core.host import mkdir | ||
1855 | 7 | |||
1856 | 8 | try: | ||
1857 | 9 | from git import Repo | ||
1858 | 10 | except ImportError: | ||
1859 | 11 | from charmhelpers.fetch import apt_install | ||
1860 | 12 | apt_install("python-git") | ||
1861 | 13 | from git import Repo | ||
1862 | 14 | |||
1863 | 15 | |||
1864 | 16 | class GitUrlFetchHandler(BaseFetchHandler): | ||
1865 | 17 | """Handler for git branches via generic and github URLs""" | ||
1866 | 18 | def can_handle(self, source): | ||
1867 | 19 | url_parts = self.parse_url(source) | ||
1868 | 20 | #TODO (mattyw) no support for ssh git@ yet | ||
1869 | 21 | if url_parts.scheme not in ('http', 'https', 'git'): | ||
1870 | 22 | return False | ||
1871 | 23 | else: | ||
1872 | 24 | return True | ||
1873 | 25 | |||
1874 | 26 | def clone(self, source, dest, branch): | ||
1875 | 27 | if not self.can_handle(source): | ||
1876 | 28 | raise UnhandledSource("Cannot handle {}".format(source)) | ||
1877 | 29 | |||
1878 | 30 | repo = Repo.clone_from(source, dest) | ||
1879 | 31 | repo.git.checkout(branch) | ||
1880 | 32 | |||
1881 | 33 | def install(self, source, branch="master"): | ||
1882 | 34 | url_parts = self.parse_url(source) | ||
1883 | 35 | branch_name = url_parts.path.strip("/").split("/")[-1] | ||
1884 | 36 | dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", | ||
1885 | 37 | branch_name) | ||
1886 | 38 | if not os.path.exists(dest_dir): | ||
1887 | 39 | mkdir(dest_dir, perms=0755) | ||
1888 | 40 | try: | ||
1889 | 41 | self.clone(source, dest_dir, branch) | ||
1890 | 42 | except OSError as e: | ||
1891 | 43 | raise UnhandledSource(e.strerror) | ||
1892 | 44 | return dest_dir | ||
1893 | 0 | 45 | ||
1894 | === added file 'templates/icehouse/cisco_plugins.ini' | |||
1895 | --- templates/icehouse/cisco_plugins.ini 1970-01-01 00:00:00 +0000 | |||
1896 | +++ templates/icehouse/cisco_plugins.ini 2014-11-21 15:33:38 +0000 | |||
1897 | @@ -0,0 +1,43 @@ | |||
1898 | 1 | ############################################################################### | ||
1899 | 2 | # [ WARNING ] | ||
1900 | 3 | # Configuration file maintained by Juju. Local changes may be overwritten. | ||
1901 | 4 | ############################################################################### | ||
1902 | 5 | [cisco_plugins] | ||
1903 | 6 | |||
1904 | 7 | [cisco] | ||
1905 | 8 | |||
1906 | 9 | [cisco_n1k] | ||
1907 | 10 | integration_bridge = br-int | ||
1908 | 11 | default_policy_profile = default-pp | ||
1909 | 12 | network_node_policy_profile = default-pp | ||
1910 | 13 | {% if openstack_release != 'havana' -%} | ||
1911 | 14 | http_timeout = 120 | ||
1912 | 15 | # (BoolOpt) Specify whether plugin should attempt to synchronize with the VSM | ||
1913 | 16 | # when neutron is started. | ||
1914 | 17 | # Default value: False, indicating no full sync will be performed. | ||
1915 | 18 | # | ||
1916 | 19 | enable_sync_on_start = False | ||
1917 | 20 | {% endif -%} | ||
1918 | 21 | restrict_policy_profiles = {{ restrict_policy_profiles }} | ||
1919 | 22 | {% if n1kv_user_config_flags -%} | ||
1920 | 23 | {% for key, value in n1kv_user_config_flags.iteritems() -%} | ||
1921 | 24 | {{ key }} = {{ value }} | ||
1922 | 25 | {% endfor -%} | ||
1923 | 26 | {% endif -%} | ||
1924 | 27 | |||
1925 | 28 | [CISCO_PLUGINS] | ||
1926 | 29 | vswitch_plugin = neutron.plugins.cisco.n1kv.n1kv_neutron_plugin.N1kvNeutronPluginV2 | ||
1927 | 30 | |||
1928 | 31 | [N1KV:{{ vsm_ip }}] | ||
1929 | 32 | password = {{ vsm_password }} | ||
1930 | 33 | username = {{ vsm_username }} | ||
1931 | 34 | |||
1932 | 35 | {% include "parts/section-database" %} | ||
1933 | 36 | |||
1934 | 37 | [securitygroup] | ||
1935 | 38 | {% if neutron_security_groups -%} | ||
1936 | 39 | firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver | ||
1937 | 40 | enable_security_group = True | ||
1938 | 41 | {% else -%} | ||
1939 | 42 | firewall_driver = neutron.agent.firewall.NoopFirewallDriver | ||
1940 | 43 | {% endif -%} | ||
1941 | 0 | 44 | ||
1942 | === modified file 'templates/icehouse/nova.conf' | |||
1943 | --- templates/icehouse/nova.conf 2014-10-07 11:37:20 +0000 | |||
1944 | +++ templates/icehouse/nova.conf 2014-11-21 15:33:38 +0000 | |||
1945 | @@ -76,6 +76,18 @@ | |||
1946 | 76 | {% endif -%} | 76 | {% endif -%} |
1947 | 77 | {% endif -%} | 77 | {% endif -%} |
1948 | 78 | 78 | ||
1949 | 79 | {% if neutron_plugin and neutron_plugin == 'n1kv' -%} | ||
1950 | 80 | libvirt_user_virtio_for_bridges = True | ||
1951 | 81 | nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver | ||
1952 | 82 | {% if neutron_security_groups -%} | ||
1953 | 83 | security_group_api = {{ network_manager }} | ||
1954 | 84 | nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver | ||
1955 | 85 | {% endif -%} | ||
1956 | 86 | {% if external_network -%} | ||
1957 | 87 | default_floating_pool = {{ external_network }} | ||
1958 | 88 | {% endif -%} | ||
1959 | 89 | {% endif -%} | ||
1960 | 90 | |||
1961 | 79 | {% if network_manager_config -%} | 91 | {% if network_manager_config -%} |
1962 | 80 | {% for key, value in network_manager_config.iteritems() -%} | 92 | {% for key, value in network_manager_config.iteritems() -%} |
1963 | 81 | {{ key }} = {{ value }} | 93 | {{ key }} = {{ value }} |
1964 | 82 | 94 | ||
1965 | === modified file 'tests/basic_deployment.py' | |||
1966 | --- tests/basic_deployment.py 2014-10-14 15:17:57 +0000 | |||
1967 | +++ tests/basic_deployment.py 2014-11-21 15:33:38 +0000 | |||
1968 | @@ -19,7 +19,7 @@ | |||
1969 | 19 | class NovaCCBasicDeployment(OpenStackAmuletDeployment): | 19 | class NovaCCBasicDeployment(OpenStackAmuletDeployment): |
1970 | 20 | """Amulet tests on a basic nova cloud controller deployment.""" | 20 | """Amulet tests on a basic nova cloud controller deployment.""" |
1971 | 21 | 21 | ||
1973 | 22 | def __init__(self, series=None, openstack=None, source=None, stable=False): | 22 | def __init__(self, series=None, openstack=None, source=None, stable=True): |
1974 | 23 | """Deploy the entire test environment.""" | 23 | """Deploy the entire test environment.""" |
1975 | 24 | super(NovaCCBasicDeployment, self).__init__(series, openstack, source, stable) | 24 | super(NovaCCBasicDeployment, self).__init__(series, openstack, source, stable) |
1976 | 25 | self._add_services() | 25 | self._add_services() |
UOSCI bot says: controller- next for shivrao mp242447
charm_lint_check #1166 nova-cloud-
LINT OK: passed
LINT Results (max last 5 lines): client- timeout has no default value
I: config.yaml: option os-admin-network has no default value
I: config.yaml: option haproxy-
I: config.yaml: option ssl_cert has no default value
I: config.yaml: option nvp-l3-uuid has no default value
I: config.yaml: option os-internal-network has no default value
Full lint test output: http:// paste.ubuntu. com/9137369/ 10.98.191. 181:8080/ job/charm_ lint_check/ 1166/
Build: http://